/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/sched/sysctl.h>

#include "trace.h"

#define CREATE_TRACE_POINTS
#include <trace/events/preemptirq.h>

#if defined(CONFIG_IRQSOFF_TRACER) || defined(CONFIG_PREEMPT_TRACER)
static struct trace_array		*irqsoff_trace __read_mostly;
static int				tracer_enabled __read_mostly;

static DEFINE_PER_CPU(int, tracing_cpu);

static DEFINE_RAW_SPINLOCK(max_trace_lock);

enum {
	TRACER_IRQS_OFF		= (1 << 1),
	TRACER_PREEMPT_OFF	= (1 << 2),
};

static int trace_type __read_mostly;

static int save_flags;

static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
static int start_irqsoff_tracer(struct trace_array *tr, int graph);

/*
 * irqsoff stack tracing threshold in ns.
 * default: 1ms
 */
unsigned int sysctl_irqsoff_tracing_threshold_ns = 1000000UL;

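/*
 * Illustrative only: assuming the knob above is wired into a sysctl
 * table (that registration is not part of this file), the threshold
 * could be tuned at runtime with something like:
 *
 *	# report only irqs-off sections longer than 500 us
 *	# (hypothetical proc path)
 *	echo 500000 > /proc/sys/kernel/irqsoff_tracing_threshold_ns
 */
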
#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(void)
{
	return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count());
}
#else
# define preempt_trace() (0)
#endif

#ifdef CONFIG_IRQSOFF_TRACER
static inline int
irq_trace(void)
{
	return ((trace_type & TRACER_IRQS_OFF) &&
		irqs_disabled());
}
#else
# define irq_trace() (0)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_display_graph(struct trace_array *tr, int set);
# define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
#else
static inline int irqsoff_display_graph(struct trace_array *tr, int set)
{
	return -EINVAL;
}
# define is_graph(tr) false
#endif

/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * did a maximum and could disturb our measurement with serial console
 * printouts, etc. Truly coinciding maximum latencies should be rare
 * and what happens together happens separately as well, so this doesn't
 * decrease the validity of the maximum found:
 */
static __cacheline_aligned_in_smp	unsigned long max_sequence;

#ifdef CONFIG_FUNCTION_TRACER
/*
 * Prologue for the preempt and irqs off function tracers.
 *
 * Returns 1 if it is OK to continue, with data->disabled
 * incremented; 0 if the trace is to be ignored, with data->disabled
 * left unchanged.
 *
 * Note, this function is also used outside this ifdef but
 * inside the #ifdef of the function graph tracer below.
 * This is OK, since the function graph tracer is
 * dependent on the function tracer.
 */
static int func_prolog_dec(struct trace_array *tr,
			   struct trace_array_cpu **data,
			   unsigned long *flags)
{
	long disabled;
	int cpu;

	/*
	 * Does not matter if we preempt. We test the flags
	 * afterward, to see if irqs are disabled or not.
	 * If we preempt and get a false positive, the flags
	 * test will fail.
	 */
	cpu = raw_smp_processor_id();
	if (likely(!per_cpu(tracing_cpu, cpu)))
		return 0;

	local_save_flags(*flags);
	/*
	 * Slight chance to get a false positive on tracing_cpu,
	 * although I'm starting to think there isn't a chance.
	 * Leave this for now just to be paranoid.
	 */
	if (!irqs_disabled_flags(*flags) && !preempt_count())
		return 0;

	*data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&(*data)->disabled);

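	/*
	 * disabled == 1 means we are the only active user on this CPU;
	 * any other value indicates recursion, so back out below.
	 */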
	if (likely(disabled == 1))
		return 1;

	atomic_dec(&(*data)->disabled);

	return 0;
}

/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (!func_prolog_dec(tr, &data, &flags))
		return;

	trace_function(tr, ip, parent_ip, flags, preempt_count());

	atomic_dec(&data->disabled);
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_display_graph(struct trace_array *tr, int set)
{
	int cpu;

	if (!(is_graph(tr) ^ set))
		return 0;

	stop_irqsoff_tracer(irqsoff_trace, !set);

	for_each_possible_cpu(cpu)
		per_cpu(tracing_cpu, cpu) = 0;

	tr->max_latency = 0;
	tracing_reset_online_cpus(&irqsoff_trace->trace_buffer);

	return start_irqsoff_tracer(irqsoff_trace, set);
}

static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int ret;
	int pc;

	if (!func_prolog_dec(tr, &data, &flags))
		return 0;

	pc = preempt_count();
	ret = __trace_graph_entry(tr, trace, flags, pc);
	atomic_dec(&data->disabled);

	return ret;
}

static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc;

	if (!func_prolog_dec(tr, &data, &flags))
		return;

	pc = preempt_count();
	__trace_graph_return(tr, trace, flags, pc);
	atomic_dec(&data->disabled);
}

static void irqsoff_trace_open(struct trace_iterator *iter)
{
	if (is_graph(iter->tr))
		graph_trace_open(iter);
}

static void irqsoff_trace_close(struct trace_iterator *iter)
{
	if (iter->private)
		graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \
			    TRACE_GRAPH_PRINT_PROC | \
			    TRACE_GRAPH_PRINT_ABS_TIME | \
			    TRACE_GRAPH_PRINT_DURATION)

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
	/*
	 * In graph mode call the graph tracer output function,
	 * otherwise go with the TRACE_FN event handler
	 */
	if (is_graph(iter->tr))
		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

	return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_print_header(struct seq_file *s)
{
	struct trace_array *tr = irqsoff_trace;

	if (is_graph(tr))
		print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
	else
		trace_default_header(s);
}

static void
__trace_function(struct trace_array *tr,
		 unsigned long ip, unsigned long parent_ip,
		 unsigned long flags, int pc)
{
	if (is_graph(tr))
		trace_graph_function(tr, ip, parent_ip, flags, pc);
	else
		trace_function(tr, ip, parent_ip, flags, pc);
}

#else
#define __trace_function trace_function

#ifdef CONFIG_FUNCTION_TRACER
static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
	return -1;
}
#endif

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_trace_open(struct trace_iterator *iter) { }
static void irqsoff_trace_close(struct trace_iterator *iter) { }

#ifdef CONFIG_FUNCTION_TRACER
static void irqsoff_graph_return(struct ftrace_graph_ret *trace) { }
static void irqsoff_print_header(struct seq_file *s)
{
	trace_default_header(s);
}
#else
static void irqsoff_print_header(struct seq_file *s)
{
	trace_latency_header(s);
}
#endif /* CONFIG_FUNCTION_TRACER */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * Should this new latency be reported/recorded?
 * With a fixed tracing_thresh set, report any section at least that
 * long; otherwise report only a new maximum latency.
 */
static bool report_latency(struct trace_array *tr, cycle_t delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return false;
	} else {
		if (delta <= tr->max_latency)
			return false;
	}
	return true;
}

static void
check_critical_timing(struct trace_array *tr,
		      struct trace_array_cpu *data,
		      unsigned long parent_ip,
		      int cpu)
{
	cycle_t T0, T1, delta;
	unsigned long flags;
	int pc;

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1 - T0;

	local_save_flags(flags);

	pc = preempt_count();

	if (!report_latency(tr, delta))
		goto out;

	raw_spin_lock_irqsave(&max_trace_lock, flags);

	/* check if we are still the max latency */
	if (!report_latency(tr, delta))
		goto out_unlock;

	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
	/* Skip 5 functions to get to the irq/preempt enable function */
	__trace_stack(tr, flags, 5, pc);

	if (data->critical_sequence != max_sequence)
		goto out_unlock;

	data->critical_end = parent_ip;

	if (likely(!is_tracing_stopped())) {
		tr->max_latency = delta;
		update_max_tr_single(tr, current, cpu);
	}

	max_sequence++;

out_unlock:
	raw_spin_unlock_irqrestore(&max_trace_lock, flags);

out:
	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
}

static inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (!tracer_enabled || !tracing_is_enabled())
		return;

	cpu = raw_smp_processor_id();

	if (per_cpu(tracing_cpu, cpu))
		return;

	data = per_cpu_ptr(tr->trace_buffer.data, cpu);

	if (unlikely(!data) || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	data->critical_start = parent_ip ? : ip;

	local_save_flags(flags);

	__trace_function(tr, ip, parent_ip, flags, preempt_count());

	per_cpu(tracing_cpu, cpu) = 1;

	atomic_dec(&data->disabled);
}

static inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	cpu = raw_smp_processor_id();
	/* Always clear the tracing cpu on stopping the trace */
	if (unlikely(per_cpu(tracing_cpu, cpu)))
		per_cpu(tracing_cpu, cpu) = 0;
	else
		return;

	if (!tracer_enabled || !tracing_is_enabled())
		return;

	data = per_cpu_ptr(tr->trace_buffer.data, cpu);

	if (unlikely(!data) ||
	    !data->critical_start || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	local_save_flags(flags);
	__trace_function(tr, ip, parent_ip, flags, preempt_count());
	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
	data->critical_start = 0;
	atomic_dec(&data->disabled);
}

/* start and stop critical timings, used to suppress measurement (in idle) */
void start_critical_timings(void)
{
	if (preempt_trace() || irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(start_critical_timings);

void stop_critical_timings(void)
{
	if (preempt_trace() || irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);

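/*
 * A sketch of the intended call pattern from an idle loop (the real
 * callers live in the cpuidle/arch idle code, not in this file):
 *
 *	stop_critical_timings();	// idle time is not an irqs-off latency
 *	arch_cpu_idle();		// may wait with interrupts disabled
 *	start_critical_timings();	// resume measuring on wakeup
 */
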
#ifdef CONFIG_IRQSOFF_TRACER
#ifdef CONFIG_PROVE_LOCKING
void time_hardirqs_on(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(a0, a1);
}

void time_hardirqs_off(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(a0, a1);
}

#else /* !CONFIG_PROVE_LOCKING */

#ifdef CONFIG_PREEMPTIRQ_EVENTS
struct irqsoff_store {
	u64		ts;
	unsigned long	caddr[4];
};

static DEFINE_PER_CPU(struct irqsoff_store, the_irqsoff);
#endif /* CONFIG_PREEMPTIRQ_EVENTS */
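
/*
 * the_irqsoff records when and from where this CPU last disabled
 * interrupts; tracer_hardirqs_on() below compares against it and emits
 * a trace event if the section exceeded the sysctl threshold.
 */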

/*
 * We are only interested in hardirq on/off events:
 */
static inline void tracer_hardirqs_on(void)
{
#ifdef CONFIG_PREEMPTIRQ_EVENTS
	struct irqsoff_store *is = &per_cpu(the_irqsoff,
						raw_smp_processor_id());
	u64 delta = sched_clock() - is->ts;

	if (delta > sysctl_irqsoff_tracing_threshold_ns)
		trace_irqs_disable(delta, is->caddr[0], is->caddr[1],
						is->caddr[2], is->caddr[3]);
#endif /* CONFIG_PREEMPTIRQ_EVENTS */

	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}

static inline void tracer_hardirqs_off(void)
{
#ifdef CONFIG_PREEMPTIRQ_EVENTS
	struct irqsoff_store *is = &per_cpu(the_irqsoff,
						raw_smp_processor_id());

	is->ts = sched_clock();
	is->caddr[0] = CALLER_ADDR0;
	is->caddr[1] = CALLER_ADDR1;
	is->caddr[2] = CALLER_ADDR2;
	is->caddr[3] = CALLER_ADDR3;
#endif /* CONFIG_PREEMPTIRQ_EVENTS */

	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}

static inline void tracer_hardirqs_on_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, caller_addr);
}

static inline void tracer_hardirqs_off_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, caller_addr);
}

#endif /* CONFIG_PROVE_LOCKING */
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
static inline void tracer_preempt_on(unsigned long a0, unsigned long a1)
{
	if (preempt_trace() && !irq_trace())
		stop_critical_timing(a0, a1);
}

static inline void tracer_preempt_off(unsigned long a0, unsigned long a1)
{
	if (preempt_trace() && !irq_trace())
		start_critical_timing(a0, a1);
}
#endif /* CONFIG_PREEMPT_TRACER */
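
/*
 * A sketch of where the preempt hooks fire from (the real call sites
 * live in the preempt-count bookkeeping code, e.g. kernel/sched/core.c):
 *
 *	preempt_count_add(val);		// preemption becomes disabled
 *	  -> trace_preempt_off(...);	//   starts the timing, see below
 *	preempt_count_sub(val);		// preemption becomes enabled
 *	  -> trace_preempt_on(...);	//   stops the timing and reports
 */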

#ifdef CONFIG_FUNCTION_TRACER
static bool function_enabled;

static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
	int ret;

	/* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
	if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
		return 0;

	if (graph)
		ret = register_ftrace_graph(&irqsoff_graph_return,
					    &irqsoff_graph_entry);
	else
		ret = register_ftrace_function(tr->ops);

	if (!ret)
		function_enabled = true;

	return ret;
}

static void unregister_irqsoff_function(struct trace_array *tr, int graph)
{
	if (!function_enabled)
		return;

	if (graph)
		unregister_ftrace_graph();
	else
		unregister_ftrace_function(tr->ops);

	function_enabled = false;
}

static int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
	if (!(mask & TRACE_ITER_FUNCTION))
		return 0;

	if (set)
		register_irqsoff_function(tr, is_graph(tr), 1);
	else
		unregister_irqsoff_function(tr, is_graph(tr));
	return 1;
}
#else
static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
	return 0;
}
static void unregister_irqsoff_function(struct trace_array *tr, int graph) { }
static inline int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
	return 0;
}
#endif /* CONFIG_FUNCTION_TRACER */

static int irqsoff_flag_changed(struct trace_array *tr, u32 mask, int set)
{
	struct tracer *tracer = tr->current_trace;

	if (irqsoff_function_set(tr, mask, set))
		return 0;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (mask & TRACE_ITER_DISPLAY_GRAPH)
		return irqsoff_display_graph(tr, set);
#endif

	return trace_keep_overwrite(tracer, mask, set);
}

static int start_irqsoff_tracer(struct trace_array *tr, int graph)
{
	int ret;

	ret = register_irqsoff_function(tr, graph, 0);

	if (!ret && tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;

	return ret;
}

static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
{
	tracer_enabled = 0;

	unregister_irqsoff_function(tr, graph);
}

static bool irqsoff_busy;

static int __irqsoff_tracer_init(struct trace_array *tr)
{
	if (irqsoff_busy)
		return -EBUSY;

	save_flags = tr->trace_flags;

	/* non overwrite screws up the latency tracers */
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);

	tr->max_latency = 0;
	irqsoff_trace = tr;
	/* make sure that the tracer is visible */
	smp_wmb();

	ftrace_init_array_ops(tr, irqsoff_tracer_call);

	/* Only toplevel instance supports graph tracing */
	if (start_irqsoff_tracer(tr, (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
				      is_graph(tr))))
		printk(KERN_ERR "failed to start irqsoff tracer\n");

	irqsoff_busy = true;
	return 0;
}

static void irqsoff_tracer_reset(struct trace_array *tr)
{
	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;

	stop_irqsoff_tracer(tr, is_graph(tr));

	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
	ftrace_reset_array_ops(tr);

	irqsoff_busy = false;
}

static void irqsoff_tracer_start(struct trace_array *tr)
{
	tracer_enabled = 1;
}

static void irqsoff_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}

#ifdef CONFIG_IRQSOFF_TRACER
static int irqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF;

	return __irqsoff_tracer_init(tr);
}
static struct tracer irqsoff_tracer __read_mostly =
{
	.name		= "irqsoff",
	.init		= irqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_irqsoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};
# define register_irqsoff(trace) register_tracer(&trace)
#else
# define register_irqsoff(trace) do { } while (0)
#endif
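
/*
 * Typical usage from user space, as a sketch (assumes tracefs is
 * mounted at /sys/kernel/debug/tracing; the same steps work for the
 * preemptoff and preemptirqsoff tracers registered below):
 *
 *	cd /sys/kernel/debug/tracing
 *	echo irqsoff > current_tracer
 *	echo 1 > tracing_on
 *	sleep 1
 *	echo 0 > tracing_on
 *	cat trace		# shows the worst irqs-off section seen
 */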

#ifdef CONFIG_PREEMPT_TRACER
static int preemptoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_PREEMPT_OFF;

	return __irqsoff_tracer_init(tr);
}

static struct tracer preemptoff_tracer __read_mostly =
{
	.name		= "preemptoff",
	.init		= preemptoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};
# define register_preemptoff(trace) register_tracer(&trace)
#else
# define register_preemptoff(trace) do { } while (0)
#endif

#if defined(CONFIG_IRQSOFF_TRACER) && \
	defined(CONFIG_PREEMPT_TRACER)

static int preemptirqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

	return __irqsoff_tracer_init(tr);
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
	.name		= "preemptirqsoff",
	.init		= preemptirqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptirqsoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};

# define register_preemptirqsoff(trace) register_tracer(&trace)
#else
# define register_preemptirqsoff(trace) do { } while (0)
#endif

__init static int init_irqsoff_tracer(void)
{
	register_irqsoff(irqsoff_tracer);
	register_preemptoff(preemptoff_tracer);
	register_preemptirqsoff(preemptirqsoff_tracer);

	return 0;
}
core_initcall(init_irqsoff_tracer);
#endif /* CONFIG_IRQSOFF_TRACER || CONFIG_PREEMPT_TRACER */

#ifndef CONFIG_IRQSOFF_TRACER
static inline void tracer_hardirqs_on(void) { }
static inline void tracer_hardirqs_off(void) { }
static inline void tracer_hardirqs_on_caller(unsigned long caller_addr) { }
static inline void tracer_hardirqs_off_caller(unsigned long caller_addr) { }
#endif

#ifndef CONFIG_PREEMPT_TRACER
static inline void tracer_preempt_on(unsigned long a0, unsigned long a1) { }
static inline void tracer_preempt_off(unsigned long a0, unsigned long a1) { }
#endif

#if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PROVE_LOCKING)
/* Per-cpu variable to prevent redundant calls when IRQs already off */
static DEFINE_PER_CPU(int, tracing_irq_cpu);

void trace_hardirqs_on(void)
{
	if (!this_cpu_read(tracing_irq_cpu))
		return;

	trace_irq_enable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
	tracer_hardirqs_on();

	this_cpu_write(tracing_irq_cpu, 0);
}
EXPORT_SYMBOL(trace_hardirqs_on);

void trace_hardirqs_off(void)
{
	if (this_cpu_read(tracing_irq_cpu))
		return;

	this_cpu_write(tracing_irq_cpu, 1);

	trace_irq_disable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
	tracer_hardirqs_off();
}
EXPORT_SYMBOL(trace_hardirqs_off);

__visible void trace_hardirqs_on_caller(unsigned long caller_addr)
{
	if (!this_cpu_read(tracing_irq_cpu))
		return;

	trace_irq_enable_rcuidle(CALLER_ADDR0, caller_addr);
	tracer_hardirqs_on_caller(caller_addr);

	this_cpu_write(tracing_irq_cpu, 0);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);

__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
{
	if (this_cpu_read(tracing_irq_cpu))
		return;

	this_cpu_write(tracing_irq_cpu, 1);

	trace_irq_disable_rcuidle(CALLER_ADDR0, caller_addr);
	tracer_hardirqs_off_caller(caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);

/*
 * Stubs:
 */

void trace_softirqs_on(unsigned long ip)
{
}

void trace_softirqs_off(unsigned long ip)
{
}

inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

#if defined(CONFIG_PREEMPT_TRACER) || \
	(defined(CONFIG_DEBUG_PREEMPT) && defined(CONFIG_PREEMPTIRQ_EVENTS))
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
	trace_preempt_enable_rcuidle(a0, a1);
	tracer_preempt_on(a0, a1);
}

void trace_preempt_off(unsigned long a0, unsigned long a1)
{
	trace_preempt_disable_rcuidle(a0, a1);
	tracer_preempt_off(a0, a1);
}
#endif