/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 Nadia Yvette Chambers
 */
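/*
 * Usage sketch (illustrative, not part of this file's logic): the tracers
 * registered below are driven from tracefs, e.g.
 *
 *	# echo irqsoff > /sys/kernel/tracing/current_tracer
 *	# cat /sys/kernel/tracing/tracing_max_latency
 *	# cat /sys/kernel/tracing/trace
 *
 * "preemptoff" and "preemptirqsoff" are selected the same way when their
 * config options are enabled.  The exact tracefs mount point varies by
 * system (/sys/kernel/debug/tracing on older setups).
 */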
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/sched/sysctl.h>

#include "trace.h"

#define CREATE_TRACE_POINTS
#include <trace/events/preemptirq.h>

#if defined(CONFIG_IRQSOFF_TRACER) || defined(CONFIG_PREEMPT_TRACER)
static struct trace_array		*irqsoff_trace __read_mostly;
static int				tracer_enabled __read_mostly;

static DEFINE_PER_CPU(int, tracing_cpu);

static DEFINE_RAW_SPINLOCK(max_trace_lock);

enum {
	TRACER_IRQS_OFF		= (1 << 1),
	TRACER_PREEMPT_OFF	= (1 << 2),
};
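
/*
 * trace_type holds a mask of the bits above.  The preemptirqsoff tracer
 * (see preemptirqsoff_tracer_init() below) sets both bits, so a critical
 * section is timed from the first point where either preemption or
 * interrupts go off until both are back on.
 */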
static int trace_type __read_mostly;

static int save_flags;

static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
static int start_irqsoff_tracer(struct trace_array *tr, int graph);

/*
 * irqsoff stack tracing threshold in ns.
 * default: 1ms
 */
unsigned int sysctl_irqsoff_tracing_threshold_ns = 1000000UL;
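
/*
 * With CONFIG_PREEMPTIRQ_EVENTS, tracer_hardirqs_off()/_on() below stamp
 * and measure each hardirq-disabled section and emit the irqs_disable
 * tracepoint only when the section exceeds this threshold.  The knob is
 * presumably exposed as a sysctl by a table registered elsewhere; the
 * exact proc path is not defined in this file.
 */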

#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(void)
{
	return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count());
}
#else
# define preempt_trace() (0)
#endif

#ifdef CONFIG_IRQSOFF_TRACER
static inline int
irq_trace(void)
{
	return ((trace_type & TRACER_IRQS_OFF) &&
		irqs_disabled());
}
#else
# define irq_trace() (0)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_display_graph(struct trace_array *tr, int set);
# define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
#else
static inline int irqsoff_display_graph(struct trace_array *tr, int set)
{
	return -EINVAL;
}
# define is_graph(tr) false
#endif

/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * did a maximum and could disturb our measurement with serial console
 * printouts, etc. Truly coinciding maximum latencies should be rare
 * and what happens together happens separately as well, so this doesn't
 * decrease the validity of the maximum found:
 */
static __cacheline_aligned_in_smp	unsigned long max_sequence;

#ifdef CONFIG_FUNCTION_TRACER
/*
 * Prologue for the preempt and irqs off function tracers.
 *
 * Returns 1 if it is OK to continue, and data->disabled is
 *            incremented.
 *         0 if the trace is to be ignored, and data->disabled
 *            is kept the same.
 *
 * Note, this function is also used outside this ifdef but
 * inside the #ifdef of the function graph tracer below.
 * This is OK, since the function graph tracer is
 * dependent on the function tracer.
 */
static int func_prolog_dec(struct trace_array *tr,
			   struct trace_array_cpu **data,
			   unsigned long *flags)
{
	long disabled;
	int cpu;

	/*
	 * Does not matter if we preempt. We test the flags
	 * afterward, to see if irqs are disabled or not.
	 * If we preempt and get a false positive, the flags
	 * test will fail.
	 */
	cpu = raw_smp_processor_id();
	if (likely(!per_cpu(tracing_cpu, cpu)))
		return 0;

	local_save_flags(*flags);
	/*
	 * Slight chance to get a false positive on tracing_cpu,
	 * although I'm starting to think there isn't a chance.
	 * Leave this for now just to be paranoid.
	 */
	if (!irqs_disabled_flags(*flags) && !preempt_count())
		return 0;

	*data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&(*data)->disabled);

	if (likely(disabled == 1))
		return 1;

	atomic_dec(&(*data)->disabled);

	return 0;
}
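
/*
 * Typical caller pattern (a sketch of what irqsoff_tracer_call() and the
 * graph entry/return hooks below all do):
 *
 *	if (!func_prolog_dec(tr, &data, &flags))
 *		return;
 *	...record the event into the per-cpu buffer...
 *	atomic_dec(&data->disabled);
 */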

/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (!func_prolog_dec(tr, &data, &flags))
		return;

	trace_function(tr, ip, parent_ip, flags, preempt_count());

	atomic_dec(&data->disabled);
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_display_graph(struct trace_array *tr, int set)
{
	int cpu;

	if (!(is_graph(tr) ^ set))
		return 0;

	stop_irqsoff_tracer(irqsoff_trace, !set);

	for_each_possible_cpu(cpu)
		per_cpu(tracing_cpu, cpu) = 0;

	tr->max_latency = 0;
	tracing_reset_online_cpus(&irqsoff_trace->trace_buffer);

	return start_irqsoff_tracer(irqsoff_trace, set);
}

static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int ret;
	int pc;

	if (!func_prolog_dec(tr, &data, &flags))
		return 0;

	pc = preempt_count();
	ret = __trace_graph_entry(tr, trace, flags, pc);
	atomic_dec(&data->disabled);

	return ret;
}

static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc;

	if (!func_prolog_dec(tr, &data, &flags))
		return;

	pc = preempt_count();
	__trace_graph_return(tr, trace, flags, pc);
	atomic_dec(&data->disabled);
}

static void irqsoff_trace_open(struct trace_iterator *iter)
{
	if (is_graph(iter->tr))
		graph_trace_open(iter);
}

static void irqsoff_trace_close(struct trace_iterator *iter)
{
	if (iter->private)
		graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \
			    TRACE_GRAPH_PRINT_PROC | \
			    TRACE_GRAPH_PRINT_ABS_TIME | \
			    TRACE_GRAPH_PRINT_DURATION)

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
	/*
	 * In graph mode call the graph tracer output function,
	 * otherwise go with the TRACE_FN event handler
	 */
	if (is_graph(iter->tr))
		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

	return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_print_header(struct seq_file *s)
{
	struct trace_array *tr = irqsoff_trace;

	if (is_graph(tr))
		print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
	else
		trace_default_header(s);
}

static void
__trace_function(struct trace_array *tr,
		 unsigned long ip, unsigned long parent_ip,
		 unsigned long flags, int pc)
{
	if (is_graph(tr))
		trace_graph_function(tr, ip, parent_ip, flags, pc);
	else
		trace_function(tr, ip, parent_ip, flags, pc);
}

#else
#define __trace_function trace_function

#ifdef CONFIG_FUNCTION_TRACER
static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
	return -1;
}
#endif

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_trace_open(struct trace_iterator *iter) { }
static void irqsoff_trace_close(struct trace_iterator *iter) { }

#ifdef CONFIG_FUNCTION_TRACER
static void irqsoff_graph_return(struct ftrace_graph_ret *trace) { }
static void irqsoff_print_header(struct seq_file *s)
{
	trace_default_header(s);
}
#else
static void irqsoff_print_header(struct seq_file *s)
{
	trace_latency_header(s);
}
#endif /* CONFIG_FUNCTION_TRACER */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * Should this new latency be reported/recorded?
 */
static bool report_latency(struct trace_array *tr, cycle_t delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return false;
	} else {
		if (delta <= tr->max_latency)
			return false;
	}
	return true;
}
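
/*
 * Two reporting modes fall out of the check above (a usage note, not new
 * mechanism): with a nonzero tracing_thresh -- set via the tracefs
 * "tracing_thresh" file, which takes microseconds and is stored here in
 * nanoseconds -- every critical section longer than the threshold is
 * recorded; with it zero, only sections that beat the previous
 * tr->max_latency are recorded, i.e. classic max-latency hunting.
 */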

static void
check_critical_timing(struct trace_array *tr,
		      struct trace_array_cpu *data,
		      unsigned long parent_ip,
		      int cpu)
{
	cycle_t T0, T1, delta;
	unsigned long flags;
	int pc;

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1-T0;

	local_save_flags(flags);

	pc = preempt_count();

	if (!report_latency(tr, delta))
		goto out;

	raw_spin_lock_irqsave(&max_trace_lock, flags);

	/* check if we are still the max latency */
	if (!report_latency(tr, delta))
		goto out_unlock;

	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
	/* Skip 5 functions to get to the irq/preempt enable function */
	__trace_stack(tr, flags, 5, pc);

	if (data->critical_sequence != max_sequence)
		goto out_unlock;

	data->critical_end = parent_ip;

	if (likely(!is_tracing_stopped())) {
		tr->max_latency = delta;
		update_max_tr_single(tr, current, cpu);
	}

	max_sequence++;

out_unlock:
	raw_spin_unlock_irqrestore(&max_trace_lock, flags);

out:
	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
}
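
/*
 * Why report_latency() is checked twice above: the first test is a cheap
 * unlocked one; another CPU may record a bigger maximum between that test
 * and taking max_trace_lock, so the check is repeated under the lock
 * before tr->max_latency and the max snapshot are updated.
 */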

static inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (!tracer_enabled || !tracing_is_enabled())
		return;

	cpu = raw_smp_processor_id();

	if (per_cpu(tracing_cpu, cpu))
		return;

	data = per_cpu_ptr(tr->trace_buffer.data, cpu);

	if (unlikely(!data) || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	data->critical_start = parent_ip ? : ip;

	local_save_flags(flags);

	__trace_function(tr, ip, parent_ip, flags, preempt_count());

	per_cpu(tracing_cpu, cpu) = 1;

	atomic_dec(&data->disabled);
}

static inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	cpu = raw_smp_processor_id();
	/* Always clear the tracing cpu on stopping the trace */
	if (unlikely(per_cpu(tracing_cpu, cpu)))
		per_cpu(tracing_cpu, cpu) = 0;
	else
		return;

	if (!tracer_enabled || !tracing_is_enabled())
		return;

	data = per_cpu_ptr(tr->trace_buffer.data, cpu);

	if (unlikely(!data) ||
	    !data->critical_start || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	local_save_flags(flags);
	__trace_function(tr, ip, parent_ip, flags, preempt_count());
	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
	data->critical_start = 0;
	atomic_dec(&data->disabled);
}
/* start and stop critical timings, used for stoppage (in idle) */
void start_critical_timings(void)
{
	if (preempt_trace() || irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(start_critical_timings);

void stop_critical_timings(void)
{
	if (preempt_trace() || irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);
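
/*
 * A note on the pair above: callers such as the idle loop bracket code
 * whose time spent with interrupts off is expected and should not count
 * as latency.  A sketch of the usual call site (in the scheduler's idle
 * path, outside this file):
 *
 *	stop_critical_timings();
 *	...enter the low-level idle routine with irqs off...
 *	start_critical_timings();
 */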

#ifdef CONFIG_IRQSOFF_TRACER
#ifdef CONFIG_PROVE_LOCKING
void time_hardirqs_on(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(a0, a1);
}

void time_hardirqs_off(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(a0, a1);
}

#else /* !CONFIG_PROVE_LOCKING */

#ifdef CONFIG_PREEMPTIRQ_EVENTS
struct irqsoff_store {
	u64 ts;
	unsigned long caddr[4];
};

static DEFINE_PER_CPU(struct irqsoff_store, the_irqsoff);
#endif /* CONFIG_PREEMPTIRQ_EVENTS */
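
/*
 * How the store above is used (summarising the two helpers below):
 * tracer_hardirqs_off() stamps sched_clock() and the caller addresses
 * when interrupts get disabled; tracer_hardirqs_on() computes the delta
 * when they come back on and emits the irqs_disable tracepoint only if
 * the section exceeded sysctl_irqsoff_tracing_threshold_ns.
 */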

/*
 * We are only interested in hardirq on/off events:
 */
static inline void tracer_hardirqs_on(void)
{
#ifdef CONFIG_PREEMPTIRQ_EVENTS
	struct irqsoff_store *is = &per_cpu(the_irqsoff,
					    raw_smp_processor_id());
	u64 delta = sched_clock() - is->ts;

	if (delta > sysctl_irqsoff_tracing_threshold_ns)
		trace_irqs_disable(delta, is->caddr[0], is->caddr[1],
				   is->caddr[2], is->caddr[3]);
#endif /* CONFIG_PREEMPTIRQ_EVENTS */

	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}

static inline void tracer_hardirqs_off(void)
{
#ifdef CONFIG_PREEMPTIRQ_EVENTS
	struct irqsoff_store *is = &per_cpu(the_irqsoff,
					    raw_smp_processor_id());

	is->ts = sched_clock();
	is->caddr[0] = CALLER_ADDR0;
	is->caddr[1] = CALLER_ADDR1;
	is->caddr[2] = CALLER_ADDR2;
	is->caddr[3] = CALLER_ADDR3;
#endif /* CONFIG_PREEMPTIRQ_EVENTS */

	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}

static inline void tracer_hardirqs_on_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, caller_addr);
}

static inline void tracer_hardirqs_off_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, caller_addr);
}

#endif /* CONFIG_PROVE_LOCKING */
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
static inline void tracer_preempt_on(unsigned long a0, unsigned long a1)
{
	if (preempt_trace() && !irq_trace())
		stop_critical_timing(a0, a1);
}

static inline void tracer_preempt_off(unsigned long a0, unsigned long a1)
{
	if (preempt_trace() && !irq_trace())
		start_critical_timing(a0, a1);
}
#endif /* CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_FUNCTION_TRACER
static bool function_enabled;

static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
	int ret;

	/* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
	if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
		return 0;

	if (graph)
		ret = register_ftrace_graph(&irqsoff_graph_return,
					    &irqsoff_graph_entry);
	else
		ret = register_ftrace_function(tr->ops);

	if (!ret)
		function_enabled = true;

	return ret;
}

static void unregister_irqsoff_function(struct trace_array *tr, int graph)
{
	if (!function_enabled)
		return;

	if (graph)
		unregister_ftrace_graph();
	else
		unregister_ftrace_function(tr->ops);

	function_enabled = false;
}

static int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
	if (!(mask & TRACE_ITER_FUNCTION))
		return 0;

	if (set)
		register_irqsoff_function(tr, is_graph(tr), 1);
	else
		unregister_irqsoff_function(tr, is_graph(tr));
	return 1;
}
#else
static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
	return 0;
}
static void unregister_irqsoff_function(struct trace_array *tr, int graph) { }
static inline int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
	return 0;
}
#endif /* CONFIG_FUNCTION_TRACER */

static int irqsoff_flag_changed(struct trace_array *tr, u32 mask, int set)
{
	struct tracer *tracer = tr->current_trace;

	if (irqsoff_function_set(tr, mask, set))
		return 0;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (mask & TRACE_ITER_DISPLAY_GRAPH)
		return irqsoff_display_graph(tr, set);
#endif

	return trace_keep_overwrite(tracer, mask, set);
}

static int start_irqsoff_tracer(struct trace_array *tr, int graph)
{
	int ret;

	ret = register_irqsoff_function(tr, graph, 0);

	if (!ret && tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;

	return ret;
}

static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
{
	tracer_enabled = 0;

	unregister_irqsoff_function(tr, graph);
}

static bool irqsoff_busy;

static int __irqsoff_tracer_init(struct trace_array *tr)
{
	if (irqsoff_busy)
		return -EBUSY;

	save_flags = tr->trace_flags;

	/* non overwrite screws up the latency tracers */
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);

	tr->max_latency = 0;
	irqsoff_trace = tr;
	/* make sure that the tracer is visible */
	smp_wmb();

	ftrace_init_array_ops(tr, irqsoff_tracer_call);

	/* Only toplevel instance supports graph tracing */
	if (start_irqsoff_tracer(tr, (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
				      is_graph(tr))))
		printk(KERN_ERR "failed to start irqsoff tracer\n");

	irqsoff_busy = true;
	return 0;
}
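
/*
 * irqsoff_busy serialises the three flavours registered below: only one
 * of irqsoff, preemptoff and preemptirqsoff can be initialised at a
 * time, since they share irqsoff_trace, trace_type and the max-latency
 * state.
 */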

static void irqsoff_tracer_reset(struct trace_array *tr)
{
	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;

	stop_irqsoff_tracer(tr, is_graph(tr));

	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
	ftrace_reset_array_ops(tr);

	irqsoff_busy = false;
}

static void irqsoff_tracer_start(struct trace_array *tr)
{
	tracer_enabled = 1;
}

static void irqsoff_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}

#ifdef CONFIG_IRQSOFF_TRACER
static int irqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF;

	return __irqsoff_tracer_init(tr);
}
static struct tracer irqsoff_tracer __read_mostly =
{
	.name		= "irqsoff",
	.init		= irqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_irqsoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};
# define register_irqsoff(trace) register_tracer(&trace)
#else
# define register_irqsoff(trace) do { } while (0)
#endif

#ifdef CONFIG_PREEMPT_TRACER
static int preemptoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_PREEMPT_OFF;

	return __irqsoff_tracer_init(tr);
}

static struct tracer preemptoff_tracer __read_mostly =
{
	.name		= "preemptoff",
	.init		= preemptoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};
# define register_preemptoff(trace) register_tracer(&trace)
#else
# define register_preemptoff(trace) do { } while (0)
#endif

#if defined(CONFIG_IRQSOFF_TRACER) && \
	defined(CONFIG_PREEMPT_TRACER)

static int preemptirqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

	return __irqsoff_tracer_init(tr);
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
	.name		= "preemptirqsoff",
	.init		= preemptirqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptirqsoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};

# define register_preemptirqsoff(trace) register_tracer(&trace)
#else
# define register_preemptirqsoff(trace) do { } while (0)
#endif

__init static int init_irqsoff_tracer(void)
{
	register_irqsoff(irqsoff_tracer);
	register_preemptoff(preemptoff_tracer);
	register_preemptirqsoff(preemptirqsoff_tracer);

	return 0;
}
core_initcall(init_irqsoff_tracer);
#endif /* CONFIG_IRQSOFF_TRACER || CONFIG_PREEMPT_TRACER */

#ifndef CONFIG_IRQSOFF_TRACER
static inline void tracer_hardirqs_on(void) { }
static inline void tracer_hardirqs_off(void) { }
static inline void tracer_hardirqs_on_caller(unsigned long caller_addr) { }
static inline void tracer_hardirqs_off_caller(unsigned long caller_addr) { }
#endif

#ifndef CONFIG_PREEMPT_TRACER
static inline void tracer_preempt_on(unsigned long a0, unsigned long a1) { }
static inline void tracer_preempt_off(unsigned long a0, unsigned long a1) { }
#endif

#if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PROVE_LOCKING)
/* Per-cpu variable to prevent redundant calls when IRQs already off */
static DEFINE_PER_CPU(int, tracing_irq_cpu);

void trace_hardirqs_on(void)
{
	if (!this_cpu_read(tracing_irq_cpu))
		return;

	trace_irq_enable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
	tracer_hardirqs_on();

	this_cpu_write(tracing_irq_cpu, 0);
}
EXPORT_SYMBOL(trace_hardirqs_on);

void trace_hardirqs_off(void)
{
	if (this_cpu_read(tracing_irq_cpu))
		return;

	this_cpu_write(tracing_irq_cpu, 1);

	trace_irq_disable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
	tracer_hardirqs_off();
}
EXPORT_SYMBOL(trace_hardirqs_off);

__visible void trace_hardirqs_on_caller(unsigned long caller_addr)
{
	if (!this_cpu_read(tracing_irq_cpu))
		return;

	trace_irq_enable_rcuidle(CALLER_ADDR0, caller_addr);
	tracer_hardirqs_on_caller(caller_addr);

	this_cpu_write(tracing_irq_cpu, 0);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);

__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
{
	if (this_cpu_read(tracing_irq_cpu))
		return;

	this_cpu_write(tracing_irq_cpu, 1);

	trace_irq_disable_rcuidle(CALLER_ADDR0, caller_addr);
	tracer_hardirqs_off_caller(caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);

/*
 * Stubs:
 */

void trace_softirqs_on(unsigned long ip)
{
}

void trace_softirqs_off(unsigned long ip)
{
}

inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

#if defined(CONFIG_PREEMPT_TRACER) || \
	(defined(CONFIG_DEBUG_PREEMPT) && defined(CONFIG_PREEMPTIRQ_EVENTS))
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
	trace_preempt_enable_rcuidle(a0, a1);
	tracer_preempt_on(a0, a1);
}

void trace_preempt_off(unsigned long a0, unsigned long a1)
{
	trace_preempt_disable_rcuidle(a0, a1);
	tracer_preempt_off(a0, a1);
}
#endif
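
/*
 * Consumption sketch (illustrative): the tracepoints used above are
 * declared in <trace/events/preemptirq.h> and, when compiled in, show up
 * under tracefs like any other event group, e.g.
 *
 *	# echo 1 > /sys/kernel/tracing/events/preemptirq/enable
 *	# cat /sys/kernel/tracing/trace_pipe
 *
 * The exact event names (irq_enable/irq_disable, preempt_enable/
 * preempt_disable, and the threshold-gated irqs_disable used above)
 * depend on that header in this tree.
 */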