/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 *	Distribute under GPLv2.
 *
 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <linux/tick.h>
#include <linux/irq.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>

/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear whether it will result in better
     locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt itself.
 */
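
/*
 * Usage sketch (illustrative, not part of the original file): a subsystem
 * registers its handler once at init time and then raises the softirq from
 * hot paths, typically hard-irq context. net_tx_action() stands in for any
 * real handler here:
 *
 *	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
 *	...
 *	raise_softirq(NET_TX_SOFTIRQ);	marks the vector pending on this CPU
 */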

#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

/*
 * active_softirqs -- per-cpu mask of the softirqs currently being handled;
 * readers expect only approximate answers, so no synchronization is done.
 */
DEFINE_PER_CPU(__u32, active_softirqs);

const char * const softirq_to_name[NR_SOFTIRQS] = {
	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
	"TASKLET", "SCHED", "HRTIMER", "RCU"
};

/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}

/*
 * If ksoftirqd is scheduled, we do not want to process pending softirqs
 * right now. Let ksoftirqd handle this at its own rate, to get fairness.
 */
static bool ksoftirqd_running(void)
{
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	return tsk && (tsk->state == TASK_RUNNING);
}

/*
 * preempt_count and SOFTIRQ_OFFSET usage:
 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
 *   softirq processing.
 * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 *   on local_bh_disable or local_bh_enable.
 * This lets us distinguish between whether we are currently processing
 * softirq and whether we just have bh disabled.
 */

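/*
 * Illustration (a sketch, using the standard helpers from linux/preempt.h
 * rather than anything defined in this file) of what the two offsets buy us:
 *
 *	in_softirq()		true while processing softirqs OR while bh is
 *				disabled (it checks the whole SOFTIRQ_MASK)
 *	in_serving_softirq()	true only while a softirq handler is actually
 *				running (it checks the SOFTIRQ_OFFSET bit)
 *
 * Both decode softirq_count(), which is derived from preempt_count.
 */
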
/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
	/*
	 * The preempt tracer hooks into preempt_count_add and will break
	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
	 * is set and before current->softirq_enabled is cleared.
	 * We must manually increment preempt_count here and manually
	 * call the trace_preempt_off later.
	 */
	__preempt_count_add(cnt);
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		trace_softirqs_off(ip);
	raw_local_irq_restore(flags);

	if (preempt_count() == cnt) {
#ifdef CONFIG_DEBUG_PREEMPT
		current->preempt_disable_ip = get_lock_parent_ip();
#endif
		trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
	}
}
EXPORT_SYMBOL(__local_bh_disable_ip);
#endif /* CONFIG_TRACE_IRQFLAGS */

static void __local_bh_enable(unsigned int cnt)
{
	WARN_ON_ONCE(!irqs_disabled());

	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		trace_softirqs_on(_RET_IP_);
	preempt_count_sub(cnt);
}

/*
 * Special-case - softirqs can safely be enabled in
 * cond_resched_softirq(), or by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	WARN_ON_ONCE(in_irq());
	__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}
EXPORT_SYMBOL(_local_bh_enable);

void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
	WARN_ON_ONCE(in_irq() || irqs_disabled());
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
		trace_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	preempt_count_sub(cnt - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending())) {
		/*
		 * Run softirqs if any are pending, and do it on a separate
		 * stack, as we may already be deep in a task call stack here.
		 */
		do_softirq();
	}

	preempt_count_dec();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}
EXPORT_SYMBOL(__local_bh_enable_ip);

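/*
 * Common caller pattern, sketched for illustration (callers normally use the
 * local_bh_disable()/local_bh_enable() wrappers from linux/bottom_half.h,
 * which funnel into the *_ip variants above):
 *
 *	local_bh_disable();
 *	... touch data shared with a softirq handler ...
 *	local_bh_enable();	<- may run pending softirqs right here
 */
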
/*
 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
 * but break the loop if need_resched() is set or after 2 ms.
 * MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
 * certain cases, such as stop_machine(), jiffies may cease to
 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
 * well to make sure we eventually return from this method.
 *
 * These limits have been established via experimentation.
 * The two things to balance are latency against fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_TIME  msecs_to_jiffies(2)
#define MAX_SOFTIRQ_RESTART 10

#ifdef CONFIG_TRACE_IRQFLAGS
/*
 * When we run softirqs from irq_exit() and thus on the hardirq stack we need
 * to keep the lockdep irq context tracking as tight as possible in order to
 * not mis-qualify lock contexts and miss possible deadlocks.
 */

static inline bool lockdep_softirq_start(void)
{
	bool in_hardirq = false;

	if (trace_hardirq_context(current)) {
		in_hardirq = true;
		trace_hardirq_exit();
	}

	lockdep_softirq_enter();

	return in_hardirq;
}

static inline void lockdep_softirq_end(bool in_hardirq)
{
	lockdep_softirq_exit();

	if (in_hardirq)
		trace_hardirq_enter();
}
#else
static inline bool lockdep_softirq_start(void) { return false; }
static inline void lockdep_softirq_end(bool in_hardirq) { }
#endif

#define long_softirq_pending()	(local_softirq_pending() & LONG_SOFTIRQ_MASK)
#define defer_for_rt()		(long_softirq_pending() && cpupri_check_rt())
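
/*
 * Note (added commentary; LONG_SOFTIRQ_MASK and cpupri_check_rt() are
 * defined outside this file, so the exact semantics are inferred from the
 * names and the callers below): defer_for_rt() is meant to be true when a
 * potentially long-running softirq is pending while an RT task occupies
 * this CPU, in which case softirq processing is pushed off to ksoftirqd
 * instead of running inline.
 */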
asmlinkage __visible void __softirq_entry __do_softirq(void)
{
	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
	unsigned long old_flags = current->flags;
	int max_restart = MAX_SOFTIRQ_RESTART;
	struct softirq_action *h;
	bool in_hardirq;
	__u32 pending;
	int softirq_bit;

	/*
	 * Mask out PF_MEMALLOC as the current task context is borrowed for
	 * the softirq. A softirq handler such as network RX might set
	 * PF_MEMALLOC again if the socket is related to swap.
	 */
	current->flags &= ~PF_MEMALLOC;

	pending = local_softirq_pending();
	account_irq_enter_time(current);

	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
	in_hardirq = lockdep_softirq_start();

restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);
	__this_cpu_write(active_softirqs, pending);

	local_irq_enable();

	h = softirq_vec;

	while ((softirq_bit = ffs(pending))) {
		unsigned int vec_nr;
		int prev_count;

		h += softirq_bit - 1;

		vec_nr = h - softirq_vec;
		prev_count = preempt_count();

		kstat_incr_softirqs_this_cpu(vec_nr);

		trace_softirq_entry(vec_nr);
		h->action(h);
		trace_softirq_exit(vec_nr);
		if (unlikely(prev_count != preempt_count())) {
			pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
			       vec_nr, softirq_to_name[vec_nr], h->action,
			       prev_count, preempt_count());
			preempt_count_set(prev_count);
		}
		h++;
		pending >>= softirq_bit;
	}

	__this_cpu_write(active_softirqs, 0);
	rcu_bh_qs();
	local_irq_disable();

	pending = local_softirq_pending();
	if (pending) {
		if (time_before(jiffies, end) && !need_resched() &&
		    !defer_for_rt() &&
		    --max_restart)
			goto restart;

		wakeup_softirqd();
	}

	lockdep_softirq_end(in_hardirq);
	account_irq_exit_time(current);
	__local_bh_enable(SOFTIRQ_OFFSET);
	WARN_ON_ONCE(in_interrupt());
	tsk_restore_flags(current, old_flags, PF_MEMALLOC);
}

asmlinkage __visible void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending && !ksoftirqd_running())
		do_softirq_own_stack();

	local_irq_restore(flags);
}

/*
 * Enter an interrupt context.
 */
void irq_enter(void)
{
	rcu_irq_enter();
	if (is_idle_task(current) && !in_interrupt()) {
		/*
		 * Prevent raise_softirq from needlessly waking up ksoftirqd
		 * here, as softirq will be serviced on return from interrupt.
		 */
		local_bh_disable();
		tick_irq_enter();
		_local_bh_enable();
	}

	__irq_enter();
}

static inline void invoke_softirq(void)
{
	if (ksoftirqd_running())
		return;

	if (!force_irqthreads && !defer_for_rt()) {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
		/*
		 * We can safely execute softirq on the current stack if
		 * it is the irq stack, because it should be near empty
		 * at this stage.
		 */
		__do_softirq();
#else
		/*
		 * Otherwise, irq_exit() is called on the task stack, which
		 * can already be deep. So run softirqs on their own stack
		 * to prevent any overrun.
		 */
		do_softirq_own_stack();
#endif
	} else {
		wakeup_softirqd();
	}
}

static inline void tick_irq_exit(void)
{
#ifdef CONFIG_NO_HZ_COMMON
	int cpu = smp_processor_id();

	/* Make sure that timer wheel updates are propagated */
	if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
		if (!in_interrupt())
			tick_nohz_irq_exit();
	}
#endif
}

/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
	local_irq_disable();
#else
	WARN_ON_ONCE(!irqs_disabled());
#endif

	account_irq_exit_time(current);
	preempt_count_sub(HARDIRQ_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

	tick_irq_exit();
	rcu_irq_exit();
	trace_hardirq_exit(); /* must be last! */
}

/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt())
		wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}

void __raise_softirq_irqoff(unsigned int nr)
{
	trace_softirq_raise(nr);
	or_softirq_pending(1UL << nr);
}

void open_softirq(int nr, void (*action)(struct softirq_action *))
{
	softirq_vec[nr].action = action;
}

/*
 * Tasklets
 */
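
/*
 * Usage sketch (illustrative, not part of the original file; my_tasklet_fn
 * is a hypothetical handler):
 *
 *	static void my_tasklet_fn(unsigned long data) { ... }
 *	static DECLARE_TASKLET(my_tasklet, my_tasklet_fn, 0);
 *
 *	tasklet_schedule(&my_tasklet);	from irq context; runs later in
 *					TASKLET_SOFTIRQ on this CPU
 *	tasklet_kill(&my_tasklet);	on teardown
 */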
struct tasklet_head {
	struct tasklet_struct *head;
	struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);

void __tasklet_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__this_cpu_read(tasklet_vec.tail) = t;
	__this_cpu_write(tasklet_vec.tail, &(t->next));
	raise_softirq_irqoff(TASKLET_SOFTIRQ);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__this_cpu_read(tasklet_hi_vec.tail) = t;
	__this_cpu_write(tasklet_hi_vec.tail, &(t->next));
	raise_softirq_irqoff(HI_SOFTIRQ);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__tasklet_hi_schedule);

void __tasklet_hi_schedule_first(struct tasklet_struct *t)
{
	BUG_ON(!irqs_disabled());

	t->next = __this_cpu_read(tasklet_hi_vec.head);
	__this_cpu_write(tasklet_hi_vec.head, t);
	__raise_softirq_irqoff(HI_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_hi_schedule_first);

static __latent_entropy void tasklet_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __this_cpu_read(tasklet_vec.head);
	__this_cpu_write(tasklet_vec.head, NULL);
	__this_cpu_write(tasklet_vec.tail, this_cpu_ptr(&tasklet_vec.head));
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED,
							&t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__this_cpu_read(tasklet_vec.tail) = t;
		__this_cpu_write(tasklet_vec.tail, &(t->next));
		__raise_softirq_irqoff(TASKLET_SOFTIRQ);
		local_irq_enable();
	}
}

static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __this_cpu_read(tasklet_hi_vec.head);
	__this_cpu_write(tasklet_hi_vec.head, NULL);
	__this_cpu_write(tasklet_hi_vec.tail, this_cpu_ptr(&tasklet_hi_vec.head));
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED,
							&t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__this_cpu_read(tasklet_hi_vec.tail) = t;
		__this_cpu_write(tasklet_hi_vec.tail, &(t->next));
		__raise_softirq_irqoff(HI_SOFTIRQ);
		local_irq_enable();
	}
}

void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->data = data;
}
EXPORT_SYMBOL(tasklet_init);

void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		pr_notice("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		do {
			yield();
		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}
EXPORT_SYMBOL(tasklet_kill);

/*
 * tasklet_hrtimer
 */

/*
 * The trampoline is called when the hrtimer expires. It schedules a tasklet
 * to run __tasklet_hrtimer_trampoline() which in turn will call the intended
 * hrtimer callback, but from softirq context.
 */
static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
{
	struct tasklet_hrtimer *ttimer =
		container_of(timer, struct tasklet_hrtimer, timer);

	tasklet_hi_schedule(&ttimer->tasklet);
	return HRTIMER_NORESTART;
}

/*
 * Helper function which calls the hrtimer callback from
 * tasklet/softirq context
 */
static void __tasklet_hrtimer_trampoline(unsigned long data)
{
	struct tasklet_hrtimer *ttimer = (void *)data;
	enum hrtimer_restart restart;

	restart = ttimer->function(&ttimer->timer);
	if (restart != HRTIMER_NORESTART)
		hrtimer_restart(&ttimer->timer);
}

/**
 * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks
 * @ttimer:	 tasklet_hrtimer which is initialized
 * @function:	 hrtimer callback function which gets called from softirq context
 * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME)
 * @mode:	 hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL)
 */
void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
			  enum hrtimer_restart (*function)(struct hrtimer *),
			  clockid_t which_clock, enum hrtimer_mode mode)
{
	hrtimer_init(&ttimer->timer, which_clock, mode);
	ttimer->timer.function = __hrtimer_tasklet_trampoline;
	tasklet_init(&ttimer->tasklet, __tasklet_hrtimer_trampoline,
		     (unsigned long)ttimer);
	ttimer->function = function;
}
EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);

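/*
 * Usage sketch (illustrative; my_cb is a hypothetical callback, and
 * tasklet_hrtimer_start() is the starter declared in linux/interrupt.h):
 *
 *	static enum hrtimer_restart my_cb(struct hrtimer *t)
 *	{
 *		return HRTIMER_NORESTART;
 *	}
 *
 *	static struct tasklet_hrtimer th;
 *
 *	tasklet_hrtimer_init(&th, my_cb, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	tasklet_hrtimer_start(&th, ms_to_ktime(10), HRTIMER_MODE_REL);
 */
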
void __init softirq_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(tasklet_vec, cpu).tail =
			&per_cpu(tasklet_vec, cpu).head;
		per_cpu(tasklet_hi_vec, cpu).tail =
			&per_cpu(tasklet_hi_vec, cpu).head;
	}

	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

static int ksoftirqd_should_run(unsigned int cpu)
{
	return local_softirq_pending();
}

static void run_ksoftirqd(unsigned int cpu)
{
	local_irq_disable();
	if (local_softirq_pending()) {
		/*
		 * We can safely run softirqs on the inline stack here, as
		 * we are not deep in the task stack.
		 */
		__do_softirq();
		local_irq_enable();
		cond_resched_rcu_qs();
		return;
	}
	local_irq_enable();
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
	struct tasklet_struct **i;

	BUG_ON(cpu_online(cpu));
	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
		return;

	/* CPU is dead, so no lock needed. */
	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
		if (*i == t) {
			*i = t->next;
			/* If this was the tail element, move the tail ptr */
			if (*i == NULL)
				per_cpu(tasklet_vec, cpu).tail = i;
			return;
		}
	}
	BUG();
}

static int takeover_tasklets(unsigned int cpu)
{
	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
		this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
		__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
	return 0;
}
#else
#define takeover_tasklets	NULL
#endif /* CONFIG_HOTPLUG_CPU */

static struct smp_hotplug_thread softirq_threads = {
	.store			= &ksoftirqd,
	.thread_should_run	= ksoftirqd_should_run,
	.thread_fn		= run_ksoftirqd,
	.thread_comm		= "ksoftirqd/%u",
};

static __init int spawn_ksoftirqd(void)
{
	cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
				  takeover_tasklets);
	BUG_ON(smpboot_register_percpu_thread(&softirq_threads));

	return 0;
}
early_initcall(spawn_ksoftirqd);

/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
	return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
	return NR_IRQS_LEGACY;
}

int __init __weak arch_early_irq_init(void)
{
	return 0;
}

unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
{
	return from;
}