/*
 * linux/kernel/softirq.c
 *
 * Copyright (C) 1992 Linus Torvalds
 *
 * Distribute under GPLv2.
 *
 * Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <linux/tick.h>
#include <linux/irq.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>

/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if a softirq is serialized, only the local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear whether this results in better
     locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt itself.
 */
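
/*
 * Illustrative sketch (not part of this file; the handler and lock names
 * are hypothetical): per the rules above, a softirq handler that touches
 * shared state serializes itself with its own spinlock, since the same
 * handler may run concurrently on several CPUs:
 *
 *	static DEFINE_SPINLOCK(my_state_lock);
 *
 *	static void my_softirq_action(struct softirq_action *h)
 *	{
 *		spin_lock(&my_state_lock);
 *		... touch state shared across CPUs ...
 *		spin_unlock(&my_state_lock);
 *	}
 */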

#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

/*
 * active_softirqs -- a per-CPU mask of the softirqs currently being
 * handled. Approximate answers are acceptable here, so the mask is
 * read and written without synchronization.
 */
DEFINE_PER_CPU(__u32, active_softirqs);
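
/*
 * Illustrative sketch (an assumption about how a consumer might use the
 * mask, not code from this file): a cross-CPU reader tolerates a stale
 * answer and simply loads the value:
 *
 *	__u32 mask = per_cpu(active_softirqs, cpu);
 *
 *	if (mask & (1 << NET_RX_SOFTIRQ))
 *		... @cpu is probably mid-way through NET_RX work ...
 */
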
const char * const softirq_to_name[NR_SOFTIRQS] = {
	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
	"TASKLET", "SCHED", "HRTIMER", "RCU"
};

/*
 * we cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}

/*
 * preempt_count and SOFTIRQ_OFFSET usage:
 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
 *   softirq processing.
 * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 *   on local_bh_disable or local_bh_enable.
 * This lets us distinguish between whether we are currently processing
 * softirq and whether we just have bh disabled.
 */
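
/*
 * Worked example (a sketch assuming the generic preempt_count layout,
 * where SOFTIRQ_OFFSET is 0x100 and SOFTIRQ_DISABLE_OFFSET is 0x200):
 *
 *	local_bh_disable();	softirq_count() == 0x200
 *				-> in_softirq() true, in_serving_softirq() false
 *	__do_softirq() entry;	softirq_count() gains 0x100
 *				-> in_softirq() true, in_serving_softirq() true
 */
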
/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
	/*
	 * The preempt tracer hooks into preempt_count_add and will break
	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
	 * is set and before current->softirq_enabled is cleared.
	 * We must manually increment preempt_count here and manually
	 * call the trace_preempt_off later.
	 */
	__preempt_count_add(cnt);
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		trace_softirqs_off(ip);
	raw_local_irq_restore(flags);

	if (preempt_count() == cnt) {
#ifdef CONFIG_DEBUG_PREEMPT
		current->preempt_disable_ip = get_lock_parent_ip();
#endif
		trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
	}
}
EXPORT_SYMBOL(__local_bh_disable_ip);
#endif /* CONFIG_TRACE_IRQFLAGS */

static void __local_bh_enable(unsigned int cnt)
{
	WARN_ON_ONCE(!irqs_disabled());

	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		trace_softirqs_on(_RET_IP_);
	preempt_count_sub(cnt);
}

/*
 * Special-case - softirqs can safely be enabled in
 * cond_resched_softirq(), or by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	WARN_ON_ONCE(in_irq());
	__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}
EXPORT_SYMBOL(_local_bh_enable);

void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
	WARN_ON_ONCE(in_irq() || irqs_disabled());
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
		trace_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	preempt_count_sub(cnt - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending())) {
		/*
		 * Run softirqs if any are pending, and do it on their own
		 * stack, as we may be calling this deep in a task call
		 * stack already.
		 */
		do_softirq();
	}

	preempt_count_dec();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}
EXPORT_SYMBOL(__local_bh_enable_ip);

/*
 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
 * but break the loop if need_resched() is set or after 2 ms.
 * MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
 * certain cases, such as stop_machine(), jiffies may cease to
 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
 * well to make sure we eventually return from this function.
 *
 * These limits have been established via experimentation.
 * The two things to balance are latency and fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_TIME  msecs_to_jiffies(2)
#define MAX_SOFTIRQ_RESTART 10
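
/*
 * Worked example (arithmetic only; exact jiffy values depend on HZ):
 * with HZ=1000, msecs_to_jiffies(2) == 2 jiffies, so the restart loop
 * gives up roughly 2 ms after entry; with HZ=100 it rounds up to a
 * single 10 ms jiffy. Independently of time, at most 10 restart passes
 * are made before ksoftirqd is woken.
 */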

#ifdef CONFIG_TRACE_IRQFLAGS
/*
 * When we run softirqs from irq_exit() and thus on the hardirq stack we need
 * to keep the lockdep irq context tracking as tight as possible in order to
 * not mis-qualify lock contexts and miss possible deadlocks.
 */

static inline bool lockdep_softirq_start(void)
{
	bool in_hardirq = false;

	if (trace_hardirq_context(current)) {
		in_hardirq = true;
		trace_hardirq_exit();
	}

	lockdep_softirq_enter();

	return in_hardirq;
}

static inline void lockdep_softirq_end(bool in_hardirq)
{
	lockdep_softirq_exit();

	if (in_hardirq)
		trace_hardirq_enter();
}
#else
static inline bool lockdep_softirq_start(void) { return false; }
static inline void lockdep_softirq_end(bool in_hardirq) { }
#endif

#define long_softirq_pending() (local_softirq_pending() & LONG_SOFTIRQ_MASK)
#define defer_for_rt() (long_softirq_pending() && cpupri_check_rt())
asmlinkage __visible void __softirq_entry __do_softirq(void)
{
	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
	unsigned long old_flags = current->flags;
	int max_restart = MAX_SOFTIRQ_RESTART;
	struct softirq_action *h;
	bool in_hardirq;
	__u32 pending;
	int softirq_bit;

	/*
	 * Mask out PF_MEMALLOC as the current task context is borrowed for
	 * the softirq. A softirq handler, such as network RX, might set
	 * PF_MEMALLOC again if the socket is related to swap.
	 */
	current->flags &= ~PF_MEMALLOC;

	pending = local_softirq_pending();
	account_irq_enter_time(current);

	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
	in_hardirq = lockdep_softirq_start();

restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);
	__this_cpu_write(active_softirqs, pending);

	local_irq_enable();

	h = softirq_vec;

	while ((softirq_bit = ffs(pending))) {
		unsigned int vec_nr;
		int prev_count;

		h += softirq_bit - 1;

		vec_nr = h - softirq_vec;
		prev_count = preempt_count();

		kstat_incr_softirqs_this_cpu(vec_nr);

		trace_softirq_entry(vec_nr);
		h->action(h);
		trace_softirq_exit(vec_nr);
		if (unlikely(prev_count != preempt_count())) {
			pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
			       vec_nr, softirq_to_name[vec_nr], h->action,
			       prev_count, preempt_count());
			preempt_count_set(prev_count);
		}
		h++;
		pending >>= softirq_bit;
	}

	__this_cpu_write(active_softirqs, 0);
	rcu_bh_qs();
	local_irq_disable();

	pending = local_softirq_pending();
	if (pending) {
		if (time_before(jiffies, end) && !need_resched() &&
		    !defer_for_rt() &&
		    --max_restart)
			goto restart;

		wakeup_softirqd();
	}

	lockdep_softirq_end(in_hardirq);
	account_irq_exit_time(current);
	__local_bh_enable(SOFTIRQ_OFFSET);
	WARN_ON_ONCE(in_interrupt());
	tsk_restore_flags(current, old_flags, PF_MEMALLOC);
}

asmlinkage __visible void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending)
		do_softirq_own_stack();

	local_irq_restore(flags);
}

/*
 * Enter an interrupt context.
 */
void irq_enter(void)
{
	rcu_irq_enter();
	if (is_idle_task(current) && !in_interrupt()) {
		/*
		 * Prevent raise_softirq from needlessly waking up ksoftirqd
		 * here, as softirq will be serviced on return from interrupt.
		 */
		local_bh_disable();
		tick_irq_enter();
		_local_bh_enable();
	}

	__irq_enter();
}

static inline void invoke_softirq(void)
{
	if (!force_irqthreads && !defer_for_rt()) {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
		/*
		 * We can safely execute softirqs on the current stack if
		 * it is the irq stack, because it should be near empty
		 * at this stage.
		 */
		__do_softirq();
#else
		/*
		 * Otherwise, irq_exit() is called on the task stack, which
		 * can potentially be deep already. So run softirqs on their
		 * own stack to prevent any overrun.
		 */
		do_softirq_own_stack();
#endif
	} else {
		wakeup_softirqd();
	}
}

static inline void tick_irq_exit(void)
{
#ifdef CONFIG_NO_HZ_COMMON
	int cpu = smp_processor_id();

	/* Make sure that timer wheel updates are propagated */
	if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
		if (!in_interrupt())
			tick_nohz_irq_exit();
	}
#endif
}

/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
	local_irq_disable();
#else
	WARN_ON_ONCE(!irqs_disabled());
#endif

	account_irq_exit_time(current);
	preempt_count_sub(HARDIRQ_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

	tick_irq_exit();
	rcu_irq_exit();
	trace_hardirq_exit(); /* must be last! */
}

/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt())
		wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}

void __raise_softirq_irqoff(unsigned int nr)
{
	trace_softirq_raise(nr);
	or_softirq_pending(1UL << nr);
}

void open_softirq(int nr, void (*action)(struct softirq_action *))
{
	softirq_vec[nr].action = action;
}
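
/*
 * Illustrative usage sketch (hypothetical vector and handler names; the
 * real vectors are the fixed enum in <linux/interrupt.h>, and new ones
 * are rarely added):
 *
 *	static void my_action(struct softirq_action *h)
 *	{
 *		... runs in softirq context with irqs enabled ...
 *	}
 *
 *	open_softirq(MY_SOFTIRQ, my_action);	registration, at boot
 *	raise_softirq(MY_SOFTIRQ);		mark pending on this cpu
 */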

/*
 * Tasklets
 */
struct tasklet_head {
	struct tasklet_struct *head;
	struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);

void __tasklet_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__this_cpu_read(tasklet_vec.tail) = t;
	__this_cpu_write(tasklet_vec.tail, &(t->next));
	raise_softirq_irqoff(TASKLET_SOFTIRQ);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__this_cpu_read(tasklet_hi_vec.tail) = t;
	__this_cpu_write(tasklet_hi_vec.tail, &(t->next));
	raise_softirq_irqoff(HI_SOFTIRQ);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__tasklet_hi_schedule);

void __tasklet_hi_schedule_first(struct tasklet_struct *t)
{
	BUG_ON(!irqs_disabled());

	t->next = __this_cpu_read(tasklet_hi_vec.head);
	__this_cpu_write(tasklet_hi_vec.head, t);
	__raise_softirq_irqoff(HI_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_hi_schedule_first);

static __latent_entropy void tasklet_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __this_cpu_read(tasklet_vec.head);
	__this_cpu_write(tasklet_vec.head, NULL);
	__this_cpu_write(tasklet_vec.tail, this_cpu_ptr(&tasklet_vec.head));
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED,
							&t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__this_cpu_read(tasklet_vec.tail) = t;
		__this_cpu_write(tasklet_vec.tail, &(t->next));
		__raise_softirq_irqoff(TASKLET_SOFTIRQ);
		local_irq_enable();
	}
}

static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __this_cpu_read(tasklet_hi_vec.head);
	__this_cpu_write(tasklet_hi_vec.head, NULL);
	__this_cpu_write(tasklet_hi_vec.tail, this_cpu_ptr(&tasklet_hi_vec.head));
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED,
							&t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__this_cpu_read(tasklet_hi_vec.tail) = t;
		__this_cpu_write(tasklet_hi_vec.tail, &(t->next));
		__raise_softirq_irqoff(HI_SOFTIRQ);
		local_irq_enable();
	}
}

void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->data = data;
}
EXPORT_SYMBOL(tasklet_init);
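
/*
 * Illustrative usage sketch (hypothetical driver-side names): the
 * typical lifecycle built on tasklet_init()/tasklet_schedule():
 *
 *	static void my_tasklet_fn(unsigned long data)
 *	{
 *		struct my_dev *dev = (struct my_dev *)data;
 *		... softirq context; a tasklet never runs concurrently
 *		    with itself, unlike a raw softirq handler ...
 *	}
 *
 *	tasklet_init(&dev->t, my_tasklet_fn, (unsigned long)dev);
 *	tasklet_schedule(&dev->t);	e.g. from a hardirq handler
 *	...
 *	tasklet_kill(&dev->t);		before freeing @dev
 */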

void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		pr_notice("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		do {
			yield();
		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}
EXPORT_SYMBOL(tasklet_kill);

/*
 * tasklet_hrtimer
 */

/*
 * The trampoline is called when the hrtimer expires. It schedules a tasklet
 * to run __tasklet_hrtimer_trampoline() which in turn will call the intended
 * hrtimer callback, but from softirq context.
 */
static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
{
	struct tasklet_hrtimer *ttimer =
		container_of(timer, struct tasklet_hrtimer, timer);

	tasklet_hi_schedule(&ttimer->tasklet);
	return HRTIMER_NORESTART;
}

/*
 * Helper function which calls the hrtimer callback from
 * tasklet/softirq context
 */
static void __tasklet_hrtimer_trampoline(unsigned long data)
{
	struct tasklet_hrtimer *ttimer = (void *)data;
	enum hrtimer_restart restart;

	restart = ttimer->function(&ttimer->timer);
	if (restart != HRTIMER_NORESTART)
		hrtimer_restart(&ttimer->timer);
}

/**
 * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks
 * @ttimer:	 tasklet_hrtimer which is initialized
 * @function:	 hrtimer callback function which gets called from softirq context
 * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME)
 * @mode:	 hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL)
 */
void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
			  enum hrtimer_restart (*function)(struct hrtimer *),
			  clockid_t which_clock, enum hrtimer_mode mode)
{
	hrtimer_init(&ttimer->timer, which_clock, mode);
	ttimer->timer.function = __hrtimer_tasklet_trampoline;
	tasklet_init(&ttimer->tasklet, __tasklet_hrtimer_trampoline,
		     (unsigned long)ttimer);
	ttimer->function = function;
}
EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);
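
/*
 * Illustrative usage sketch (hypothetical callback name), pairing the
 * init above with tasklet_hrtimer_start() from <linux/interrupt.h>:
 *
 *	static enum hrtimer_restart my_cb(struct hrtimer *t)
 *	{
 *		... runs from HI_SOFTIRQ, not hard irq context ...
 *		return HRTIMER_NORESTART;
 *	}
 *
 *	tasklet_hrtimer_init(&th, my_cb, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	tasklet_hrtimer_start(&th, ms_to_ktime(10), HRTIMER_MODE_REL);
 */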

void __init softirq_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(tasklet_vec, cpu).tail =
			&per_cpu(tasklet_vec, cpu).head;
		per_cpu(tasklet_hi_vec, cpu).tail =
			&per_cpu(tasklet_hi_vec, cpu).head;
	}

	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

static int ksoftirqd_should_run(unsigned int cpu)
{
	return local_softirq_pending();
}

static void run_ksoftirqd(unsigned int cpu)
{
	local_irq_disable();
	if (local_softirq_pending()) {
		/*
		 * We can safely run softirqs on the inline stack here, as
		 * we are not deep in the task stack.
		 */
		__do_softirq();
		local_irq_enable();
		cond_resched_rcu_qs();
		return;
	}
	local_irq_enable();
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
	struct tasklet_struct **i;

	BUG_ON(cpu_online(cpu));
	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
		return;

	/* CPU is dead, so no lock needed. */
	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
		if (*i == t) {
			*i = t->next;
			/* If this was the tail element, move the tail ptr */
			if (*i == NULL)
				per_cpu(tasklet_vec, cpu).tail = i;
			return;
		}
	}
	BUG();
}

static int takeover_tasklets(unsigned int cpu)
{
	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
		this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
		__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
	return 0;
}
#else
#define takeover_tasklets	NULL
#endif /* CONFIG_HOTPLUG_CPU */

static struct smp_hotplug_thread softirq_threads = {
	.store			= &ksoftirqd,
	.thread_should_run	= ksoftirqd_should_run,
	.thread_fn		= run_ksoftirqd,
	.thread_comm		= "ksoftirqd/%u",
};

static __init int spawn_ksoftirqd(void)
{
	cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
				  takeover_tasklets);
	BUG_ON(smpboot_register_percpu_thread(&softirq_threads));

	return 0;
}
early_initcall(spawn_ksoftirqd);

/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
	return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
	return NR_IRQS_LEGACY;
}

int __init __weak arch_early_irq_init(void)
{
	return 0;
}

unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
{
	return from;
}