// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to interrupt-poll handling in the block layer. This
 * is similar to NAPI for network devices.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/irq_poll.h>
#include <linux/delay.h>

static unsigned int irq_poll_budget __read_mostly = 256;

static DEFINE_PER_CPU(struct list_head, blk_cpu_iopoll);

/**
 * irq_poll_sched - Schedule a run of the iopoll handler
 * @iop: The parent iopoll structure
 *
 * Description:
 *     Add this irq_poll structure to the pending poll list and raise the
 *     irq_poll softirq.
 **/
void irq_poll_sched(struct irq_poll *iop)
{
        unsigned long flags;

        if (test_bit(IRQ_POLL_F_DISABLE, &iop->state))
                return;
        if (test_and_set_bit(IRQ_POLL_F_SCHED, &iop->state))
                return;

        local_irq_save(flags);
        list_add_tail(&iop->list, this_cpu_ptr(&blk_cpu_iopoll));
        raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(irq_poll_sched);
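
/*
 * Example (a sketch, not part of this file): a driver's hard interrupt
 * handler typically masks further device interrupts and hands completion
 * processing to irq_poll. The "my_dev" structure and its helpers are
 * hypothetical.
 *
 *      static irqreturn_t my_dev_interrupt(int irq, void *data)
 *      {
 *              struct my_dev *dev = data;
 *
 *              my_dev_mask_irqs(dev);          // quiesce the device
 *              irq_poll_sched(&dev->iop);      // finish completions in softirq
 *              return IRQ_HANDLED;
 *      }
 */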

/**
 * __irq_poll_complete - Mark this @iop as un-polled again
 * @iop: The parent iopoll structure
 *
 * Description:
 *     See irq_poll_complete(). This function must be called with interrupts
 *     disabled.
 **/
static void __irq_poll_complete(struct irq_poll *iop)
{
        list_del(&iop->list);
        smp_mb__before_atomic();
        clear_bit_unlock(IRQ_POLL_F_SCHED, &iop->state);
}

/**
 * irq_poll_complete - Mark this @iop as un-polled again
 * @iop: The parent iopoll structure
 *
 * Description:
 *     If a driver consumes less than the assigned budget in its run of the
 *     iopoll handler, it'll end the polled mode by calling this function. The
 *     iopoll handler will not be invoked again before irq_poll_sched()
 *     is called.
 **/
void irq_poll_complete(struct irq_poll *iop)
{
        unsigned long flags;

        local_irq_save(flags);
        __irq_poll_complete(iop);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(irq_poll_complete);
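
/*
 * Example (a sketch, not part of this file): a ->poll() callback consumes
 * up to @budget completions and returns how many it handled. When it
 * drains the queue early it leaves polled mode via irq_poll_complete()
 * and re-enables the device interrupt. "my_dev" and its helpers are
 * hypothetical.
 *
 *      static int my_dev_poll(struct irq_poll *iop, int budget)
 *      {
 *              struct my_dev *dev = container_of(iop, struct my_dev, iop);
 *              int done = 0;
 *
 *              while (done < budget && my_dev_reap_one_completion(dev))
 *                      done++;
 *
 *              if (done < budget) {
 *                      irq_poll_complete(iop);
 *                      my_dev_unmask_irqs(dev);
 *              }
 *              return done;
 *      }
 */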

static void __latent_entropy irq_poll_softirq(struct softirq_action *h)
{
        struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll);
        int rearm = 0, budget = irq_poll_budget;
        unsigned long start_time = jiffies;

        local_irq_disable();

        while (!list_empty(list)) {
                struct irq_poll *iop;
                int work, weight;

                /*
                 * If the softirq window is exhausted then punt.
                 */
                if (budget <= 0 || time_after(jiffies, start_time)) {
                        rearm = 1;
                        break;
                }

                local_irq_enable();

                /* Even though interrupts have been re-enabled, this
                 * access is safe because interrupts can only add new
                 * entries to the tail of this list, and only ->poll()
                 * calls can remove this head entry from the list.
                 */
                iop = list_entry(list->next, struct irq_poll, list);

                weight = iop->weight;
                work = 0;
                if (test_bit(IRQ_POLL_F_SCHED, &iop->state))
                        work = iop->poll(iop, weight);

                budget -= work;

                local_irq_disable();

                /*
                 * Drivers must not modify the iopoll state if they
                 * consume their assigned weight (or more; some drivers can't
                 * easily just stop processing, they have to complete an
                 * entire mask of commands). In such cases this code
                 * still "owns" the iopoll instance and therefore can
                 * move the instance around on the list at will.
                 */
                if (work >= weight) {
                        if (test_bit(IRQ_POLL_F_DISABLE, &iop->state))
                                __irq_poll_complete(iop);
                        else
                                list_move_tail(&iop->list, list);
                }
        }

        if (rearm)
                __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);

        local_irq_enable();
}

/**
 * irq_poll_disable - Disable iopoll on this @iop
 * @iop: The parent iopoll structure
 *
 * Description:
 *     Disable io polling and wait for any pending callbacks to have completed.
 **/
void irq_poll_disable(struct irq_poll *iop)
{
        set_bit(IRQ_POLL_F_DISABLE, &iop->state);
        while (test_and_set_bit(IRQ_POLL_F_SCHED, &iop->state))
                msleep(1);
        clear_bit(IRQ_POLL_F_DISABLE, &iop->state);
}
EXPORT_SYMBOL(irq_poll_disable);
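
/*
 * Example (a sketch): device teardown for the hypothetical "my_dev".
 * Because irq_poll_disable() may msleep() while a ->poll() run is still
 * in flight, it must be called from process context.
 *
 *      static void my_dev_shutdown(struct my_dev *dev)
 *      {
 *              my_dev_mask_irqs(dev);
 *              irq_poll_disable(&dev->iop);    // no ->poll() runs after this
 *              my_dev_free_queues(dev);
 *      }
 */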

/**
 * irq_poll_enable - Enable iopoll on this @iop
 * @iop: The parent iopoll structure
 *
 * Description:
 *     Enable iopoll on this @iop. Note that this does not schedule a
 *     handler run; it only marks the instance as active again.
 **/
void irq_poll_enable(struct irq_poll *iop)
{
        BUG_ON(!test_bit(IRQ_POLL_F_SCHED, &iop->state));
        smp_mb__before_atomic();
        clear_bit_unlock(IRQ_POLL_F_SCHED, &iop->state);
}
EXPORT_SYMBOL(irq_poll_enable);
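
/*
 * Example (a sketch): re-arming the hypothetical "my_dev" after a reset.
 * irq_poll_disable() leaves IRQ_POLL_F_SCHED set, so irq_poll_enable()
 * only clears that bit; the next interrupt (or an explicit call to
 * irq_poll_sched()) starts the next poll run.
 *
 *      static void my_dev_reset_done(struct my_dev *dev)
 *      {
 *              irq_poll_enable(&dev->iop);
 *              my_dev_unmask_irqs(dev);        // next IRQ schedules polling
 *      }
 */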

/**
 * irq_poll_init - Initialize this @iop
 * @iop: The parent iopoll structure
 * @weight: The default weight (or command completion budget)
 * @poll_fn: The handler to invoke
 *
 * Description:
 *     Initialize and enable this irq_poll structure.
 **/
void irq_poll_init(struct irq_poll *iop, int weight, irq_poll_fn *poll_fn)
{
        memset(iop, 0, sizeof(*iop));
        INIT_LIST_HEAD(&iop->list);
        iop->weight = weight;
        iop->poll = poll_fn;
}
EXPORT_SYMBOL(irq_poll_init);
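
/*
 * Example (a sketch): initialization in the hypothetical probe path,
 * pairing the per-device irq_poll instance with the my_dev_poll()
 * handler sketched above. A weight of 32 caps the completions handled
 * per ->poll() invocation.
 *
 *      static int my_dev_probe(struct my_dev *dev)
 *      {
 *              irq_poll_init(&dev->iop, 32, my_dev_poll);
 *              return my_dev_request_irq(dev);
 *      }
 */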

static int irq_poll_cpu_dead(unsigned int cpu)
{
        /*
         * If a CPU goes away, splice its entries to the current CPU
         * and trigger a run of the softirq
         */
        local_irq_disable();
        list_splice_init(&per_cpu(blk_cpu_iopoll, cpu),
                         this_cpu_ptr(&blk_cpu_iopoll));
        __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
        local_irq_enable();

        return 0;
}

static __init int irq_poll_setup(void)
{
        int i;

        for_each_possible_cpu(i)
                INIT_LIST_HEAD(&per_cpu(blk_cpu_iopoll, i));

        open_softirq(IRQ_POLL_SOFTIRQ, irq_poll_softirq);
        cpuhp_setup_state_nocalls(CPUHP_IRQ_POLL_DEAD, "irq_poll:dead", NULL,
                                  irq_poll_cpu_dead);
        return 0;
}
subsys_initcall(irq_poll_setup);