/**
 * @file cpu_buffer.c
 *
 * @remark Copyright 2002-2009 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Barry Kasindorf <barry.kasindorf@amd.com>
 * @author Robert Richter <robert.richter@amd.com>
 *
 * Each CPU has a local buffer that stores PC value/event
 * pairs. We also log context switches when we notice them.
 * Eventually each CPU's buffer is processed into the global
 * event buffer by sync_buffer().
 *
 * We use a local buffer for two reasons: an NMI or similar
 * interrupt cannot synchronise, and high sampling rates
 * would lead to catastrophic global synchronisation if
 * a global buffer was used.
 */

#include <linux/sched.h>
#include <linux/oprofile.h>
#include <linux/errno.h>

#include "event_buffer.h"
#include "cpu_buffer.h"
#include "buffer_sync.h"
#include "oprof.h"

#define OP_BUFFER_FLAGS	0

static struct ring_buffer *op_ring_buffer;
DEFINE_PER_CPU(struct oprofile_cpu_buffer, op_cpu_buffer);

static void wq_sync_buffer(struct work_struct *work);

#define DEFAULT_TIMER_EXPIRE (HZ / 10)
static int work_enabled;

unsigned long oprofile_get_cpu_buffer_size(void)
{
	return oprofile_cpu_buffer_size;
}

void oprofile_cpu_buffer_inc_smpl_lost(void)
{
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);

	cpu_buf->sample_lost_overflow++;
}

void free_cpu_buffers(void)
{
	if (op_ring_buffer)
		ring_buffer_free(op_ring_buffer);
	op_ring_buffer = NULL;
}

#define RB_EVENT_HDR_SIZE 4

int alloc_cpu_buffers(void)
{
	int i;

	unsigned long buffer_size = oprofile_cpu_buffer_size;
	unsigned long byte_size = buffer_size * (sizeof(struct op_sample) +
						 RB_EVENT_HDR_SIZE);

	op_ring_buffer = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS);
	if (!op_ring_buffer)
		goto fail;

	for_each_possible_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i);

		b->last_task = NULL;
		b->last_is_kernel = -1;
		b->tracing = 0;
		b->buffer_size = buffer_size;
		b->sample_received = 0;
		b->sample_lost_overflow = 0;
		b->backtrace_aborted = 0;
		b->sample_invalid_eip = 0;
		b->cpu = i;
		INIT_DELAYED_WORK(&b->work, wq_sync_buffer);
	}
	return 0;

fail:
	free_cpu_buffers();
	return -ENOMEM;
}

void start_cpu_work(void)
{
	int i;

	work_enabled = 1;

	for_each_online_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i);

		/*
		 * Spread the work by 1 jiffy per cpu so they don't all
		 * fire at once.
		 */
		schedule_delayed_work_on(i, &b->work, DEFAULT_TIMER_EXPIRE + i);
	}
}

void end_cpu_work(void)
{
	int i;

	work_enabled = 0;

	for_each_online_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i);

		cancel_delayed_work(&b->work);
	}

	flush_scheduled_work();
}

/*
 * This function prepares the cpu buffer to write a sample.
 *
 * Struct op_entry is used during operations on the ring buffer while
 * struct op_sample contains the data that is stored in the ring
 * buffer. The entry passed in can be uninitialized. The function
 * reserves a data array of the length given by size. Call
 * op_cpu_buffer_write_commit() after preparing the sample; see the
 * example below. On error a NULL pointer is returned, otherwise the
 * pointer to the sample.
 */
struct op_sample
*op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size)
{
	entry->event = ring_buffer_lock_reserve
		(op_ring_buffer, sizeof(struct op_sample) +
		 size * sizeof(entry->sample->data[0]));
	if (!entry->event)
		return NULL;
	entry->sample = ring_buffer_event_data(entry->event);
	entry->size = size;
	entry->data = entry->sample->data;

	return entry->sample;
}

int op_cpu_buffer_write_commit(struct op_entry *entry)
{
	return ring_buffer_unlock_commit(op_ring_buffer, entry->event);
}
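
/*
 * Illustrative sketch (not called anywhere): writing one sample that
 * carries two extra data words using the reserve/commit protocol
 * above. pc, event, val0 and val1 are placeholders.
 *
 *	struct op_entry entry;
 *	struct op_sample *sample;
 *
 *	sample = op_cpu_buffer_write_reserve(&entry, 2);
 *	if (!sample)
 *		return -ENOMEM;
 *	sample->eip = pc;
 *	sample->event = event;
 *	op_cpu_buffer_add_data(&entry, val0);
 *	op_cpu_buffer_add_data(&entry, val1);
 *	op_cpu_buffer_write_commit(&entry);
 */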

struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu)
{
	struct ring_buffer_event *e;
	e = ring_buffer_consume(op_ring_buffer, cpu, NULL, NULL);
	if (!e)
		return NULL;

	entry->event = e;
	entry->sample = ring_buffer_event_data(e);
	entry->size = (ring_buffer_event_length(e) - sizeof(struct op_sample))
		/ sizeof(entry->sample->data[0]);
	entry->data = entry->sample->data;
	return entry->sample;
}

unsigned long op_cpu_buffer_entries(int cpu)
{
	return ring_buffer_entries_cpu(op_ring_buffer, cpu);
}

static int
op_add_code(struct oprofile_cpu_buffer *cpu_buf, unsigned long backtrace,
	    int is_kernel, struct task_struct *task)
{
	struct op_entry entry;
	struct op_sample *sample;
	unsigned long flags;
	int size;

	flags = 0;

	if (backtrace)
		flags |= TRACE_BEGIN;

	/* notice a switch from user->kernel or vice versa */
	is_kernel = !!is_kernel;
	if (cpu_buf->last_is_kernel != is_kernel) {
		cpu_buf->last_is_kernel = is_kernel;
		flags |= KERNEL_CTX_SWITCH;
		if (is_kernel)
			flags |= IS_KERNEL;
	}

	/* notice a task switch */
	if (cpu_buf->last_task != task) {
		cpu_buf->last_task = task;
		flags |= USER_CTX_SWITCH;
	}

	if (!flags)
		/* nothing to do */
		return 0;

	if (flags & USER_CTX_SWITCH)
		size = 1;
	else
		size = 0;

	sample = op_cpu_buffer_write_reserve(&entry, size);
	if (!sample)
		return -ENOMEM;

	sample->eip = ESCAPE_CODE;
	sample->event = flags;

	if (size)
		op_cpu_buffer_add_data(&entry, (unsigned long)task);

	op_cpu_buffer_write_commit(&entry);

	return 0;
}
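
/*
 * Sketch of the resulting record stream (illustrative, derived from
 * op_add_code() above and op_add_sample() below): ordinary samples
 * are (eip, event) pairs, while state changes are flagged by an
 * escape record. A switch into the kernel followed by one sample
 * would look like:
 *
 *	{ .eip = ESCAPE_CODE, .event = KERNEL_CTX_SWITCH | IS_KERNEL }
 *	{ .eip = pc,          .event = event }
 *
 * A USER_CTX_SWITCH record additionally carries the new task_struct
 * pointer in its data array; sync_buffer() decodes this stream.
 */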

static inline int
op_add_sample(struct oprofile_cpu_buffer *cpu_buf,
	      unsigned long pc, unsigned long event)
{
	struct op_entry entry;
	struct op_sample *sample;

	sample = op_cpu_buffer_write_reserve(&entry, 0);
	if (!sample)
		return -ENOMEM;

	sample->eip = pc;
	sample->event = event;

	return op_cpu_buffer_write_commit(&entry);
}

/*
 * This must be safe from any context.
 *
 * is_kernel is needed because on some architectures you cannot
 * tell if you are in kernel or user space simply by looking at
 * pc. We tag this in the buffer by generating kernel enter/exit
 * events whenever is_kernel changes
 */
static int
log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
	   unsigned long backtrace, int is_kernel, unsigned long event)
{
	cpu_buf->sample_received++;

	if (pc == ESCAPE_CODE) {
		cpu_buf->sample_invalid_eip++;
		return 0;
	}

	if (op_add_code(cpu_buf, backtrace, is_kernel, current))
		goto fail;

	if (op_add_sample(cpu_buf, pc, event))
		goto fail;

	return 1;

fail:
	cpu_buf->sample_lost_overflow++;
	return 0;
}

static inline void oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf)
{
	cpu_buf->tracing = 1;
}

static inline void oprofile_end_trace(struct oprofile_cpu_buffer *cpu_buf)
{
	cpu_buf->tracing = 0;
}

static inline void
__oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
			  unsigned long event, int is_kernel)
{
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);
	unsigned long backtrace = oprofile_backtrace_depth;

	/*
	 * if log_sample() fails we can't backtrace, since we lost the
	 * source of this event
	 */
	if (!log_sample(cpu_buf, pc, backtrace, is_kernel, event))
		/* failed */
		return;

	if (!backtrace)
		return;

	oprofile_begin_trace(cpu_buf);
	oprofile_ops.backtrace(regs, backtrace);
	oprofile_end_trace(cpu_buf);
}

void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
			     unsigned long event, int is_kernel)
{
	__oprofile_add_ext_sample(pc, regs, event, is_kernel);
}

void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
{
	int is_kernel;
	unsigned long pc;

	if (likely(regs)) {
		is_kernel = !user_mode(regs);
		pc = profile_pc(regs);
	} else {
		is_kernel = 0;    /* This value will not be used */
		pc = ESCAPE_CODE; /* as this causes an early return. */
	}

	__oprofile_add_ext_sample(pc, regs, event, is_kernel);
}

/*
 * Add samples with data to the ring buffer.
 *
 * Use oprofile_add_data(&entry, val) to add data and
 * oprofile_write_commit(&entry) to commit the sample; see the
 * example after oprofile_write_commit() below.
 */
void
oprofile_write_reserve(struct op_entry *entry, struct pt_regs * const regs,
		       unsigned long pc, int code, int size)
{
	struct op_sample *sample;
	int is_kernel = !user_mode(regs);
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);

	cpu_buf->sample_received++;

	/* no backtraces for samples with data */
	if (op_add_code(cpu_buf, 0, is_kernel, current))
		goto fail;

	sample = op_cpu_buffer_write_reserve(entry, size + 2);
	if (!sample)
		goto fail;
	sample->eip = ESCAPE_CODE;
	sample->event = 0; /* no flags */

	op_cpu_buffer_add_data(entry, code);
	op_cpu_buffer_add_data(entry, pc);

	return;

fail:
	entry->event = NULL;
	cpu_buf->sample_lost_overflow++;
}

int oprofile_add_data(struct op_entry *entry, unsigned long val)
{
	if (!entry->event)
		return 0;
	return op_cpu_buffer_add_data(entry, val);
}

int oprofile_add_data64(struct op_entry *entry, u64 val)
{
	if (!entry->event)
		return 0;
	if (op_cpu_buffer_get_size(entry) < 2)
		/*
		 * the function returns 0 to indicate a buffer that is
		 * too small, even if there is some space left
		 */
		return 0;
	if (!op_cpu_buffer_add_data(entry, (u32)val))
		return 0;
	return op_cpu_buffer_add_data(entry, (u32)(val >> 32));
}
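
/*
 * The low word is written first, then the high word. A consumer must
 * pull the two words back in that order and reassemble them, e.g.
 * (sketch, not taken from the reader code):
 *
 *	u64 val = low | ((u64)high << 32);
 */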

int oprofile_write_commit(struct op_entry *entry)
{
	if (!entry->event)
		return -EINVAL;
	return op_cpu_buffer_write_commit(entry);
}
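
/*
 * Example (hypothetical caller, e.g. an architecture's sampling
 * driver): emitting one sample that carries two extra data words.
 * MY_SAMPLE_CODE, data0 and data1 are placeholders.
 *
 *	struct op_entry entry;
 *
 *	oprofile_write_reserve(&entry, regs, pc, MY_SAMPLE_CODE, 2);
 *	oprofile_add_data(&entry, data0);
 *	oprofile_add_data(&entry, data1);
 *	oprofile_write_commit(&entry);
 *
 * On reserve failure entry->event is NULL and the add/commit calls
 * above degrade to no-ops, so no extra error handling is needed.
 */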

void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
{
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);
	log_sample(cpu_buf, pc, 0, is_kernel, event);
}

void oprofile_add_trace(unsigned long pc)
{
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);

	if (!cpu_buf->tracing)
		return;

	/*
	 * a broken frame can give an eip with the same value as an
	 * escape code; abort the trace if we get it
	 */
	if (pc == ESCAPE_CODE)
		goto fail;

	if (op_add_sample(cpu_buf, pc, 0))
		goto fail;

	return;
fail:
	cpu_buf->tracing = 0;
	cpu_buf->backtrace_aborted++;
	return;
}

/*
 * This serves to avoid cpu buffer overflow, and makes sure
 * the task mortuary progresses
 *
 * By using schedule_delayed_work_on and then schedule_delayed_work
 * we guarantee this will stay on the correct cpu
 */
static void wq_sync_buffer(struct work_struct *work)
{
	struct oprofile_cpu_buffer *b =
		container_of(work, struct oprofile_cpu_buffer, work.work);
	if (b->cpu != smp_processor_id()) {
		printk(KERN_DEBUG "WQ on CPU%d, prefer CPU%d\n",
		       smp_processor_id(), b->cpu);

		if (!cpu_online(b->cpu)) {
			cancel_delayed_work(&b->work);
			return;
		}
	}
	sync_buffer(b->cpu);

	/* don't re-add the work if we're shutting down */
	if (work_enabled)
		schedule_delayed_work(&b->work, DEFAULT_TIMER_EXPIRE);
}
456}