/**
 * @file cpu_buffer.c
 *
 * @remark Copyright 2002-2009 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Barry Kasindorf <barry.kasindorf@amd.com>
 * @author Robert Richter <robert.richter@amd.com>
 *
 * Each CPU has a local buffer that stores PC value/event
 * pairs. We also log context switches when we notice them.
 * Eventually each CPU's buffer is processed into the global
 * event buffer by sync_buffer().
 *
 * We use a local buffer for two reasons: an NMI or similar
 * interrupt cannot synchronise, and high sampling rates
 * would lead to catastrophic global synchronisation if
 * a global buffer were used.
 */

#include <linux/sched.h>
#include <linux/oprofile.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>

#include "event_buffer.h"
#include "cpu_buffer.h"
#include "buffer_sync.h"
#include "oprof.h"

#define OP_BUFFER_FLAGS	0

/*
 * Read and write access use spin locking. Thus, writing to the
 * buffer by the NMI handler (x86) could also occur during critical
 * sections when reading the buffer. To avoid this, there are 2
 * buffers for independent read and write access. Read access is in
 * process context only, write access only in the NMI handler. If the
 * read buffer runs empty, both buffers are swapped atomically. There
 * is potentially a small window during swapping where the buffers are
 * disabled and samples could be lost.
 *
 * Using 2 buffers adds a little overhead, but the solution is clear
 * and does not require changes in the ring buffer implementation. It
 * can be changed to a single-buffer solution when the ring buffer
 * access is implemented as non-locking atomic code.
 */
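/*
 * A simplified, illustration-only sketch of the consume-then-swap
 * read path implemented in op_cpu_buffer_read_entry() below:
 *
 *	e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
 *	if (!e) {
 *		ring_buffer_swap_cpu(op_ring_buffer_read,
 *				     op_ring_buffer_write, cpu);
 *		e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
 *	}
 */
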
static struct ring_buffer *op_ring_buffer_read;
static struct ring_buffer *op_ring_buffer_write;
DEFINE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);

static void wq_sync_buffer(struct work_struct *work);

#define DEFAULT_TIMER_EXPIRE (HZ / 10)
static int work_enabled;

unsigned long oprofile_get_cpu_buffer_size(void)
{
	return oprofile_cpu_buffer_size;
}

void oprofile_cpu_buffer_inc_smpl_lost(void)
{
	struct oprofile_cpu_buffer *cpu_buf
		= &__get_cpu_var(cpu_buffer);

	cpu_buf->sample_lost_overflow++;
}

void free_cpu_buffers(void)
{
	if (op_ring_buffer_read)
		ring_buffer_free(op_ring_buffer_read);
	op_ring_buffer_read = NULL;
	if (op_ring_buffer_write)
		ring_buffer_free(op_ring_buffer_write);
	op_ring_buffer_write = NULL;
}

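/*
 * Rough per-event header size of the ring buffer, assumed here only
 * to estimate the byte size needed to hold buffer_size samples in
 * alloc_cpu_buffers() below.
 */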
#define RB_EVENT_HDR_SIZE 4

int alloc_cpu_buffers(void)
{
	int i;

	unsigned long buffer_size = oprofile_cpu_buffer_size;
	unsigned long byte_size = buffer_size * (sizeof(struct op_sample) +
						 RB_EVENT_HDR_SIZE);

	op_ring_buffer_read = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS);
	if (!op_ring_buffer_read)
		goto fail;
	op_ring_buffer_write = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS);
	if (!op_ring_buffer_write)
		goto fail;

	for_each_possible_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);

		b->last_task = NULL;
		b->last_is_kernel = -1;
		b->tracing = 0;
		b->buffer_size = buffer_size;
		b->sample_received = 0;
		b->sample_lost_overflow = 0;
		b->backtrace_aborted = 0;
		b->sample_invalid_eip = 0;
		b->cpu = i;
		INIT_DELAYED_WORK(&b->work, wq_sync_buffer);
	}
	return 0;

fail:
	free_cpu_buffers();
	return -ENOMEM;
}

void start_cpu_work(void)
{
	int i;

	work_enabled = 1;

	for_each_online_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);

		/*
		 * Spread the work by 1 jiffy per cpu so they don't all
		 * fire at once.
		 */
		schedule_delayed_work_on(i, &b->work, DEFAULT_TIMER_EXPIRE + i);
	}
}

void end_cpu_work(void)
{
	int i;

	work_enabled = 0;

	for_each_online_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);

		cancel_delayed_work(&b->work);
	}

	flush_scheduled_work();
}

/*
 * This function prepares the cpu buffer to write a sample.
 *
 * Struct op_entry is used during operations on the ring buffer, while
 * struct op_sample contains the data that is stored in the ring
 * buffer. Struct op_entry can be uninitialized. The function reserves
 * a data array of the given size. Call op_cpu_buffer_write_commit()
 * after preparing the sample. On error a NULL pointer is returned;
 * otherwise a pointer to the sample is returned.
 */
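/*
 * Illustrative (not compiled) sketch of the reserve/commit cycle for
 * a sample carrying one data word; this mirrors what op_add_code()
 * below does on a task switch:
 *
 *	struct op_entry entry;
 *	struct op_sample *sample;
 *
 *	sample = op_cpu_buffer_write_reserve(&entry, 1);
 *	if (!sample)
 *		return -ENOMEM;
 *	sample->eip = ESCAPE_CODE;
 *	sample->event = flags;
 *	op_cpu_buffer_add_data(&entry, (unsigned long)task);
 *	op_cpu_buffer_write_commit(&entry);
 */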
struct op_sample
*op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size)
{
	entry->event = ring_buffer_lock_reserve
		(op_ring_buffer_write, sizeof(struct op_sample) +
		 size * sizeof(entry->sample->data[0]));
	if (entry->event)
		entry->sample = ring_buffer_event_data(entry->event);
	else
		entry->sample = NULL;

	if (!entry->sample)
		return NULL;

	entry->size = size;
	entry->data = entry->sample->data;

	return entry->sample;
}

int op_cpu_buffer_write_commit(struct op_entry *entry)
{
	return ring_buffer_unlock_commit(op_ring_buffer_write, entry->event);
}

struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu)
{
	struct ring_buffer_event *e;
	e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
	if (e)
		goto event;
	if (ring_buffer_swap_cpu(op_ring_buffer_read,
				 op_ring_buffer_write,
				 cpu))
		return NULL;
	e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
	if (e)
		goto event;
	return NULL;

event:
	entry->event = e;
	entry->sample = ring_buffer_event_data(e);
	entry->size = (ring_buffer_event_length(e) - sizeof(struct op_sample))
		/ sizeof(entry->sample->data[0]);
	entry->data = entry->sample->data;
	return entry->sample;
}

unsigned long op_cpu_buffer_entries(int cpu)
{
	return ring_buffer_entries_cpu(op_ring_buffer_read, cpu)
		+ ring_buffer_entries_cpu(op_ring_buffer_write, cpu);
}

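/*
 * Record context changes (begin of a backtrace, kernel/user mode
 * switch, task switch) as an ESCAPE_CODE sample with the changes
 * encoded in the flags. On a task switch the task pointer is appended
 * as one data word. Returns 0 on success or if there is nothing to
 * log.
 */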
static int
op_add_code(struct oprofile_cpu_buffer *cpu_buf, unsigned long backtrace,
	    int is_kernel, struct task_struct *task)
{
	struct op_entry entry;
	struct op_sample *sample;
	unsigned long flags;
	int size;

	flags = 0;

	if (backtrace)
		flags |= TRACE_BEGIN;

	/* notice a switch from user->kernel or vice versa */
	is_kernel = !!is_kernel;
	if (cpu_buf->last_is_kernel != is_kernel) {
		cpu_buf->last_is_kernel = is_kernel;
		flags |= KERNEL_CTX_SWITCH;
		if (is_kernel)
			flags |= IS_KERNEL;
	}

	/* notice a task switch */
	if (cpu_buf->last_task != task) {
		cpu_buf->last_task = task;
		flags |= USER_CTX_SWITCH;
	}

	if (!flags)
		/* nothing to do */
		return 0;

	if (flags & USER_CTX_SWITCH)
		size = 1;
	else
		size = 0;

	sample = op_cpu_buffer_write_reserve(&entry, size);
	if (!sample)
		return -ENOMEM;

	sample->eip = ESCAPE_CODE;
	sample->event = flags;

	if (size)
		op_cpu_buffer_add_data(&entry, (unsigned long)task);

	op_cpu_buffer_write_commit(&entry);

	return 0;
}

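/*
 * Store a plain pc/event pair: a sample without extra data words.
 */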
static inline int
op_add_sample(struct oprofile_cpu_buffer *cpu_buf,
	      unsigned long pc, unsigned long event)
{
	struct op_entry entry;
	struct op_sample *sample;

	sample = op_cpu_buffer_write_reserve(&entry, 0);
	if (!sample)
		return -ENOMEM;

	sample->eip = pc;
	sample->event = event;

	return op_cpu_buffer_write_commit(&entry);
}

/*
 * This must be safe from any context.
 *
 * is_kernel is needed because on some architectures you cannot
 * tell if you are in kernel or user space simply by looking at
 * pc. We tag this in the buffer by generating kernel enter/exit
 * events whenever is_kernel changes.
 */
static int
log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
	   unsigned long backtrace, int is_kernel, unsigned long event)
{
	cpu_buf->sample_received++;

	if (pc == ESCAPE_CODE) {
		cpu_buf->sample_invalid_eip++;
		return 0;
	}

	if (op_add_code(cpu_buf, backtrace, is_kernel, current))
		goto fail;

	if (op_add_sample(cpu_buf, pc, event))
		goto fail;

	return 1;

fail:
	cpu_buf->sample_lost_overflow++;
	return 0;
}

static inline void oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf)
{
	cpu_buf->tracing = 1;
}

static inline void oprofile_end_trace(struct oprofile_cpu_buffer *cpu_buf)
{
	cpu_buf->tracing = 0;
}

static inline void
__oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
			  unsigned long event, int is_kernel)
{
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
	unsigned long backtrace = oprofile_backtrace_depth;

	/*
	 * if log_sample() fails, we can't backtrace since we lost the
	 * source of this event
	 */
	if (!log_sample(cpu_buf, pc, backtrace, is_kernel, event))
		/* failed */
		return;

	if (!backtrace)
		return;

	oprofile_begin_trace(cpu_buf);
	oprofile_ops.backtrace(regs, backtrace);
	oprofile_end_trace(cpu_buf);
}

void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
			     unsigned long event, int is_kernel)
{
	__oprofile_add_ext_sample(pc, regs, event, is_kernel);
}

void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
{
	int is_kernel = !user_mode(regs);
	unsigned long pc = profile_pc(regs);

	__oprofile_add_ext_sample(pc, regs, event, is_kernel);
}

/*
 * Add samples with data to the ring buffer.
 *
 * Use oprofile_add_data(&entry, val) to add data and
 * oprofile_write_commit(&entry) to commit the sample.
 */
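/*
 * Illustrative (not compiled) driver-side sketch for a sample with
 * two data words, where regs, pc and code are caller-supplied and
 * val0/val1 stand for the caller's payload:
 *
 *	struct op_entry entry;
 *
 *	oprofile_write_reserve(&entry, regs, pc, code, 2);
 *	oprofile_add_data(&entry, val0);
 *	oprofile_add_data(&entry, val1);
 *	oprofile_write_commit(&entry);
 *
 * oprofile_add_data() and oprofile_write_commit() check entry->event
 * themselves, so a failed reservation is handled gracefully.
 */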
void
oprofile_write_reserve(struct op_entry *entry, struct pt_regs * const regs,
		       unsigned long pc, int code, int size)
{
	struct op_sample *sample;
	int is_kernel = !user_mode(regs);
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);

	cpu_buf->sample_received++;

	/* no backtraces for samples with data */
	if (op_add_code(cpu_buf, 0, is_kernel, current))
		goto fail;

	sample = op_cpu_buffer_write_reserve(entry, size + 2);
	if (!sample)
		goto fail;
	sample->eip = ESCAPE_CODE;
	sample->event = 0;		/* no flags */

	op_cpu_buffer_add_data(entry, code);
	op_cpu_buffer_add_data(entry, pc);

	return;

fail:
	entry->event = NULL;
	cpu_buf->sample_lost_overflow++;
}

int oprofile_add_data(struct op_entry *entry, unsigned long val)
{
	if (!entry->event)
		return 0;
	return op_cpu_buffer_add_data(entry, val);
}

int oprofile_write_commit(struct op_entry *entry)
{
	if (!entry->event)
		return -EINVAL;
	return op_cpu_buffer_write_commit(entry);
}

void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
{
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
	log_sample(cpu_buf, pc, 0, is_kernel, event);
}

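/*
 * Append one address to the backtrace of the current sample. Only
 * valid while cpu_buf->tracing is set, i.e. between
 * oprofile_begin_trace() and oprofile_end_trace().
 */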
void oprofile_add_trace(unsigned long pc)
{
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);

	if (!cpu_buf->tracing)
		return;

	/*
	 * a broken frame can give an eip with the same value as an
	 * escape code, abort the trace if we get it
	 */
	if (pc == ESCAPE_CODE)
		goto fail;

	if (op_add_sample(cpu_buf, pc, 0))
		goto fail;

	return;
fail:
	cpu_buf->tracing = 0;
	cpu_buf->backtrace_aborted++;
	return;
}

/*
 * This serves to avoid cpu buffer overflow, and makes sure
 * the task mortuary progresses.
 *
 * By using schedule_delayed_work_on and then schedule_delayed_work
 * we guarantee this will stay on the correct cpu.
 */
static void wq_sync_buffer(struct work_struct *work)
{
	struct oprofile_cpu_buffer *b =
		container_of(work, struct oprofile_cpu_buffer, work.work);
	if (b->cpu != smp_processor_id()) {
		printk(KERN_DEBUG "WQ on CPU%d, prefer CPU%d\n",
		       smp_processor_id(), b->cpu);

		if (!cpu_online(b->cpu)) {
			cancel_delayed_work(&b->work);
			return;
		}
	}
	sync_buffer(b->cpu);

	/* don't re-add the work if we're shutting down */
	if (work_enabled)
		schedule_delayed_work(&b->work, DEFAULT_TIMER_EXPIRE);
}