/**
 * @file cpu_buffer.c
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Barry Kasindorf <barry.kasindorf@amd.com>
 *
 * Each CPU has a local buffer that stores PC value/event
 * pairs. We also log context switches when we notice them.
 * Eventually each CPU's buffer is processed into the global
 * event buffer by sync_buffer().
 *
 * We use a local buffer for two reasons: an NMI or similar
 * interrupt cannot synchronise, and high sampling rates
 * would lead to catastrophic global synchronisation if
 * a global buffer was used.
 */

#include <linux/sched.h>
#include <linux/oprofile.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>

#include "event_buffer.h"
#include "cpu_buffer.h"
#include "buffer_sync.h"
#include "oprof.h"

#define OP_BUFFER_FLAGS 0

/*
 * Read and write access is protected by spin locking. Thus, writing
 * to the buffer by the NMI handler (x86) could also occur during
 * critical sections when reading the buffer. To avoid this, there
 * are 2 buffers for independent read and write access. Read access
 * is in process context only, write access only in the NMI handler.
 * If the read buffer runs empty, both buffers are swapped atomically.
 * There is potentially a small window during swapping where the
 * buffers are disabled and samples could be lost.
 *
 * Using 2 buffers adds a little overhead, but the solution is clear
 * and does not require changes in the ring buffer implementation. It
 * can be changed to a single buffer solution when the ring buffer
 * access is implemented as non-locking atomic code.
 */
struct ring_buffer *op_ring_buffer_read;
struct ring_buffer *op_ring_buffer_write;
DEFINE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);

static void wq_sync_buffer(struct work_struct *work);

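/*
 * Sync each CPU buffer into the global event buffer roughly every
 * 100 ms: DEFAULT_TIMER_EXPIRE is in jiffies, and HZ jiffies equal
 * one second. work_enabled gates re-arming of the delayed work.
 */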
#define DEFAULT_TIMER_EXPIRE (HZ / 10)
static int work_enabled;

void free_cpu_buffers(void)
{
	if (op_ring_buffer_read)
		ring_buffer_free(op_ring_buffer_read);
	op_ring_buffer_read = NULL;
	if (op_ring_buffer_write)
		ring_buffer_free(op_ring_buffer_write);
	op_ring_buffer_write = NULL;
}

unsigned long oprofile_get_cpu_buffer_size(void)
{
	return fs_cpu_buffer_size;
}

void oprofile_cpu_buffer_inc_smpl_lost(void)
{
	struct oprofile_cpu_buffer *cpu_buf
		= &__get_cpu_var(cpu_buffer);

	cpu_buf->sample_lost_overflow++;
}

int alloc_cpu_buffers(void)
{
	int i;

	unsigned long buffer_size = fs_cpu_buffer_size;

	op_ring_buffer_read = ring_buffer_alloc(buffer_size, OP_BUFFER_FLAGS);
	if (!op_ring_buffer_read)
		goto fail;
	op_ring_buffer_write = ring_buffer_alloc(buffer_size, OP_BUFFER_FLAGS);
	if (!op_ring_buffer_write)
		goto fail;

	for_each_possible_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);

		b->last_task = NULL;
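		/* -1 differs from both 0 and 1, so the first sample
		   always emits a kernel/user transition code */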
		b->last_is_kernel = -1;
		b->tracing = 0;
		b->buffer_size = buffer_size;
		b->tail_pos = 0;
		b->head_pos = 0;
		b->sample_received = 0;
		b->sample_lost_overflow = 0;
		b->backtrace_aborted = 0;
		b->sample_invalid_eip = 0;
		b->cpu = i;
		INIT_DELAYED_WORK(&b->work, wq_sync_buffer);
	}
	return 0;

fail:
	free_cpu_buffers();
	return -ENOMEM;
}

void start_cpu_work(void)
{
	int i;

	work_enabled = 1;

	for_each_online_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);

		/*
		 * Spread the work by 1 jiffy per cpu so they don't all
		 * fire at once.
		 */
		schedule_delayed_work_on(i, &b->work, DEFAULT_TIMER_EXPIRE + i);
	}
}

void end_cpu_work(void)
{
	int i;

	work_enabled = 0;

	for_each_online_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);

		cancel_delayed_work(&b->work);
	}

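	/* cancel_delayed_work() only removes pending work; wait for
	   any wq_sync_buffer() instance that is already running */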
	flush_scheduled_work();
}

/* compute number of available slots in cpu_buffer queue */
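/*
 * One slot is always left unused so that a full buffer (head just
 * behind tail) can be distinguished from an empty one (head == tail);
 * hence the "- 1" in both branches below.
 */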
static unsigned long nr_available_slots(struct oprofile_cpu_buffer const *b)
{
	unsigned long head = b->head_pos;
	unsigned long tail = b->tail_pos;

	if (tail > head)
		return (tail - head) - 1;

	return tail + (b->buffer_size - head) - 1;
}
| 159 | |
Jesper Juhl | 77933d7 | 2005-07-27 11:46:09 -0700 | [diff] [blame] | 160 | static inline void |
Robert Richter | 25ad291 | 2008-09-05 17:12:36 +0200 | [diff] [blame] | 161 | add_sample(struct oprofile_cpu_buffer *cpu_buf, |
Robert Richter | 6a18037 | 2008-10-16 15:01:40 +0200 | [diff] [blame] | 162 | unsigned long pc, unsigned long event) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 163 | { |
Robert Richter | 6dad828 | 2008-12-09 01:21:32 +0100 | [diff] [blame^] | 164 | struct op_entry entry; |
| 165 | |
| 166 | if (cpu_buffer_write_entry(&entry)) |
| 167 | goto Error; |
| 168 | |
| 169 | entry.sample->eip = pc; |
| 170 | entry.sample->event = event; |
| 171 | |
| 172 | if (cpu_buffer_write_commit(&entry)) |
| 173 | goto Error; |
| 174 | |
| 175 | return; |
| 176 | |
| 177 | Error: |
| 178 | cpu_buf->sample_lost_overflow++; |
| 179 | return; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 180 | } |
| 181 | |
Jesper Juhl | 77933d7 | 2005-07-27 11:46:09 -0700 | [diff] [blame] | 182 | static inline void |
Robert Richter | 25ad291 | 2008-09-05 17:12:36 +0200 | [diff] [blame] | 183 | add_code(struct oprofile_cpu_buffer *buffer, unsigned long value) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 184 | { |
| 185 | add_sample(buffer, ESCAPE_CODE, value); |
| 186 | } |
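
/*
 * Control information (task switches, kernel/user transitions, trace
 * markers) shares the buffer with real samples: add_code() stores it
 * as a sample whose eip is the reserved ESCAPE_CODE value, with the
 * event field carrying the actual code.
 */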

/* This must be safe from any context. It's safe writing here
 * because of the head/tail separation of the writer and reader
 * of the CPU buffer.
 *
 * is_kernel is needed because on some architectures you cannot
 * tell if you are in kernel or user space simply by looking at
 * pc. We tag this in the buffer by generating kernel enter/exit
 * events whenever is_kernel changes.
 */
static int log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
		      int is_kernel, unsigned long event)
{
	struct task_struct *task;

	cpu_buf->sample_received++;

	if (pc == ESCAPE_CODE) {
		cpu_buf->sample_invalid_eip++;
		return 0;
	}

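	/*
	 * Worst case below: a kernel/user switch code, a task switch
	 * code, and the sample itself -- three entries.
	 */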
	if (nr_available_slots(cpu_buf) < 3) {
		cpu_buf->sample_lost_overflow++;
		return 0;
	}

	is_kernel = !!is_kernel;

	task = current;

	/* notice a switch from user->kernel or vice versa */
	if (cpu_buf->last_is_kernel != is_kernel) {
		cpu_buf->last_is_kernel = is_kernel;
		add_code(cpu_buf, is_kernel);
	}

	/* notice a task switch */
	if (cpu_buf->last_task != task) {
		cpu_buf->last_task = task;
		add_code(cpu_buf, (unsigned long)task);
	}

	add_sample(cpu_buf, pc, event);
	return 1;
}

static int oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf)
{
	if (nr_available_slots(cpu_buf) < 4) {
		cpu_buf->sample_lost_overflow++;
		return 0;
	}

	add_code(cpu_buf, CPU_TRACE_BEGIN);
	cpu_buf->tracing = 1;
	return 1;
}

static void oprofile_end_trace(struct oprofile_cpu_buffer *cpu_buf)
{
	cpu_buf->tracing = 0;
}

void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
			     unsigned long event, int is_kernel)
{
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);

	if (!backtrace_depth) {
		log_sample(cpu_buf, pc, is_kernel, event);
		return;
	}

	if (!oprofile_begin_trace(cpu_buf))
		return;

	/*
	 * If log_sample() fails we can't backtrace since we lost the
	 * source of this event.
	 */
	if (log_sample(cpu_buf, pc, is_kernel, event))
		oprofile_ops.backtrace(regs, backtrace_depth);
	oprofile_end_trace(cpu_buf);
}

void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
{
	int is_kernel = !user_mode(regs);
	unsigned long pc = profile_pc(regs);

	oprofile_add_ext_sample(pc, regs, event, is_kernel);
}

#ifdef CONFIG_OPROFILE_IBS

#define MAX_IBS_SAMPLE_SIZE 14
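/*
 * In the worst case (IBS_OP_BEGIN preceded by both a kernel/user
 * switch and a task switch), nine buffer entries are written below;
 * the availability check reserves MAX_IBS_SAMPLE_SIZE slots.
 */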

void oprofile_add_ibs_sample(struct pt_regs * const regs,
			     unsigned int * const ibs_sample, int ibs_code)
{
	int is_kernel = !user_mode(regs);
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
	struct task_struct *task;

	cpu_buf->sample_received++;

	if (nr_available_slots(cpu_buf) < MAX_IBS_SAMPLE_SIZE) {
		/* we can't backtrace since we lost the source of this event */
		cpu_buf->sample_lost_overflow++;
		return;
	}

	/* notice a switch from user->kernel or vice versa */
	if (cpu_buf->last_is_kernel != is_kernel) {
		cpu_buf->last_is_kernel = is_kernel;
		add_code(cpu_buf, is_kernel);
	}

	/* notice a task switch */
	if (!is_kernel) {
		task = current;
		if (cpu_buf->last_task != task) {
			cpu_buf->last_task = task;
			add_code(cpu_buf, (unsigned long)task);
		}
	}

	add_code(cpu_buf, ibs_code);
	add_sample(cpu_buf, ibs_sample[0], ibs_sample[1]);
	add_sample(cpu_buf, ibs_sample[2], ibs_sample[3]);
	add_sample(cpu_buf, ibs_sample[4], ibs_sample[5]);

	if (ibs_code == IBS_OP_BEGIN) {
		add_sample(cpu_buf, ibs_sample[6], ibs_sample[7]);
		add_sample(cpu_buf, ibs_sample[8], ibs_sample[9]);
		add_sample(cpu_buf, ibs_sample[10], ibs_sample[11]);
	}

	if (backtrace_depth)
		oprofile_ops.backtrace(regs, backtrace_depth);
}

#endif

void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
{
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
	log_sample(cpu_buf, pc, is_kernel, event);
}

void oprofile_add_trace(unsigned long pc)
{
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);

	if (!cpu_buf->tracing)
		return;

	if (nr_available_slots(cpu_buf) < 1) {
		cpu_buf->tracing = 0;
		cpu_buf->sample_lost_overflow++;
		return;
	}

	/*
	 * A broken frame can give an eip with the same value as an
	 * escape code; abort the trace if we get it.
	 */
	if (pc == ESCAPE_CODE) {
		cpu_buf->tracing = 0;
		cpu_buf->backtrace_aborted++;
		return;
	}

	add_sample(cpu_buf, pc, 0);
}

/*
 * This serves to avoid CPU buffer overflow, and makes sure
 * the task mortuary progresses.
 *
 * By using schedule_delayed_work_on() and then schedule_delayed_work()
 * we guarantee this will stay on the correct CPU.
 */
static void wq_sync_buffer(struct work_struct *work)
{
	struct oprofile_cpu_buffer *b =
		container_of(work, struct oprofile_cpu_buffer, work.work);
	if (b->cpu != smp_processor_id()) {
		printk(KERN_DEBUG "WQ on CPU%d, prefer CPU%d\n",
		       smp_processor_id(), b->cpu);

		if (!cpu_online(b->cpu)) {
			cancel_delayed_work(&b->work);
			return;
		}
	}
	sync_buffer(b->cpu);

	/* don't re-add the work if we're shutting down */
	if (work_enabled)
		schedule_delayed_work(&b->work, DEFAULT_TIMER_EXPIRE);
}