/**
 * @file cpu_buffer.c
 *
 * @remark Copyright 2002-2009 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Barry Kasindorf <barry.kasindorf@amd.com>
 * @author Robert Richter <robert.richter@amd.com>
 *
 * Each CPU has a local buffer that stores PC value/event
 * pairs. We also log context switches when we notice them.
 * Eventually each CPU's buffer is processed into the global
 * event buffer by sync_buffer().
 *
 * We use a local buffer for two reasons: an NMI or similar
 * interrupt cannot synchronise, and high sampling rates
 * would lead to catastrophic global synchronisation if
 * a global buffer was used.
 */

#include <linux/sched.h>
#include <linux/oprofile.h>
#include <linux/errno.h>

#include "event_buffer.h"
#include "cpu_buffer.h"
#include "buffer_sync.h"
#include "oprof.h"

#define OP_BUFFER_FLAGS 0

/*
 * Read and write access to the ring buffer uses spin locking. Thus,
 * writing to the buffer by the NMI handler (x86) could also occur
 * during critical sections when reading the buffer. To avoid this,
 * there are 2 buffers for independent read and write access. Read
 * access is in process context only, write access only in the NMI
 * handler. If the read buffer runs empty, both buffers are swapped
 * atomically. There is potentially a small window during swapping
 * where the buffers are disabled and samples could be lost.
 *
 * Using 2 buffers adds a little overhead, but the solution is clear
 * and does not require changes in the ring buffer implementation. It
 * can be changed to a single-buffer solution once the ring buffer
 * access is implemented as non-locking atomic code.
 */
static struct ring_buffer *op_ring_buffer_read;
static struct ring_buffer *op_ring_buffer_write;
DEFINE_PER_CPU(struct oprofile_cpu_buffer, op_cpu_buffer);

static void wq_sync_buffer(struct work_struct *work);

#define DEFAULT_TIMER_EXPIRE (HZ / 10)
static int work_enabled;

unsigned long oprofile_get_cpu_buffer_size(void)
{
	return oprofile_cpu_buffer_size;
}

void oprofile_cpu_buffer_inc_smpl_lost(void)
{
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);

	cpu_buf->sample_lost_overflow++;
}

void free_cpu_buffers(void)
{
	if (op_ring_buffer_read)
		ring_buffer_free(op_ring_buffer_read);
	op_ring_buffer_read = NULL;
	if (op_ring_buffer_write)
		ring_buffer_free(op_ring_buffer_write);
	op_ring_buffer_write = NULL;
}

#define RB_EVENT_HDR_SIZE 4

int alloc_cpu_buffers(void)
{
	int i;

	unsigned long buffer_size = oprofile_cpu_buffer_size;
	unsigned long byte_size = buffer_size * (sizeof(struct op_sample) +
						 RB_EVENT_HDR_SIZE);

	op_ring_buffer_read = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS);
	if (!op_ring_buffer_read)
		goto fail;
	op_ring_buffer_write = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS);
	if (!op_ring_buffer_write)
		goto fail;

	for_each_possible_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i);

		b->last_task = NULL;
		b->last_is_kernel = -1;
		b->tracing = 0;
		b->buffer_size = buffer_size;
		b->sample_received = 0;
		b->sample_lost_overflow = 0;
		b->backtrace_aborted = 0;
		b->sample_invalid_eip = 0;
		b->cpu = i;
		INIT_DELAYED_WORK(&b->work, wq_sync_buffer);
	}
	return 0;

fail:
	free_cpu_buffers();
	return -ENOMEM;
}

void start_cpu_work(void)
{
	int i;

	work_enabled = 1;

	for_each_online_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i);

		/*
		 * Spread the work by 1 jiffy per cpu so they don't all
		 * fire at once.
		 */
		schedule_delayed_work_on(i, &b->work, DEFAULT_TIMER_EXPIRE + i);
	}
}

void end_cpu_work(void)
{
	int i;

	work_enabled = 0;

	for_each_online_cpu(i) {
		struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i);

		cancel_delayed_work(&b->work);
	}

	flush_scheduled_work();
}

/*
 * This function prepares the cpu buffer to write a sample.
 *
 * Struct op_entry is used during operations on the ring buffer while
 * struct op_sample contains the data that is stored in the ring
 * buffer. Struct op_entry may be uninitialized on entry. The function
 * reserves a data array of the given size. Call
 * op_cpu_buffer_write_commit() after preparing the sample. On error
 * a NULL pointer is returned, otherwise the pointer to the sample.
 */
struct op_sample
*op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size)
{
	entry->event = ring_buffer_lock_reserve
		(op_ring_buffer_write, sizeof(struct op_sample) +
		 size * sizeof(entry->sample->data[0]));
	if (entry->event)
		entry->sample = ring_buffer_event_data(entry->event);
	else
		entry->sample = NULL;

	if (!entry->sample)
		return NULL;

	entry->size = size;
	entry->data = entry->sample->data;

	return entry->sample;
}

int op_cpu_buffer_write_commit(struct op_entry *entry)
{
	return ring_buffer_unlock_commit(op_ring_buffer_write, entry->event);
}

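/*
 * Illustrative sketch only (not compiled here): a writer reserves a
 * sample, fills in eip and event, optionally appends data words, and
 * commits. This mirrors op_add_code()/op_add_sample() below; "flags",
 * "data0" and "data1" stand in for caller-provided values.
 *
 *	struct op_entry entry;
 *	struct op_sample *sample;
 *
 *	sample = op_cpu_buffer_write_reserve(&entry, 2);
 *	if (!sample)
 *		return -ENOMEM;
 *	sample->eip = ESCAPE_CODE;
 *	sample->event = flags;
 *	op_cpu_buffer_add_data(&entry, data0);
 *	op_cpu_buffer_add_data(&entry, data1);
 *	return op_cpu_buffer_write_commit(&entry);
 */
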
struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu)
{
	struct ring_buffer_event *e;
	e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
	if (e)
		goto event;
	if (ring_buffer_swap_cpu(op_ring_buffer_read,
				 op_ring_buffer_write,
				 cpu))
		return NULL;
	e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
	if (e)
		goto event;
	return NULL;

event:
	entry->event = e;
	entry->sample = ring_buffer_event_data(e);
	entry->size = (ring_buffer_event_length(e) - sizeof(struct op_sample))
		/ sizeof(entry->sample->data[0]);
	entry->data = entry->sample->data;
	return entry->sample;
}

unsigned long op_cpu_buffer_entries(int cpu)
{
	return ring_buffer_entries_cpu(op_ring_buffer_read, cpu)
		+ ring_buffer_entries_cpu(op_ring_buffer_write, cpu);
}

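/*
 * Illustrative sketch only (not compiled here): the read side, used by
 * sync_buffer() in buffer_sync.c, drains one cpu at a time. The swap of
 * the read and write buffers described above happens transparently
 * inside op_cpu_buffer_read_entry() when the read buffer runs empty.
 * "process_sample" is a hypothetical consumer for this example.
 *
 *	struct op_entry entry;
 *	struct op_sample *sample;
 *	unsigned long available = op_cpu_buffer_entries(cpu);
 *
 *	while (available-- > 0) {
 *		sample = op_cpu_buffer_read_entry(&entry, cpu);
 *		if (!sample)
 *			break;
 *		process_sample(sample);
 *	}
 */
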
static int
op_add_code(struct oprofile_cpu_buffer *cpu_buf, unsigned long backtrace,
	    int is_kernel, struct task_struct *task)
{
	struct op_entry entry;
	struct op_sample *sample;
	unsigned long flags;
	int size;

	flags = 0;

	if (backtrace)
		flags |= TRACE_BEGIN;

	/* notice a switch from user->kernel or vice versa */
	is_kernel = !!is_kernel;
	if (cpu_buf->last_is_kernel != is_kernel) {
		cpu_buf->last_is_kernel = is_kernel;
		flags |= KERNEL_CTX_SWITCH;
		if (is_kernel)
			flags |= IS_KERNEL;
	}

	/* notice a task switch */
	if (cpu_buf->last_task != task) {
		cpu_buf->last_task = task;
		flags |= USER_CTX_SWITCH;
	}

	if (!flags)
		/* nothing to do */
		return 0;

	if (flags & USER_CTX_SWITCH)
		size = 1;
	else
		size = 0;

	sample = op_cpu_buffer_write_reserve(&entry, size);
	if (!sample)
		return -ENOMEM;

	sample->eip = ESCAPE_CODE;
	sample->event = flags;

	if (size)
		op_cpu_buffer_add_data(&entry, (unsigned long)task);

	op_cpu_buffer_write_commit(&entry);

	return 0;
}

static inline int
op_add_sample(struct oprofile_cpu_buffer *cpu_buf,
	      unsigned long pc, unsigned long event)
{
	struct op_entry entry;
	struct op_sample *sample;

	sample = op_cpu_buffer_write_reserve(&entry, 0);
	if (!sample)
		return -ENOMEM;

	sample->eip = pc;
	sample->event = event;

	return op_cpu_buffer_write_commit(&entry);
}

/*
 * This must be safe from any context.
 *
 * is_kernel is needed because on some architectures you cannot
 * tell if you are in kernel or user space simply by looking at
 * pc. We tag this in the buffer by generating kernel enter/exit
 * events whenever is_kernel changes.
 */
static int
log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
	   unsigned long backtrace, int is_kernel, unsigned long event)
{
	cpu_buf->sample_received++;

	if (pc == ESCAPE_CODE) {
		cpu_buf->sample_invalid_eip++;
		return 0;
	}

	if (op_add_code(cpu_buf, backtrace, is_kernel, current))
		goto fail;

	if (op_add_sample(cpu_buf, pc, event))
		goto fail;

	return 1;

fail:
	cpu_buf->sample_lost_overflow++;
	return 0;
}

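/*
 * Illustrative sketch: the kernel enter/exit tagging mentioned above is
 * emitted by op_add_code() as an escape record. On a user->kernel
 * transition, for example, the reserved sample carries
 *
 *	sample->eip   = ESCAPE_CODE;
 *	sample->event = KERNEL_CTX_SWITCH | IS_KERNEL;
 *
 * and sync_buffer() interprets these flags when it copies the cpu
 * buffer into the global event buffer.
 */
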
static inline void oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf)
{
	cpu_buf->tracing = 1;
}

static inline void oprofile_end_trace(struct oprofile_cpu_buffer *cpu_buf)
{
	cpu_buf->tracing = 0;
}

static inline void
__oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
			  unsigned long event, int is_kernel)
{
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);
	unsigned long backtrace = oprofile_backtrace_depth;

	/*
	 * if log_sample() fails we can't backtrace since we lost the
	 * source of this event
	 */
	if (!log_sample(cpu_buf, pc, backtrace, is_kernel, event))
		/* failed */
		return;

	if (!backtrace)
		return;

	oprofile_begin_trace(cpu_buf);
	oprofile_ops.backtrace(regs, backtrace);
	oprofile_end_trace(cpu_buf);
}

void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
			     unsigned long event, int is_kernel)
{
	__oprofile_add_ext_sample(pc, regs, event, is_kernel);
}

void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
{
	int is_kernel = !user_mode(regs);
	unsigned long pc = profile_pc(regs);

	__oprofile_add_ext_sample(pc, regs, event, is_kernel);
}

/*
 * Add samples with data to the ring buffer.
 *
 * Use oprofile_add_data(&entry, val) to add data and
 * oprofile_write_commit(&entry) to commit the sample.
 */
void
oprofile_write_reserve(struct op_entry *entry, struct pt_regs * const regs,
		       unsigned long pc, int code, int size)
{
	struct op_sample *sample;
	int is_kernel = !user_mode(regs);
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);

	cpu_buf->sample_received++;

	/* no backtraces for samples with data */
	if (op_add_code(cpu_buf, 0, is_kernel, current))
		goto fail;

	sample = op_cpu_buffer_write_reserve(entry, size + 2);
	if (!sample)
		goto fail;
	sample->eip = ESCAPE_CODE;
	sample->event = 0;		/* no flags */

	op_cpu_buffer_add_data(entry, code);
	op_cpu_buffer_add_data(entry, pc);

	return;

fail:
	entry->event = NULL;
	cpu_buf->sample_lost_overflow++;
}

int oprofile_add_data(struct op_entry *entry, unsigned long val)
{
	if (!entry->event)
		return 0;
	return op_cpu_buffer_add_data(entry, val);
}

int oprofile_add_data64(struct op_entry *entry, u64 val)
{
	if (!entry->event)
		return 0;
	if (op_cpu_buffer_get_size(entry) < 2)
		/*
		 * the function returns 0 to indicate a too-small
		 * buffer, even if there is some space left
		 */
		return 0;
	if (!op_cpu_buffer_add_data(entry, (u32)val))
		return 0;
	return op_cpu_buffer_add_data(entry, (u32)(val >> 32));
}

int oprofile_write_commit(struct op_entry *entry)
{
	if (!entry->event)
		return -EINVAL;
	return op_cpu_buffer_write_commit(entry);
}

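/*
 * Illustrative sketch only (not compiled here): an architecture driver
 * (an IBS-style handler, for instance) would emit a sample with extra
 * data using the three calls above. "IBS_CODE", "counter_low" and
 * "counter_high" are placeholder names; size must match the number of
 * oprofile_add_data() calls that follow the reserve.
 *
 *	struct op_entry entry;
 *
 *	oprofile_write_reserve(&entry, regs, pc, IBS_CODE, 2);
 *	oprofile_add_data(&entry, counter_low);
 *	oprofile_add_data(&entry, counter_high);
 *	oprofile_write_commit(&entry);
 */
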
void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
{
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);
	log_sample(cpu_buf, pc, 0, is_kernel, event);
}

void oprofile_add_trace(unsigned long pc)
{
	struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);

	if (!cpu_buf->tracing)
		return;

	/*
	 * a broken frame can give an eip with the same value as an
	 * escape code, abort the trace if we get it
	 */
	if (pc == ESCAPE_CODE)
		goto fail;

	if (op_add_sample(cpu_buf, pc, 0))
		goto fail;

	return;
fail:
	cpu_buf->tracing = 0;
	cpu_buf->backtrace_aborted++;
	return;
}

/*
 * This serves to avoid cpu buffer overflow, and makes sure
 * the task mortuary progresses.
 *
 * By using schedule_delayed_work_on and then schedule_delayed_work
 * we guarantee this will stay on the correct cpu.
 */
static void wq_sync_buffer(struct work_struct *work)
{
	struct oprofile_cpu_buffer *b =
		container_of(work, struct oprofile_cpu_buffer, work.work);
	if (b->cpu != smp_processor_id()) {
		printk(KERN_DEBUG "WQ on CPU%d, prefer CPU%d\n",
		       smp_processor_id(), b->cpu);

		if (!cpu_online(b->cpu)) {
			cancel_delayed_work(&b->work);
			return;
		}
	}
	sync_buffer(b->cpu);

	/* don't re-add the work if we're shutting down */
	if (work_enabled)
		schedule_delayed_work(&b->work, DEFAULT_TIMER_EXPIRE);
}