blob: 83d491e273fe67c07a2cb99c886e93fddc1bcd46 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/**
2 * @file cpu_buffer.h
3 *
4 * @remark Copyright 2002 OProfile authors
5 * @remark Read the file COPYING
6 *
7 * @author John Levon <levon@movementarian.org>
8 */
9
10#ifndef OPROFILE_CPU_BUFFER_H
11#define OPROFILE_CPU_BUFFER_H
12
13#include <linux/types.h>
14#include <linux/spinlock.h>
15#include <linux/workqueue.h>
16#include <linux/cache.h>
Mike Travis608dfdd2008-04-28 02:14:15 -070017#include <linux/sched.h>
Robert Richter6dad8282008-12-09 01:21:32 +010018#include <linux/ring_buffer.h>
Robert Richter6a180372008-10-16 15:01:40 +020019
struct task_struct;

/* Allocate / free the per-CPU sample buffers.
 * alloc_cpu_buffers() returns an int status — presumably 0 on success,
 * negative errno on failure; confirm against cpu_buffer.c. */
int alloc_cpu_buffers(void);
void free_cpu_buffers(void);

/* Start / stop the per-CPU flushing work (see the delayed_work member
 * of struct oprofile_cpu_buffer). */
void start_cpu_work(void);
void end_cpu_work(void);
27
/* CPU buffer is composed of such entries (which are
 * also used for context switch notes)
 */
struct op_sample {
	unsigned long eip;	/* sampled instruction pointer */
	unsigned long event;	/* event value; meaning depends on entry type */
};
Robert Richter6a180372008-10-16 15:01:40 +020035
/* State for one in-flight write into the ring buffer: filled by
 * op_cpu_buffer_write_entry(), consumed by op_cpu_buffer_write_commit(). */
struct op_entry {
	struct ring_buffer_event *event;	/* reserved ring buffer slot */
	struct op_sample *sample;	/* payload area inside @event */
	unsigned long irq_flags;	/* IRQ state saved by lock_reserve */
};
41
/* Per-CPU sample buffer state (one instance per CPU via DECLARE_PER_CPU). */
struct oprofile_cpu_buffer {
	volatile unsigned long head_pos;	/* producer position */
	volatile unsigned long tail_pos;	/* consumer position */
	unsigned long buffer_size;
	struct task_struct *last_task;	/* task of previous sample; NULL after reset */
	int last_is_kernel;	/* kernel/user mode of previous sample; -1 after reset */
	int tracing;	/* non-zero while a backtrace is being recorded -- TODO confirm */
	/* statistics counters */
	unsigned long sample_received;
	unsigned long sample_lost_overflow;
	unsigned long backtrace_aborted;
	unsigned long sample_invalid_eip;
	int cpu;	/* owning CPU number */
	struct delayed_work work;	/* deferred flush into the event buffer */
};
Linus Torvalds1da177e2005-04-16 15:20:36 -070056
/* Double-buffered ring buffers: samples are written to the write side and
 * consumed from the read side (swapped via ring_buffer_swap_cpu below). */
extern struct ring_buffer *op_ring_buffer_read;
extern struct ring_buffer *op_ring_buffer_write;
DECLARE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);
Linus Torvalds1da177e2005-04-16 15:20:36 -070060
Robert Richterfbc9bf92008-12-04 16:27:00 +010061/*
62 * Resets the cpu buffer to a sane state.
63 *
64 * reset these to invalid values; the next sample collected will
65 * populate the buffer with proper values to initialize the buffer
66 */
Robert Richter6d2c53f2008-12-24 16:53:53 +010067static inline void op_cpu_buffer_reset(int cpu)
Robert Richterfbc9bf92008-12-04 16:27:00 +010068{
69 struct oprofile_cpu_buffer *cpu_buf = &per_cpu(cpu_buffer, cpu);
70
71 cpu_buf->last_is_kernel = -1;
72 cpu_buf->last_task = NULL;
73}
Linus Torvalds1da177e2005-04-16 15:20:36 -070074
Robert Richter6d2c53f2008-12-24 16:53:53 +010075static inline int op_cpu_buffer_write_entry(struct op_entry *entry)
Robert Richter7d468ab2008-11-27 10:57:09 +010076{
Robert Richter6dad8282008-12-09 01:21:32 +010077 entry->event = ring_buffer_lock_reserve(op_ring_buffer_write,
78 sizeof(struct op_sample),
79 &entry->irq_flags);
80 if (entry->event)
81 entry->sample = ring_buffer_event_data(entry->event);
Robert Richter229234a2008-11-27 18:36:08 +010082 else
Robert Richter6dad8282008-12-09 01:21:32 +010083 entry->sample = NULL;
84
85 if (!entry->sample)
86 return -ENOMEM;
87
88 return 0;
Robert Richter229234a2008-11-27 18:36:08 +010089}
90
/* Commit a slot previously reserved with op_cpu_buffer_write_entry(),
 * restoring the IRQ state saved in entry->irq_flags. */
static inline int op_cpu_buffer_write_commit(struct op_entry *entry)
{
	return ring_buffer_unlock_commit(op_ring_buffer_write, entry->event,
					 entry->irq_flags);
}
96
Robert Richter6d2c53f2008-12-24 16:53:53 +010097static inline struct op_sample *op_cpu_buffer_read_entry(int cpu)
Robert Richter6dad8282008-12-09 01:21:32 +010098{
99 struct ring_buffer_event *e;
100 e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
101 if (e)
102 return ring_buffer_event_data(e);
103 if (ring_buffer_swap_cpu(op_ring_buffer_read,
104 op_ring_buffer_write,
105 cpu))
106 return NULL;
107 e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
108 if (e)
109 return ring_buffer_event_data(e);
110 return NULL;
Robert Richter7d468ab2008-11-27 10:57:09 +0100111}
112
Robert Richterbf589e32008-11-27 22:33:37 +0100113/* "acquire" as many cpu buffer slots as we can */
Robert Richter6d2c53f2008-12-24 16:53:53 +0100114static inline unsigned long op_cpu_buffer_entries(int cpu)
Robert Richterbf589e32008-11-27 22:33:37 +0100115{
Robert Richter6dad8282008-12-09 01:21:32 +0100116 return ring_buffer_entries_cpu(op_ring_buffer_read, cpu)
117 + ring_buffer_entries_cpu(op_ring_buffer_write, cpu);
Robert Richterbf589e32008-11-27 22:33:37 +0100118}
119
/* transient events for the CPU buffer -> event buffer */
#define CPU_IS_KERNEL 1		/* kernel/user mode switch marker */
#define CPU_TRACE_BEGIN 2	/* start of a (back)trace sequence */
#define IBS_FETCH_BEGIN 3	/* start of an IBS fetch sample -- AMD IBS, presumably */
#define IBS_OP_BEGIN 4		/* start of an IBS op sample -- AMD IBS, presumably */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700125
126#endif /* OPROFILE_CPU_BUFFER_H */