blob: ceb68aa51f7f5eaeaa109a250ac0ceb9ddc68665 [file] [log] [blame]
Thomas Gleixner6eda5832009-05-01 18:29:57 +02001#ifndef _PERF_PERF_H
2#define _PERF_PERF_H
3
Peter Zijlstra1a482f32009-05-23 18:28:58 +02004#if defined(__x86_64__) || defined(__i386__)
5#include "../../arch/x86/include/asm/unistd.h"
6#define rmb() asm volatile("lfence" ::: "memory")
7#define cpu_relax() asm volatile("rep; nop" ::: "memory");
8#endif
9
10#ifdef __powerpc__
11#include "../../arch/powerpc/include/asm/unistd.h"
12#define rmb() asm volatile ("sync" ::: "memory")
13#define cpu_relax() asm volatile ("" ::: "memory");
14#endif
15
Martin Schwidefsky12310e92009-06-22 12:08:22 +020016#ifdef __s390__
17#include "../../arch/s390/include/asm/unistd.h"
18#define rmb() asm volatile("bcr 15,0" ::: "memory")
19#define cpu_relax() asm volatile("" ::: "memory");
20#endif
21
Peter Zijlstra1a482f32009-05-23 18:28:58 +020022#include <time.h>
23#include <unistd.h>
24#include <sys/types.h>
25#include <sys/syscall.h>
26
27#include "../../include/linux/perf_counter.h"
Paul Mackerras9cffa8d2009-06-19 22:21:42 +100028#include "types.h"
Peter Zijlstra1a482f32009-05-23 18:28:58 +020029
Thomas Gleixner6eda5832009-05-01 18:29:57 +020030/*
31 * prctl(PR_TASK_PERF_COUNTERS_DISABLE) will (cheaply) disable all
32 * counters in the current task.
33 */
34#define PR_TASK_PERF_COUNTERS_DISABLE 31
35#define PR_TASK_PERF_COUNTERS_ENABLE 32
36
Thomas Gleixnera92e702372009-05-01 18:39:47 +020037#ifndef NSEC_PER_SEC
38# define NSEC_PER_SEC 1000000000ULL
39#endif
40
41static inline unsigned long long rdclock(void)
42{
43 struct timespec ts;
44
45 clock_gettime(CLOCK_MONOTONIC, &ts);
46 return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
47}
Thomas Gleixner6eda5832009-05-01 18:29:57 +020048
49/*
50 * Pick up some kernel type conventions:
51 */
/* Kernel annotations that mean nothing in userspace — define them away. */
#define __user
#define asmlinkage

/* Branch-prediction hint: tell GCC the condition is expected to be false. */
#define unlikely(x)	__builtin_expect(!!(x), 0)
/*
 * Type-safe min(): evaluates each argument exactly once; the dummy
 * pointer comparison makes the compiler warn if x and y have
 * different types.
 */
#define min(x, y) ({ \
	typeof(x) _min1 = (x); \
	typeof(y) _min2 = (y); \
	(void) (&_min1 == &_min2); \
	_min1 < _min2 ? _min1 : _min2; })
61
62static inline int
Peter Zijlstra974802e2009-06-12 12:46:55 +020063sys_perf_counter_open(struct perf_counter_attr *attr,
Thomas Gleixner6eda5832009-05-01 18:29:57 +020064 pid_t pid, int cpu, int group_fd,
65 unsigned long flags)
66{
Peter Zijlstra974802e2009-06-12 12:46:55 +020067 attr->size = sizeof(*attr);
68 return syscall(__NR_perf_counter_open, attr, pid, cpu,
Thomas Gleixner6eda5832009-05-01 18:29:57 +020069 group_fd, flags);
70}
71
Ingo Molnar85a9f922009-05-25 09:59:50 +020072#define MAX_COUNTERS 256
73#define MAX_NR_CPUS 256
Thomas Gleixner6eda5832009-05-01 18:29:57 +020074
Peter Zijlstraf5970552009-06-18 23:22:55 +020075struct perf_file_header {
Paul Mackerras9cffa8d2009-06-19 22:21:42 +100076 u64 version;
77 u64 sample_type;
78 u64 data_size;
Peter Zijlstraf5970552009-06-18 23:22:55 +020079};
80
Thomas Gleixner6eda5832009-05-01 18:29:57 +020081#endif