#ifndef _PERF_PERF_H
#define _PERF_PERF_H

#if defined(__i386__)
#include "../../arch/x86/include/asm/unistd.h"
#define rmb()		asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#define cpu_relax()	asm volatile("rep; nop" ::: "memory")
#endif

#if defined(__x86_64__)
#include "../../arch/x86/include/asm/unistd.h"
#define rmb()		asm volatile("lfence" ::: "memory")
#define cpu_relax()	asm volatile("rep; nop" ::: "memory")
#endif

#ifdef __powerpc__
#include "../../arch/powerpc/include/asm/unistd.h"
#define rmb()		asm volatile ("sync" ::: "memory")
#define cpu_relax()	asm volatile ("" ::: "memory")
#endif

#ifdef __s390__
#include "../../arch/s390/include/asm/unistd.h"
#define rmb()		asm volatile("bcr 15,0" ::: "memory")
#define cpu_relax()	asm volatile("" ::: "memory")
#endif

#ifdef __sh__
#include "../../arch/sh/include/asm/unistd.h"
#if defined(__SH4A__) || defined(__SH5__)
# define rmb()		asm volatile("synco" ::: "memory")
#else
# define rmb()		asm volatile("" ::: "memory")
#endif
#define cpu_relax()	asm volatile("" ::: "memory")
#endif

#ifdef __hppa__
#include "../../arch/parisc/include/asm/unistd.h"
#define rmb()		asm volatile("" ::: "memory")
#define cpu_relax()	asm volatile("" ::: "memory")
#endif

#ifdef __sparc__
#include "../../arch/sparc/include/asm/unistd.h"
#define rmb()		asm volatile("" ::: "memory")
#define cpu_relax()	asm volatile("" ::: "memory")
#endif

#ifdef __alpha__
#include "../../arch/alpha/include/asm/unistd.h"
#define rmb()		asm volatile("mb" ::: "memory")
#define cpu_relax()	asm volatile("" ::: "memory")
#endif

#ifdef __ia64__
#include "../../arch/ia64/include/asm/unistd.h"
#define rmb()		asm volatile ("mf" ::: "memory")
#define cpu_relax()	asm volatile ("hint @pause" ::: "memory")
#endif

#ifdef __arm__
#include "../../arch/arm/include/asm/unistd.h"
/*
 * Use the __kuser_memory_barrier helper in the CPU helper page. See
 * arch/arm/kernel/entry-armv.S in the kernel source for details.
 */
#define rmb()		asm volatile("mov r0, #0xffff0fff; mov lr, pc;" \
				     "sub pc, r0, #95" ::: "r0", "lr", "cc", \
				     "memory")
#define cpu_relax()	asm volatile("" ::: "memory")
#endif
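
/*
 * Illustrative sketch (not part of the original header) of why rmb()
 * matters: when reading the mmap()ed perf ring buffer, data_head in
 * struct perf_event_mmap_page must be read before the records that the
 * kernel wrote behind it.  cpu_relax() is meant for busy-wait loops,
 * e.g. polling for new data.  Field names follow perf_event.h; 'base'
 * is a hypothetical pointer to the mmap()ed area.
 *
 *	struct perf_event_mmap_page *pc = base;
 *	unsigned long head = pc->data_head;
 *	rmb();		// no record past 'head' is read before this point
 *	... consume records up to 'head' ...
 */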

#include <time.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>

#include "../../include/linux/perf_event.h"
#include "util/types.h"

/*
 * prctl(PR_TASK_PERF_EVENTS_DISABLE) will (cheaply) disable all
 * counters in the current task.
 */
#define PR_TASK_PERF_EVENTS_DISABLE   31
#define PR_TASK_PERF_EVENTS_ENABLE    32
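
/*
 * Illustrative sketch (not part of the original header): bracket just
 * the region of interest so that everything outside it is not counted.
 * Assumes <sys/prctl.h> for prctl(); run_workload() is a hypothetical
 * placeholder for the measured code.
 *
 *	prctl(PR_TASK_PERF_EVENTS_ENABLE);
 *	run_workload();
 *	prctl(PR_TASK_PERF_EVENTS_DISABLE);
 */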

#ifndef NSEC_PER_SEC
# define NSEC_PER_SEC			1000000000ULL
#endif

static inline unsigned long long rdclock(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
}
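
/*
 * Example (illustrative only): time a region in nanoseconds.  Assumes
 * <stdio.h> for printf(); do_work() is a hypothetical placeholder.
 *
 *	unsigned long long t0 = rdclock();
 *	do_work();
 *	printf("%llu ns\n", rdclock() - t0);
 */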

/*
 * Pick up some kernel type conventions:
 */
#define __user
#define asmlinkage

#define __used		__attribute__((__unused__))

#define unlikely(x)	__builtin_expect(!!(x), 0)

/*
 * Type-safe min(): the dummy pointer comparison makes the compiler
 * warn when x and y have incompatible types.
 */
#define min(x, y) ({				\
	typeof(x) _min1 = (x);			\
	typeof(y) _min2 = (y);			\
	(void) (&_min1 == &_min2);		\
	_min1 < _min2 ? _min1 : _min2; })

static inline int
sys_perf_event_open(struct perf_event_attr *attr,
		    pid_t pid, int cpu, int group_fd,
		    unsigned long flags)
{
	attr->size = sizeof(*attr);
	return syscall(__NR_perf_event_open, attr, pid, cpu,
		       group_fd, flags);
}
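
/*
 * Illustrative sketch (not part of the original header): count CPU
 * cycles for the calling thread on any CPU.  PERF_TYPE_HARDWARE and
 * PERF_COUNT_HW_CPU_CYCLES come from perf_event.h; error handling is
 * omitted and run_workload() is a hypothetical placeholder.
 *
 *	struct perf_event_attr attr = {
 *		.type	= PERF_TYPE_HARDWARE,
 *		.config	= PERF_COUNT_HW_CPU_CYCLES,
 *	};
 *	u64 count;
 *	int fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
 *
 *	run_workload();
 *	read(fd, &count, sizeof(count));
 *	close(fd);
 */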

#define MAX_COUNTERS			256
#define MAX_NR_CPUS			256

/*
 * Call chain as recorded in a PERF_SAMPLE_CALLCHAIN sample: nr
 * instruction pointers follow in ips[].
 */
struct ip_callchain {
	u64 nr;
	u64 ips[0];
};

#endif