#ifndef _PERF_PERF_H
#define _PERF_PERF_H

#include <time.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include <linux/types.h>

/*
 * prctl(PR_TASK_PERF_COUNTERS_DISABLE) will (cheaply) disable all
 * counters in the current task.
 */
#define PR_TASK_PERF_COUNTERS_DISABLE	31
#define PR_TASK_PERF_COUNTERS_ENABLE	32
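
/*
 * Example usage (sketch): bracket a region that should not be counted.
 * This assumes a kernel that implements these prctl options;
 * setup_phase() is a hypothetical placeholder.
 *
 *	prctl(PR_TASK_PERF_COUNTERS_DISABLE);
 *	setup_phase();
 *	prctl(PR_TASK_PERF_COUNTERS_ENABLE);
 */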

#ifndef NSEC_PER_SEC
# define NSEC_PER_SEC	1000000000ULL
#endif

static inline unsigned long long rdclock(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
}
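
/*
 * Example usage (sketch): time a region with rdclock(). run_workload()
 * is a hypothetical placeholder.
 *
 *	unsigned long long t0, t1;
 *
 *	t0 = rdclock();
 *	run_workload();
 *	t1 = rdclock();
 *	printf("workload took %llu ns\n", t1 - t0);
 */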

/*
 * Pick up some kernel type conventions:
 */
#define __user
#define asmlinkage

/*
 * Per-architecture perf_counter_open syscall number and the
 * rmb() / cpu_relax() primitives:
 */
#ifdef __x86_64__
#define __NR_perf_counter_open	298
#define rmb()		asm volatile("lfence" ::: "memory")
#define cpu_relax()	asm volatile("rep; nop" ::: "memory");
#endif

#ifdef __i386__
#define __NR_perf_counter_open	336
#define rmb()		asm volatile("lfence" ::: "memory")
#define cpu_relax()	asm volatile("rep; nop" ::: "memory");
#endif

#ifdef __powerpc__
#define __NR_perf_counter_open	319
#define rmb()		asm volatile ("sync" ::: "memory")
#define cpu_relax()	asm volatile ("" ::: "memory");
#endif
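
/*
 * Example usage (sketch): cpu_relax() keeps a busy-wait loop polite to
 * the CPU, and rmb() orders the flag read before reads of the data it
 * guards. 'ready', 'shared_data' and consume() are illustrative names,
 * not part of this header.
 *
 *	while (!ready)
 *		cpu_relax();
 *	rmb();
 *	consume(shared_data);
 */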

#define unlikely(x)	__builtin_expect(!!(x), 0)

/*
 * Type-checked min(): the (void) comparison of the two addresses makes
 * the compiler warn when x and y have incompatible types, and emits no
 * code.
 */
#define min(x, y) ({			\
	typeof(x) _min1 = (x);		\
	typeof(y) _min2 = (y);		\
	(void) (&_min1 == &_min2);	\
	_min1 < _min2 ? _min1 : _min2; })

static inline int
sys_perf_counter_open(struct perf_counter_hw_event *hw_event_uptr,
		      pid_t pid, int cpu, int group_fd,
		      unsigned long flags)
{
	return syscall(__NR_perf_counter_open, hw_event_uptr, pid, cpu,
		       group_fd, flags);
}
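
/*
 * Example usage (sketch): open a counter for the calling task (pid 0)
 * on any CPU (cpu -1), creating a new group (group_fd -1). The
 * struct perf_counter_hw_event definition comes from the kernel's
 * perf_counter ABI headers, which callers are assumed to have in
 * scope; filling in the event id is left out here.
 *
 *	struct perf_counter_hw_event hw_event;
 *	int fd;
 *
 *	memset(&hw_event, 0, sizeof(hw_event));
 *	... set the event id, e.g. one built with EID() ...
 *
 *	fd = sys_perf_counter_open(&hw_event, 0, -1, -1, 0);
 *	if (fd < 0)
 *		perror("sys_perf_counter_open");
 */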

#define MAX_COUNTERS	64
#define MAX_NR_CPUS	256

#define EID(type, id) (((__u64)(type) << PERF_COUNTER_TYPE_SHIFT) | (id))
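
/*
 * Example (sketch): EID() packs an event type and event id into a
 * single __u64. Enumerator names below follow this era's perf_counter
 * ABI and are assumed, not defined in this header.
 *
 *	__u64 cycles_id = EID(PERF_TYPE_HARDWARE, PERF_COUNT_CPU_CYCLES);
 */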

#endif /* _PERF_PERF_H */