/*
 * @file architecture specific interfaces
 * @remark Copyright 2008 Intel Corporation
 * @remark Read the file COPYING
 * @author Andi Kleen
 */

#if defined(__i386__) || defined(__x86_64__)

#include <string.h>	/* for strncmp() below */

/* Assume we run on the same host as the profilee */

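/* Bitmask covering the x least significant bits, e.g. num_to_mask(4) == 0xf */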
#define num_to_mask(x) ((1U << (x)) - 1)

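/* Check the CPU vendor: run CPUID leaf 0 and compare the 12-byte vendor
   string returned in ebx/edx/ecx (hence the b,d,c field order in the
   union below) against vnd. */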
static inline int cpuid_vendor(char *vnd)
{
	union {
		struct {
			unsigned b,d,c;
		};
		char v[12];
	} v;
	unsigned eax;
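	/* ebx may be reserved as the PIC register, so preserve it by hand
	   and route cpuid's ebx result through esi ("=S") instead.  The
	   pushl/popl pair assumes ia32; it will not assemble when this
	   header is built for __x86_64__. */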
	asm volatile( "pushl %%ebx; cpuid; movl %%ebx, %1; popl %%ebx"
	            : "=a" (eax), "=S" (v.b), "=c" (v.c), "=d" (v.d) : "0" (0));
	return !strncmp(v.v, vnd, 12);
}

/* Work around Nehalem spec update AAJ79: CPUID incorrectly indicates
   that the unhalted reference cycles architectural event is supported.
   We assume steppings after C0 report correct data in CPUID. */
static inline void workaround_nehalem_aaj79(unsigned *ebx)
{
	union {
		unsigned eax;
		struct {
			unsigned stepping : 4;
			unsigned model : 4;
			unsigned family : 4;
			unsigned type : 2;
			unsigned res : 2;
			unsigned ext_model : 4;
			unsigned ext_family : 8;
			unsigned res2 : 4;
		};
	} v;
	unsigned model;

	if (!cpuid_vendor("GenuineIntel"))
		return;
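	/* CPUID leaf 1 returns the processor signature in eax:
	   stepping/model/family plus the extended fields decoded by the
	   union above. */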
	asm volatile( "pushl %%ebx; cpuid; popl %%ebx"
	            : "=a" (v.eax) : "0" (1) : "ecx","edx");
	model = (v.ext_model << 4) + v.model;
	if (v.family != 6 || model != 26 || v.stepping > 4)
		return;
	*ebx |= (1 << 2);	/* mark unhalted reference cycles unsupported:
				   a set bit in CPUID.0AH ebx means the event
				   is NOT available */
}

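/* CPUID leaf 0xa describes the architectural perfmon events: ebx is a
   bit vector in which a set bit means the event is NOT available, and
   eax bits 31-24 give the number of valid bits in that vector.  The
   result is the set of events to filter out, truncated to the valid
   width. */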
static inline unsigned arch_get_filter(op_cpu cpu_type)
{
	if (cpu_type == CPU_ARCH_PERFMON) {
		unsigned ebx, eax;
		asm volatile( "pushl %%ebx; cpuid; movl %%ebx, %1; popl %%ebx"
		            : "=a" (eax), "=S" (ebx) : "0" (0xa) : "ecx","edx");
		workaround_nehalem_aaj79(&ebx);
		return ebx & num_to_mask(eax >> 24);
	}
	return -1U;
}

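/* Number of general-purpose performance counters per logical CPU:
   CPUID leaf 0xa, eax bits 15-8. */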
static inline int arch_num_counters(op_cpu cpu_type)
{
	if (cpu_type == CPU_ARCH_PERFMON) {
		unsigned v;
		asm volatile( "pushl %%ebx; cpuid; popl %%ebx"
		            : "=a" (v) : "0" (0xa) : "ecx","edx");
		return (v >> 8) & 0xff;
	}
	return -1;
}

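/* Bitmask with one bit set per usable performance counter, derived
   from the counter count in CPUID leaf 0xa, eax bits 15-8. */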
static inline unsigned arch_get_counter_mask(void)
{
	unsigned v;
	asm volatile( "pushl %%ebx; cpuid; popl %%ebx"
	            : "=a" (v) : "0" (0xa) : "ecx","edx");
	return num_to_mask((v >> 8) & 0xff);
}

#else

static inline unsigned arch_get_filter(op_cpu cpu_type)
{
	(void)cpu_type;		/* silence the unused-argument warning */
	return 0;
}

static inline int arch_num_counters(op_cpu cpu_type)
{
	(void)cpu_type;		/* silence the unused-argument warning */
	return -1;
}

static inline unsigned arch_get_counter_mask(void)
{
	return 0;
}

#endif
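
/*
 * Hypothetical usage sketch (not part of the original interface): one
 * way a caller might combine the helpers above to report the detected
 * perfmon layout.  The function name and the output format are
 * illustrative assumptions, so the block is kept under #if 0.
 */
#if 0
#include <stdio.h>

static void arch_dump_perfmon(op_cpu cpu_type)
{
	int n = arch_num_counters(cpu_type);

	if (n < 0) {
		printf("architectural perfmon not detected\n");
		return;
	}
	printf("%d counters, counter mask %#x, event filter %#x\n",
	       n, arch_get_counter_mask(), arch_get_filter(cpu_type));
}
#endif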