blob: c6693f19b09cb78944c229b9958fc58a86ab4de6 [file] [log] [blame]
njnc6168192004-11-29 13:54:10 +00001
2/*--------------------------------------------------------------------*/
3/*--- AMD64-specific definitions. amd64/cg_arch.c ---*/
4/*--------------------------------------------------------------------*/
5
6/*
7 This file is part of Cachegrind, a Valgrind tool for cache
8 profiling programs.
9
10 Copyright (C) 2002-2004 Nicholas Nethercote
11 njn25@cam.ac.uk
12
13 This program is free software; you can redistribute it and/or
14 modify it under the terms of the GNU General Public License as
15 published by the Free Software Foundation; either version 2 of the
16 License, or (at your option) any later version.
17
18 This program is distributed in the hope that it will be useful, but
19 WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 General Public License for more details.
22
23 You should have received a copy of the GNU General Public License
24 along with this program; if not, write to the Free Software
25 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
26 02111-1307, USA.
27
28 The GNU General Public License is contained in the file COPYING.
29*/
30
31#include "tool.h"
32#include "cg_arch.h"
33
// All CPUID info taken from sandpile.org/a32/cpuid.htm
// Probably only works for Intel and AMD chips, and probably only for some of
// them.
37
38#if 0
/* Warn that a Pentium's micro-op trace cache (whose capacity is counted
   in micro-ops, not bytes) is being approximated as an ordinary
   instruction cache of used_size KB with line_size-byte lines. */
static void micro_ops_warn(Int actual_size, Int used_size, Int line_size)
{
   VG_(message)(Vg_DebugMsg,
      "warning: Pentium with %d K micro-op instruction trace cache",
      actual_size);
   VG_(message)(Vg_DebugMsg,
      "         Simulating a %d KB cache with %d B lines",
      used_size, line_size);
}
48
49/* Intel method is truly wretched. We have to do an insane indexing into an
50 * array of pre-defined configurations for various parts of the memory
51 * hierarchy.
52 */
53static
54Int Intel_cache_info(Int level, cache_t* I1c, cache_t* D1c, cache_t* L2c)
55{
56 UChar info[16];
57 Int i, trials;
58 Bool L2_found = False;
59
60 if (level < 2) {
61 VG_(message)(Vg_DebugMsg,
62 "warning: CPUID level < 2 for Intel processor (%d)",
63 level);
64 return -1;
65 }
66
67 VG_(cpuid)(2, (Int*)&info[0], (Int*)&info[4],
68 (Int*)&info[8], (Int*)&info[12]);
69 trials = info[0] - 1; /* AL register - bits 0..7 of %eax */
70 info[0] = 0x0; /* reset AL */
71
72 if (0 != trials) {
73 VG_(message)(Vg_DebugMsg,
74 "warning: non-zero CPUID trials for Intel processor (%d)",
75 trials);
76 return -1;
77 }
78
79 for (i = 0; i < 16; i++) {
80
81 switch (info[i]) {
82
83 case 0x0: /* ignore zeros */
84 break;
85
86 /* TLB info, ignore */
87 case 0x01: case 0x02: case 0x03: case 0x04:
88 case 0x50: case 0x51: case 0x52: case 0x5b: case 0x5c: case 0x5d:
89 case 0xb0: case 0xb3:
90 break;
91
92 case 0x06: *I1c = (cache_t) { 8, 4, 32 }; break;
93 case 0x08: *I1c = (cache_t) { 16, 4, 32 }; break;
94 case 0x30: *I1c = (cache_t) { 32, 8, 64 }; break;
95
96 case 0x0a: *D1c = (cache_t) { 8, 2, 32 }; break;
97 case 0x0c: *D1c = (cache_t) { 16, 4, 32 }; break;
98 case 0x2c: *D1c = (cache_t) { 32, 8, 64 }; break;
99
100 /* IA-64 info -- panic! */
101 case 0x10: case 0x15: case 0x1a:
102 case 0x88: case 0x89: case 0x8a: case 0x8d:
103 case 0x90: case 0x96: case 0x9b:
104 VG_(tool_panic)("IA-64 cache detected?!");
105
106 case 0x22: case 0x23: case 0x25: case 0x29:
107 VG_(message)(Vg_DebugMsg,
108 "warning: L3 cache detected but ignored\n");
109 break;
110
111 /* These are sectored, whatever that means */
112 case 0x39: *L2c = (cache_t) { 128, 4, 64 }; L2_found = True; break;
113 case 0x3c: *L2c = (cache_t) { 256, 4, 64 }; L2_found = True; break;
114
115 /* If a P6 core, this means "no L2 cache".
116 If a P4 core, this means "no L3 cache".
117 We don't know what core it is, so don't issue a warning. To detect
118 a missing L2 cache, we use 'L2_found'. */
119 case 0x40:
120 break;
121
122 case 0x41: *L2c = (cache_t) { 128, 4, 32 }; L2_found = True; break;
123 case 0x42: *L2c = (cache_t) { 256, 4, 32 }; L2_found = True; break;
124 case 0x43: *L2c = (cache_t) { 512, 4, 32 }; L2_found = True; break;
125 case 0x44: *L2c = (cache_t) { 1024, 4, 32 }; L2_found = True; break;
126 case 0x45: *L2c = (cache_t) { 2048, 4, 32 }; L2_found = True; break;
127
128 /* These are sectored, whatever that means */
129 case 0x60: *D1c = (cache_t) { 16, 8, 64 }; break; /* sectored */
130 case 0x66: *D1c = (cache_t) { 8, 4, 64 }; break; /* sectored */
131 case 0x67: *D1c = (cache_t) { 16, 4, 64 }; break; /* sectored */
132 case 0x68: *D1c = (cache_t) { 32, 4, 64 }; break; /* sectored */
133
134 /* HACK ALERT: Instruction trace cache -- capacity is micro-ops based.
135 * conversion to byte size is a total guess; treat the 12K and 16K
136 * cases the same since the cache byte size must be a power of two for
137 * everything to work!. Also guessing 32 bytes for the line size...
138 */
139 case 0x70: /* 12K micro-ops, 8-way */
140 *I1c = (cache_t) { 16, 8, 32 };
141 micro_ops_warn(12, 16, 32);
142 break;
143 case 0x71: /* 16K micro-ops, 8-way */
144 *I1c = (cache_t) { 16, 8, 32 };
145 micro_ops_warn(16, 16, 32);
146 break;
147 case 0x72: /* 32K micro-ops, 8-way */
148 *I1c = (cache_t) { 32, 8, 32 };
149 micro_ops_warn(32, 32, 32);
150 break;
151
152 /* These are sectored, whatever that means */
153 case 0x79: *L2c = (cache_t) { 128, 8, 64 }; L2_found = True; break;
154 case 0x7a: *L2c = (cache_t) { 256, 8, 64 }; L2_found = True; break;
155 case 0x7b: *L2c = (cache_t) { 512, 8, 64 }; L2_found = True; break;
156 case 0x7c: *L2c = (cache_t) { 1024, 8, 64 }; L2_found = True; break;
157 case 0x7e: *L2c = (cache_t) { 256, 8, 128 }; L2_found = True; break;
158
159 case 0x81: *L2c = (cache_t) { 128, 8, 32 }; L2_found = True; break;
160 case 0x82: *L2c = (cache_t) { 256, 8, 32 }; L2_found = True; break;
161 case 0x83: *L2c = (cache_t) { 512, 8, 32 }; L2_found = True; break;
162 case 0x84: *L2c = (cache_t) { 1024, 8, 32 }; L2_found = True; break;
163 case 0x85: *L2c = (cache_t) { 2048, 8, 32 }; L2_found = True; break;
164 case 0x86: *L2c = (cache_t) { 512, 4, 64 }; L2_found = True; break;
165 case 0x87: *L2c = (cache_t) { 1024, 8, 64 }; L2_found = True; break;
166
167 default:
168 VG_(message)(Vg_DebugMsg,
169 "warning: Unknown Intel cache config value "
170 "(0x%x), ignoring", info[i]);
171 break;
172 }
173 }
174
175 if (!L2_found)
176 VG_(message)(Vg_DebugMsg,
177 "warning: L2 cache not installed, ignore L2 results.");
178
179 return 0;
180}
181
182/* AMD method is straightforward, just extract appropriate bits from the
183 * result registers.
184 *
185 * Bits, for D1 and I1:
186 * 31..24 data L1 cache size in KBs
187 * 23..16 data L1 cache associativity (FFh=full)
188 * 15.. 8 data L1 cache lines per tag
189 * 7.. 0 data L1 cache line size in bytes
190 *
191 * Bits, for L2:
192 * 31..16 unified L2 cache size in KBs
193 * 15..12 unified L2 cache associativity (0=off, FFh=full)
194 * 11.. 8 unified L2 cache lines per tag
195 * 7.. 0 unified L2 cache line size in bytes
196 *
197 * #3 The AMD K7 processor's L2 cache must be configured prior to relying
198 * upon this information. (Whatever that means -- njn)
199 *
200 * Also, according to Cyrille Chepelov, Duron stepping A0 processors (model
201 * 0x630) have a bug and misreport their L2 size as 1KB (it's really 64KB),
202 * so we detect that.
203 *
204 * Returns 0 on success, non-zero on failure.
205 */
206static
207Int AMD_cache_info(cache_t* I1c, cache_t* D1c, cache_t* L2c)
208{
209 UInt ext_level;
210 UInt dummy, model;
211 UInt I1i, D1i, L2i;
212
213 VG_(cpuid)(0x80000000, &ext_level, &dummy, &dummy, &dummy);
214
215 if (0 == (ext_level & 0x80000000) || ext_level < 0x80000006) {
216 VG_(message)(Vg_UserMsg,
217 "warning: ext_level < 0x80000006 for AMD processor (0x%x)",
218 ext_level);
219 return -1;
220 }
221
222 VG_(cpuid)(0x80000005, &dummy, &dummy, &D1i, &I1i);
223 VG_(cpuid)(0x80000006, &dummy, &dummy, &L2i, &dummy);
224
225 VG_(cpuid)(0x1, &model, &dummy, &dummy, &dummy);
226
227 /* Check for Duron bug */
228 if (model == 0x630) {
229 VG_(message)(Vg_UserMsg,
230 "Buggy Duron stepping A0. Assuming L2 size=65536 bytes");
231 L2i = (64 << 16) | (L2i & 0xffff);
232 }
233
234 D1c->size = (D1i >> 24) & 0xff;
235 D1c->assoc = (D1i >> 16) & 0xff;
236 D1c->line_size = (D1i >> 0) & 0xff;
237
238 I1c->size = (I1i >> 24) & 0xff;
239 I1c->assoc = (I1i >> 16) & 0xff;
240 I1c->line_size = (I1i >> 0) & 0xff;
241
242 L2c->size = (L2i >> 16) & 0xffff; /* Nb: different bits used for L2 */
243 L2c->assoc = (L2i >> 12) & 0xf;
244 L2c->line_size = (L2i >> 0) & 0xff;
245
246 return 0;
247}
248
/* Jump target used to bail out of VG_(cpuid) if the CPU raises SIGILL,
   i.e. it predates the CPUID instruction. */
static jmp_buf cpuid_jmpbuf;

/* SIGILL handler: abandon the CPUID attempt and resume execution at the
   matching __builtin_setjmp in get_caches_from_CPUID.
   NOTE(review): longjmp-ing out of a signal handler is only safe here
   because the handler runs on the ordinary stack -- appears intentional,
   but confirm against the framework's signal delivery semantics. */
static
void cpuid_SIGILL_handler(int signum)
{
   __builtin_longjmp(cpuid_jmpbuf, 1);
}
256
257static
258Int get_caches_from_CPUID(cache_t* I1c, cache_t* D1c, cache_t* L2c)
259{
260 Int level, res, ret;
261 Char vendor_id[13];
262 struct vki_sigaction sigill_new, sigill_saved;
263
264 /* Install own SIGILL handler */
265 sigill_new.ksa_handler = cpuid_SIGILL_handler;
266 sigill_new.sa_flags = 0;
267 sigill_new.sa_restorer = NULL;
268 res = VG_(sigemptyset)( &sigill_new.sa_mask );
269 tl_assert(res == 0);
270
271 res = VG_(sigaction)( VKI_SIGILL, &sigill_new, &sigill_saved );
272 tl_assert(res == 0);
273
274 /* Trap for illegal instruction, in case it's a really old processor that
275 * doesn't support CPUID. */
276 if (__builtin_setjmp(cpuid_jmpbuf) == 0) {
277 VG_(cpuid)(0, &level, (int*)&vendor_id[0],
278 (int*)&vendor_id[8], (int*)&vendor_id[4]);
279 vendor_id[12] = '\0';
280
281 /* Restore old SIGILL handler */
282 res = VG_(sigaction)( VKI_SIGILL, &sigill_saved, NULL );
283 tl_assert(res == 0);
284
285 } else {
286 VG_(message)(Vg_DebugMsg, "CPUID instruction not supported");
287
288 /* Restore old SIGILL handler */
289 res = VG_(sigaction)( VKI_SIGILL, &sigill_saved, NULL );
290 tl_assert(res == 0);
291 return -1;
292 }
293
294 if (0 == level) {
295 VG_(message)(Vg_DebugMsg, "CPUID level is 0, early Pentium?\n");
296 return -1;
297 }
298
299 /* Only handling Intel and AMD chips... no Cyrix, Transmeta, etc */
300 if (0 == VG_(strcmp)(vendor_id, "GenuineIntel")) {
301 ret = Intel_cache_info(level, I1c, D1c, L2c);
302
303 } else if (0 == VG_(strcmp)(vendor_id, "AuthenticAMD")) {
304 ret = AMD_cache_info(I1c, D1c, L2c);
305
306 } else if (0 == VG_(strcmp)(vendor_id, "CentaurHauls")) {
307 /* Total kludge. Pretend to be a VIA Nehemiah. */
308 D1c->size = 64;
309 D1c->assoc = 16;
310 D1c->line_size = 16;
311 I1c->size = 64;
312 I1c->assoc = 4;
313 I1c->line_size = 16;
314 L2c->size = 64;
315 L2c->assoc = 16;
316 L2c->line_size = 16;
317 ret = 0;
318
319 } else {
320 VG_(message)(Vg_DebugMsg, "CPU vendor ID not recognised (%s)",
321 vendor_id);
322 return -1;
323 }
324
325 /* Successful! Convert sizes from KB to bytes */
326 I1c->size *= 1024;
327 D1c->size *= 1024;
328 L2c->size *= 1024;
329
330 return ret;
331}
332#endif
333
334
// Set up the simulated I1/D1/L2 cache configuration for AMD64.  Not yet
// implemented: we abort immediately rather than silently simulate a wrong
// configuration.  The #if 0 body below is the intended implementation
// (defaults, then CPUID auto-detection, then a warning if detection fails
// and the user didn't fully specify the config on the command line),
// kept for when the port is completed.
void VGA_(configure_caches)(cache_t* I1c, cache_t* D1c, cache_t* L2c,
                            Bool all_caches_clo_defined)
{
   VG_(printf)("VGA_(configure_caches)() not yet implemented for AMD64\n");
   VG_(exit)(1);
#if 0
   Int res;

   // Set caches to default.
   *I1c = (cache_t) {  65536, 2, 64 };
   *D1c = (cache_t) {  65536, 2, 64 };
   *L2c = (cache_t) { 262144, 8, 64 };

   // Then replace with any info we can get from CPUID.
   res = get_caches_from_CPUID(I1c, D1c, L2c);

   // Warn if CPUID failed and config not completely specified from cmd line.
   if (res != 0 && !all_caches_clo_defined) {
      VG_(message)(Vg_DebugMsg,
                   "Warning: Couldn't auto-detect cache config, using one "
                   "or more defaults ");
   }
#endif
}
359
360/*--------------------------------------------------------------------*/
361/*--- end ---*/
362/*--------------------------------------------------------------------*/