/*
 * CPU x86 architecture debug code
 *
 * Copyright(C) 2009 Jaswinder Singh Rajput
 *
 * For licensing details see kernel-base/COPYING
 */
8
9#include <linux/interrupt.h>
10#include <linux/compiler.h>
11#include <linux/seq_file.h>
12#include <linux/debugfs.h>
13#include <linux/kprobes.h>
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/percpu.h>
17#include <linux/signal.h>
18#include <linux/errno.h>
19#include <linux/sched.h>
20#include <linux/types.h>
21#include <linux/init.h>
22#include <linux/slab.h>
23#include <linux/smp.h>
24
25#include <asm/cpu_debug.h>
26#include <asm/system.h>
27#include <asm/traps.h>
28#include <asm/apic.h>
29#include <asm/desc.h>
30
/* Per register type: debugfs dentry and "already created" flag, per CPU */
static DEFINE_PER_CPU(struct cpu_cpuX_base, cpu_arr[CPU_REG_ALL_BIT]);
/* Every cpu_private allocation for a CPU; freed in cpu_debug_exit() */
static DEFINE_PER_CPU(struct cpu_private *, priv_arr[MAX_CPU_FILES]);
/* Model-group flag computed by get_cpu_modelflag() */
static DEFINE_PER_CPU(unsigned, cpu_modelflag);
/* Number of cpu_private allocations recorded for this CPU */
static DEFINE_PER_CPU(int, cpu_priv_count);
/* Cached (vendor << 16) | (family << 8) | model for this CPU */
static DEFINE_PER_CPU(unsigned, cpu_model);

/* Serializes updates to priv_arr/cpu_priv_count/cpu_arr[].init */
static DEFINE_MUTEX(cpu_debug_lock);

/* Root of the debugfs tree ("cpu" under arch_debugfs_dir) */
static struct dentry *cpu_debugfs_dir;
40
/* Register-type table: debugfs directory name -> CPU_* type flag */
static struct cpu_debug_base cpu_base[] = {
	{ "mc",		CPU_MC		},	/* Machine Check	*/
	{ "monitor",	CPU_MONITOR	},	/* Monitor		*/
	{ "time",	CPU_TIME	},	/* Time			*/
	{ "pmc",	CPU_PMC		},	/* Performance Monitor	*/
	{ "platform",	CPU_PLATFORM	},	/* Platform		*/
	{ "apic",	CPU_APIC	},	/* APIC			*/
	{ "poweron",	CPU_POWERON	},	/* Power-on		*/
	{ "control",	CPU_CONTROL	},	/* Control		*/
	{ "features",	CPU_FEATURES	},	/* Features control	*/
	{ "lastbranch",	CPU_LBRANCH	},	/* Last Branch		*/
	{ "bios",	CPU_BIOS	},	/* BIOS			*/
	{ "freq",	CPU_FREQ	},	/* Frequency		*/
	{ "mtrr",	CPU_MTRR	},	/* MTRR			*/
	{ "perf",	CPU_PERF	},	/* Performance		*/
	{ "cache",	CPU_CACHE	},	/* Cache		*/
	{ "sysenter",	CPU_SYSENTER	},	/* Sysenter		*/
	{ "therm",	CPU_THERM	},	/* Thermal		*/
	{ "misc",	CPU_MISC	},	/* Miscellaneous	*/
	{ "debug",	CPU_DEBUG	},	/* Debug		*/
	{ "pat",	CPU_PAT		},	/* PAT			*/
	{ "vmx",	CPU_VMX		},	/* VMX			*/
	{ "call",	CPU_CALL	},	/* System Call		*/
	{ "base",	CPU_BASE	},	/* BASE Address		*/
	{ "smm",	CPU_SMM		},	/* System mgmt mode	*/
	{ "svm",	CPU_SVM		},	/* Secure Virtual Machine */
	{ "osvm",	CPU_OSVM	},	/* OS-Visible Workaround */
	{ "tss",	CPU_TSS		},	/* Task Stack Segment	*/
	{ "cr",		CPU_CR		},	/* Control Registers	*/
	{ "dt",		CPU_DT		},	/* Descriptor Table	*/
	{ "registers",	CPU_REG_ALL	},	/* Select all Registers	*/
};
73
/* Files created inside each per-MSR directory */
static struct cpu_file_base cpu_file[] = {
	{ "index", CPU_REG_ALL },	/* index */
	{ "value", CPU_REG_ALL },	/* value */
};
78
/*
 * Intel MSR ranges: { first MSR, last MSR, register type flag, model group }.
 * An entry applies to a CPU when its modelflag (get_cpu_modelflag())
 * intersects the entry's model group AND the requested type flag matches.
 */
static struct cpu_debug_range cpu_intel_range[] = {
	{ 0x00000000, 0x00000001, CPU_MC,	CPU_INTEL_ALL },
	{ 0x00000006, 0x00000007, CPU_MONITOR,	CPU_CX_AT_XE },
	{ 0x00000010, 0x00000010, CPU_TIME,	CPU_INTEL_ALL },
	{ 0x00000011, 0x00000013, CPU_PMC,	CPU_INTEL_PENTIUM },
	{ 0x00000017, 0x00000017, CPU_PLATFORM,	CPU_PX_CX_AT_XE },
	{ 0x0000001B, 0x0000001B, CPU_APIC,	CPU_P6_CX_AT_XE },

	{ 0x0000002A, 0x0000002A, CPU_POWERON,	CPU_PX_CX_AT_XE },
	{ 0x0000002B, 0x0000002B, CPU_POWERON,	CPU_INTEL_XEON },
	{ 0x0000002C, 0x0000002C, CPU_FREQ,	CPU_INTEL_XEON },
	{ 0x0000003A, 0x0000003A, CPU_CONTROL,	CPU_CX_AT_XE },

	{ 0x00000040, 0x00000043, CPU_LBRANCH,	CPU_PM_CX_AT_XE },
	{ 0x00000044, 0x00000047, CPU_LBRANCH,	CPU_PM_CO_AT },
	{ 0x00000060, 0x00000063, CPU_LBRANCH,	CPU_C2_AT },
	{ 0x00000064, 0x00000067, CPU_LBRANCH,	CPU_INTEL_ATOM },

	{ 0x00000079, 0x00000079, CPU_BIOS,	CPU_P6_CX_AT_XE },
	{ 0x00000088, 0x0000008A, CPU_CACHE,	CPU_INTEL_P6 },
	{ 0x0000008B, 0x0000008B, CPU_BIOS,	CPU_P6_CX_AT_XE },
	{ 0x0000009B, 0x0000009B, CPU_MONITOR,	CPU_INTEL_XEON },

	{ 0x000000C1, 0x000000C2, CPU_PMC,	CPU_P6_CX_AT },
	{ 0x000000CD, 0x000000CD, CPU_FREQ,	CPU_CX_AT },
	{ 0x000000E7, 0x000000E8, CPU_PERF,	CPU_CX_AT },
	{ 0x000000FE, 0x000000FE, CPU_MTRR,	CPU_P6_CX_XE },

	{ 0x00000116, 0x00000116, CPU_CACHE,	CPU_INTEL_P6 },
	{ 0x00000118, 0x00000118, CPU_CACHE,	CPU_INTEL_P6 },
	{ 0x00000119, 0x00000119, CPU_CACHE,	CPU_INTEL_PX },
	{ 0x0000011A, 0x0000011B, CPU_CACHE,	CPU_INTEL_P6 },
	{ 0x0000011E, 0x0000011E, CPU_CACHE,	CPU_PX_CX_AT },

	{ 0x00000174, 0x00000176, CPU_SYSENTER,	CPU_P6_CX_AT_XE },
	{ 0x00000179, 0x0000017A, CPU_MC,	CPU_PX_CX_AT_XE },
	{ 0x0000017B, 0x0000017B, CPU_MC,	CPU_P6_XE },
	{ 0x00000186, 0x00000187, CPU_PMC,	CPU_P6_CX_AT },
	{ 0x00000198, 0x00000199, CPU_PERF,	CPU_PM_CX_AT_XE },
	{ 0x0000019A, 0x0000019A, CPU_TIME,	CPU_PM_CX_AT_XE },
	{ 0x0000019B, 0x0000019D, CPU_THERM,	CPU_PM_CX_AT_XE },
	{ 0x000001A0, 0x000001A0, CPU_MISC,	CPU_PM_CX_AT_XE },

	{ 0x000001C9, 0x000001C9, CPU_LBRANCH,	CPU_PM_CX_AT },
	{ 0x000001D7, 0x000001D8, CPU_LBRANCH,	CPU_INTEL_XEON },
	{ 0x000001D9, 0x000001D9, CPU_DEBUG,	CPU_CX_AT_XE },
	{ 0x000001DA, 0x000001DA, CPU_LBRANCH,	CPU_INTEL_XEON },
	{ 0x000001DB, 0x000001DB, CPU_LBRANCH,	CPU_P6_XE },
	{ 0x000001DC, 0x000001DC, CPU_LBRANCH,	CPU_INTEL_P6 },
	{ 0x000001DD, 0x000001DE, CPU_LBRANCH,	CPU_PX_CX_AT_XE },
	{ 0x000001E0, 0x000001E0, CPU_LBRANCH,	CPU_INTEL_P6 },

	{ 0x00000200, 0x0000020F, CPU_MTRR,	CPU_P6_CX_XE },
	{ 0x00000250, 0x00000250, CPU_MTRR,	CPU_P6_CX_XE },
	{ 0x00000258, 0x00000259, CPU_MTRR,	CPU_P6_CX_XE },
	{ 0x00000268, 0x0000026F, CPU_MTRR,	CPU_P6_CX_XE },
	{ 0x00000277, 0x00000277, CPU_PAT,	CPU_C2_AT_XE },
	{ 0x000002FF, 0x000002FF, CPU_MTRR,	CPU_P6_CX_XE },

	{ 0x00000300, 0x00000308, CPU_PMC,	CPU_INTEL_XEON },
	{ 0x00000309, 0x0000030B, CPU_PMC,	CPU_C2_AT_XE },
	{ 0x0000030C, 0x00000311, CPU_PMC,	CPU_INTEL_XEON },
	{ 0x00000345, 0x00000345, CPU_PMC,	CPU_C2_AT },
	{ 0x00000360, 0x00000371, CPU_PMC,	CPU_INTEL_XEON },
	{ 0x0000038D, 0x00000390, CPU_PMC,	CPU_C2_AT },
	{ 0x000003A0, 0x000003BE, CPU_PMC,	CPU_INTEL_XEON },
	{ 0x000003C0, 0x000003CD, CPU_PMC,	CPU_INTEL_XEON },
	{ 0x000003E0, 0x000003E1, CPU_PMC,	CPU_INTEL_XEON },
	{ 0x000003F0, 0x000003F0, CPU_PMC,	CPU_INTEL_XEON },
	{ 0x000003F1, 0x000003F1, CPU_PMC,	CPU_C2_AT_XE },
	{ 0x000003F2, 0x000003F2, CPU_PMC,	CPU_INTEL_XEON },

	{ 0x00000400, 0x00000402, CPU_MC,	CPU_PM_CX_AT_XE },
	{ 0x00000403, 0x00000403, CPU_MC,	CPU_INTEL_XEON },
	{ 0x00000404, 0x00000406, CPU_MC,	CPU_PM_CX_AT_XE },
	{ 0x00000407, 0x00000407, CPU_MC,	CPU_INTEL_XEON },
	{ 0x00000408, 0x0000040A, CPU_MC,	CPU_PM_CX_AT_XE },
	{ 0x0000040B, 0x0000040B, CPU_MC,	CPU_INTEL_XEON },
	{ 0x0000040C, 0x0000040E, CPU_MC,	CPU_PM_CX_XE },
	{ 0x0000040F, 0x0000040F, CPU_MC,	CPU_INTEL_XEON },
	{ 0x00000410, 0x00000412, CPU_MC,	CPU_PM_CX_AT_XE },
	{ 0x00000413, 0x00000417, CPU_MC,	CPU_CX_AT_XE },
	{ 0x00000480, 0x0000048B, CPU_VMX,	CPU_CX_AT_XE },

	{ 0x00000600, 0x00000600, CPU_DEBUG,	CPU_PM_CX_AT_XE },
	{ 0x00000680, 0x0000068F, CPU_LBRANCH,	CPU_INTEL_XEON },
	{ 0x000006C0, 0x000006CF, CPU_LBRANCH,	CPU_INTEL_XEON },

	{ 0x000107CC, 0x000107D3, CPU_PMC,	CPU_INTEL_XEON_MP },

	{ 0xC0000080, 0xC0000080, CPU_FEATURES,	CPU_INTEL_XEON },
	{ 0xC0000081, 0xC0000082, CPU_CALL,	CPU_INTEL_XEON },
	{ 0xC0000084, 0xC0000084, CPU_CALL,	CPU_INTEL_XEON },
	{ 0xC0000100, 0xC0000102, CPU_BASE,	CPU_INTEL_XEON },
};
175
/*
 * AMD MSR ranges: { first MSR, last MSR, register type flag, model group }.
 * All entries use CPU_ALL — the model field is not checked for AMD
 * (see is_typeflag_valid()/get_cpu_range()).
 */
static struct cpu_debug_range cpu_amd_range[] = {
	{ 0x00000010, 0x00000010, CPU_TIME,	CPU_ALL, },
	{ 0x0000001B, 0x0000001B, CPU_APIC,	CPU_ALL, },
	{ 0x000000FE, 0x000000FE, CPU_MTRR,	CPU_ALL, },

	{ 0x00000174, 0x00000176, CPU_SYSENTER,	CPU_ALL, },
	{ 0x00000179, 0x0000017A, CPU_MC,	CPU_ALL, },
	{ 0x0000017B, 0x0000017B, CPU_MC,	CPU_ALL, },
	{ 0x000001D9, 0x000001D9, CPU_DEBUG,	CPU_ALL, },
	{ 0x000001DB, 0x000001DE, CPU_LBRANCH,	CPU_ALL, },

	{ 0x00000200, 0x0000020F, CPU_MTRR,	CPU_ALL, },
	{ 0x00000250, 0x00000250, CPU_MTRR,	CPU_ALL, },
	{ 0x00000258, 0x00000259, CPU_MTRR,	CPU_ALL, },
	{ 0x00000268, 0x0000026F, CPU_MTRR,	CPU_ALL, },
	{ 0x00000277, 0x00000277, CPU_PAT,	CPU_ALL, },
	{ 0x000002FF, 0x000002FF, CPU_MTRR,	CPU_ALL, },

	{ 0x00000400, 0x00000417, CPU_MC,	CPU_ALL, },

	{ 0xC0000080, 0xC0000080, CPU_FEATURES,	CPU_ALL, },
	{ 0xC0000081, 0xC0000084, CPU_CALL,	CPU_ALL, },
	{ 0xC0000100, 0xC0000102, CPU_BASE,	CPU_ALL, },
	{ 0xC0000103, 0xC0000103, CPU_TIME,	CPU_ALL, },

	{ 0xC0000408, 0xC000040A, CPU_MC,	CPU_ALL, },

	{ 0xc0010000, 0xc0010007, CPU_PMC,	CPU_ALL, },
	{ 0xc0010010, 0xc0010010, CPU_MTRR,	CPU_ALL, },
	{ 0xc0010016, 0xc001001A, CPU_MTRR,	CPU_ALL, },
	{ 0xc001001D, 0xc001001D, CPU_MTRR,	CPU_ALL, },
	{ 0xc0010030, 0xc0010035, CPU_BIOS,	CPU_ALL, },
	{ 0xc0010056, 0xc0010056, CPU_SMM,	CPU_ALL, },
	{ 0xc0010061, 0xc0010063, CPU_SMM,	CPU_ALL, },
	{ 0xc0010074, 0xc0010074, CPU_MC,	CPU_ALL, },
	{ 0xc0010111, 0xc0010113, CPU_SMM,	CPU_ALL, },
	{ 0xc0010114, 0xc0010118, CPU_SVM,	CPU_ALL, },
	{ 0xc0010119, 0xc001011A, CPU_SMM,	CPU_ALL, },
	{ 0xc0010140, 0xc0010141, CPU_OSVM,	CPU_ALL, },
	{ 0xc0010156, 0xc0010156, CPU_SMM,	CPU_ALL, },
};
218
219
220static int get_cpu_modelflag(unsigned cpu)
221{
222 int flag;
223
224 switch (per_cpu(cpu_model, cpu)) {
225 /* Intel */
226 case 0x0501:
227 case 0x0502:
228 case 0x0504:
229 flag = CPU_INTEL_PENTIUM;
230 break;
231 case 0x0601:
232 case 0x0603:
233 case 0x0605:
234 case 0x0607:
235 case 0x0608:
236 case 0x060A:
237 case 0x060B:
238 flag = CPU_INTEL_P6;
239 break;
240 case 0x0609:
241 case 0x060D:
242 flag = CPU_INTEL_PENTIUM_M;
243 break;
244 case 0x060E:
245 flag = CPU_INTEL_CORE;
246 break;
247 case 0x060F:
248 case 0x0617:
249 flag = CPU_INTEL_CORE2;
250 break;
251 case 0x061C:
252 flag = CPU_INTEL_ATOM;
253 break;
254 case 0x0F00:
255 case 0x0F01:
256 case 0x0F02:
257 case 0x0F03:
258 case 0x0F04:
259 flag = CPU_INTEL_XEON_P4;
260 break;
261 case 0x0F06:
262 flag = CPU_INTEL_XEON_MP;
263 break;
264 default:
265 flag = CPU_NONE;
266 break;
267 }
268
269 return flag;
270}
271
272static int get_cpu_range_count(unsigned cpu)
273{
274 int index;
275
276 switch (per_cpu(cpu_model, cpu) >> 16) {
277 case X86_VENDOR_INTEL:
278 index = ARRAY_SIZE(cpu_intel_range);
279 break;
280 case X86_VENDOR_AMD:
281 index = ARRAY_SIZE(cpu_amd_range);
282 break;
283 default:
284 index = 0;
285 break;
286 }
287
288 return index;
289}
290
291static int is_typeflag_valid(unsigned cpu, unsigned flag)
292{
293 unsigned vendor, modelflag;
294 int i, index;
295
296 /* Standard Registers should be always valid */
297 if (flag >= CPU_TSS)
298 return 1;
299
300 modelflag = per_cpu(cpu_modelflag, cpu);
301 vendor = per_cpu(cpu_model, cpu) >> 16;
302 index = get_cpu_range_count(cpu);
303
304 for (i = 0; i < index; i++) {
305 switch (vendor) {
306 case X86_VENDOR_INTEL:
307 if ((cpu_intel_range[i].model & modelflag) &&
308 (cpu_intel_range[i].flag & flag))
309 return 1;
310 break;
311 case X86_VENDOR_AMD:
312 if (cpu_amd_range[i].flag & flag)
313 return 1;
314 break;
315 }
316 }
317
318 /* Invalid */
319 return 0;
320}
321
322static unsigned get_cpu_range(unsigned cpu, unsigned *min, unsigned *max,
323 int index, unsigned flag)
324{
325 unsigned modelflag;
326
327 modelflag = per_cpu(cpu_modelflag, cpu);
328 *max = 0;
329 switch (per_cpu(cpu_model, cpu) >> 16) {
330 case X86_VENDOR_INTEL:
331 if ((cpu_intel_range[index].model & modelflag) &&
332 (cpu_intel_range[index].flag & flag)) {
333 *min = cpu_intel_range[index].min;
334 *max = cpu_intel_range[index].max;
335 }
336 break;
337 case X86_VENDOR_AMD:
338 if (cpu_amd_range[index].flag & flag) {
339 *min = cpu_amd_range[index].min;
340 *max = cpu_amd_range[index].max;
341 }
342 break;
343 }
344
345 return *max;
346}
347
348/* This function can also be called with seq = NULL for printk */
349static void print_cpu_data(struct seq_file *seq, unsigned type,
350 u32 low, u32 high)
351{
352 struct cpu_private *priv;
353 u64 val = high;
354
355 if (seq) {
356 priv = seq->private;
357 if (priv->file) {
358 val = (val << 32) | low;
359 seq_printf(seq, "0x%llx\n", val);
360 } else
361 seq_printf(seq, " %08x: %08x_%08x\n",
362 type, high, low);
363 } else
364 printk(KERN_INFO " %08x: %08x_%08x\n", type, high, low);
365}
366
367/* This function can also be called with seq = NULL for printk */
368static void print_msr(struct seq_file *seq, unsigned cpu, unsigned flag)
369{
370 unsigned msr, msr_min, msr_max;
371 struct cpu_private *priv;
372 u32 low, high;
373 int i, range;
374
375 if (seq) {
376 priv = seq->private;
377 if (priv->file) {
378 if (!rdmsr_safe_on_cpu(priv->cpu, priv->reg,
379 &low, &high))
380 print_cpu_data(seq, priv->reg, low, high);
381 return;
382 }
383 }
384
385 range = get_cpu_range_count(cpu);
386
387 for (i = 0; i < range; i++) {
388 if (!get_cpu_range(cpu, &msr_min, &msr_max, i, flag))
389 continue;
390
391 for (msr = msr_min; msr <= msr_max; msr++) {
392 if (rdmsr_safe_on_cpu(cpu, msr, &low, &high))
393 continue;
394 print_cpu_data(seq, msr, low, high);
395 }
396 }
397}
398
/*
 * Dump the saved pt_regs of the task currently running on the target
 * CPU, plus the live segment selectors. Runs on that CPU via
 * smp_call_function_single(), so "current" is the remote CPU's task.
 * NOTE(review): register names and %016lx widths are 64-bit style even
 * on 32-bit builds — presumably tolerated for a debug file; confirm.
 */
static void print_tss(void *arg)
{
	struct pt_regs *regs = task_pt_regs(current);
	struct seq_file *seq = arg;
	unsigned int seg;

	seq_printf(seq, " RAX\t: %016lx\n", regs->ax);
	seq_printf(seq, " RBX\t: %016lx\n", regs->bx);
	seq_printf(seq, " RCX\t: %016lx\n", regs->cx);
	seq_printf(seq, " RDX\t: %016lx\n", regs->dx);

	seq_printf(seq, " RSI\t: %016lx\n", regs->si);
	seq_printf(seq, " RDI\t: %016lx\n", regs->di);
	seq_printf(seq, " RBP\t: %016lx\n", regs->bp);
	seq_printf(seq, " ESP\t: %016lx\n", regs->sp);

#ifdef CONFIG_X86_64
	seq_printf(seq, " R08\t: %016lx\n", regs->r8);
	seq_printf(seq, " R09\t: %016lx\n", regs->r9);
	seq_printf(seq, " R10\t: %016lx\n", regs->r10);
	seq_printf(seq, " R11\t: %016lx\n", regs->r11);
	seq_printf(seq, " R12\t: %016lx\n", regs->r12);
	seq_printf(seq, " R13\t: %016lx\n", regs->r13);
	seq_printf(seq, " R14\t: %016lx\n", regs->r14);
	seq_printf(seq, " R15\t: %016lx\n", regs->r15);
#endif

	/* Live segment selectors of this CPU (SS comes from pt_regs) */
	asm("movl %%cs,%0" : "=r" (seg));
	seq_printf(seq, " CS\t: %04x\n", seg);
	asm("movl %%ds,%0" : "=r" (seg));
	seq_printf(seq, " DS\t: %04x\n", seg);
	seq_printf(seq, " SS\t: %04lx\n", regs->ss);
	asm("movl %%es,%0" : "=r" (seg));
	seq_printf(seq, " ES\t: %04x\n", seg);
	asm("movl %%fs,%0" : "=r" (seg));
	seq_printf(seq, " FS\t: %04x\n", seg);
	asm("movl %%gs,%0" : "=r" (seg));
	seq_printf(seq, " GS\t: %04x\n", seg);

	seq_printf(seq, " EFLAGS\t: %016lx\n", regs->flags);

	seq_printf(seq, " EIP\t: %016lx\n", regs->ip);
}
442
/*
 * Dump control registers of the CPU this runs on (invoked via
 * smp_call_function_single() with the seq_file as argument).
 */
static void print_cr(void *arg)
{
	struct seq_file *seq = arg;

	seq_printf(seq, " cr0\t: %016lx\n", read_cr0());
	seq_printf(seq, " cr2\t: %016lx\n", read_cr2());
	seq_printf(seq, " cr3\t: %016lx\n", read_cr3());
	/* read_cr4_safe() yields 0 on CPUs without CR4 */
	seq_printf(seq, " cr4\t: %016lx\n", read_cr4_safe());
#ifdef CONFIG_X86_64
	seq_printf(seq, " cr8\t: %016lx\n", read_cr8());
#endif
}
455
456static void print_desc_ptr(char *str, struct seq_file *seq, struct desc_ptr dt)
457{
458 seq_printf(seq, " %s\t: %016llx\n", str, (u64)(dt.address | dt.size));
459}
460
461static void print_dt(void *seq)
462{
463 struct desc_ptr dt;
464 unsigned long ldt;
465
466 /* IDT */
467 store_idt((struct desc_ptr *)&dt);
468 print_desc_ptr("IDT", seq, dt);
469
470 /* GDT */
471 store_gdt((struct desc_ptr *)&dt);
472 print_desc_ptr("GDT", seq, dt);
473
474 /* LDT */
475 store_ldt(ldt);
476 seq_printf(seq, " LDT\t: %016lx\n", ldt);
477
478 /* TR */
479 store_tr(ldt);
480 seq_printf(seq, " TR\t: %016lx\n", ldt);
481}
482
/*
 * Dump debug registers dr0-dr3, dr6, dr7 of the CPU this runs on;
 * dr4/dr5 are reserved aliases and are skipped.
 */
static void print_dr(void *arg)
{
	struct seq_file *seq = arg;
	unsigned long val;
	int i;

	for (i = 0; i < 8; i++) {
		/* Ignore db4, db5 */
		if (i == 4 || i == 5)
			continue;

		get_debugreg(val, i);
		seq_printf(seq, " dr%d\t: %016lx\n", i, val);
	}

	/* Header for the MSR dump that follows in cpu_seq_show() */
	seq_printf(seq, "\n MSR\t:\n");
}
499
/*
 * Dump the local APIC registers of the CPU this runs on (invoked via
 * smp_call_function_single()). No-op body when the kernel is built
 * without local APIC support; the trailing MSR header is always printed.
 */
static void print_apic(void *arg)
{
	struct seq_file *seq = arg;

#ifdef CONFIG_X86_LOCAL_APIC
	seq_printf(seq, " LAPIC\t:\n");
	/* APIC ID lives in the top byte of the ID register */
	seq_printf(seq, " ID\t\t: %08x\n",  apic_read(APIC_ID) >> 24);
	seq_printf(seq, " LVR\t\t: %08x\n",  apic_read(APIC_LVR));
	seq_printf(seq, " TASKPRI\t: %08x\n",  apic_read(APIC_TASKPRI));
	seq_printf(seq, " ARBPRI\t\t: %08x\n",  apic_read(APIC_ARBPRI));
	seq_printf(seq, " PROCPRI\t: %08x\n",  apic_read(APIC_PROCPRI));
	seq_printf(seq, " LDR\t\t: %08x\n",  apic_read(APIC_LDR));
	seq_printf(seq, " DFR\t\t: %08x\n",  apic_read(APIC_DFR));
	seq_printf(seq, " SPIV\t\t: %08x\n",  apic_read(APIC_SPIV));
	seq_printf(seq, " ISR\t\t: %08x\n",  apic_read(APIC_ISR));
	seq_printf(seq, " ESR\t\t: %08x\n",  apic_read(APIC_ESR));
	seq_printf(seq, " ICR\t\t: %08x\n",  apic_read(APIC_ICR));
	seq_printf(seq, " ICR2\t\t: %08x\n",  apic_read(APIC_ICR2));
	seq_printf(seq, " LVTT\t\t: %08x\n",  apic_read(APIC_LVTT));
	seq_printf(seq, " LVTTHMR\t: %08x\n",  apic_read(APIC_LVTTHMR));
	seq_printf(seq, " LVTPC\t\t: %08x\n",  apic_read(APIC_LVTPC));
	seq_printf(seq, " LVT0\t\t: %08x\n",  apic_read(APIC_LVT0));
	seq_printf(seq, " LVT1\t\t: %08x\n",  apic_read(APIC_LVT1));
	seq_printf(seq, " LVTERR\t\t: %08x\n",  apic_read(APIC_LVTERR));
	seq_printf(seq, " TMICT\t\t: %08x\n",  apic_read(APIC_TMICT));
	seq_printf(seq, " TMCCT\t\t: %08x\n",  apic_read(APIC_TMCCT));
	seq_printf(seq, " TDCR\t\t: %08x\n",  apic_read(APIC_TDCR));
#endif /* CONFIG_X86_LOCAL_APIC */

	/* Header for the MSR dump that follows in cpu_seq_show() */
	seq_printf(seq, "\n MSR\t:\n");
}
531
/*
 * seq_file show: dispatch on the register type behind this debugfs
 * file. TSS/CR/DT dumps must execute on the target CPU, so they go
 * through smp_call_function_single(); MSR-backed types are read with
 * rdmsr_safe_on_cpu() directly from here.
 */
static int cpu_seq_show(struct seq_file *seq, void *v)
{
	struct cpu_private *priv = seq->private;

	if (priv == NULL)
		return -EINVAL;

	switch (cpu_base[priv->type].flag) {
	case CPU_TSS:
		smp_call_function_single(priv->cpu, print_tss, seq, 1);
		break;
	case CPU_CR:
		smp_call_function_single(priv->cpu, print_cr, seq, 1);
		break;
	case CPU_DT:
		smp_call_function_single(priv->cpu, print_dt, seq, 1);
		break;
	case CPU_DEBUG:
		/* "index" file: dump debug registers before the MSRs */
		if (priv->file == CPU_INDEX_BIT)
			smp_call_function_single(priv->cpu, print_dr, seq, 1);
		print_msr(seq, priv->cpu, cpu_base[priv->type].flag);
		break;
	case CPU_APIC:
		/* "index" file: dump local APIC state before the MSRs */
		if (priv->file == CPU_INDEX_BIT)
			smp_call_function_single(priv->cpu, print_apic, seq, 1);
		print_msr(seq, priv->cpu, cpu_base[priv->type].flag);
		break;

	default:
		print_msr(seq, priv->cpu, cpu_base[priv->type].flag);
		break;
	}
	seq_printf(seq, "\n");

	return 0;
}
568
569static void *cpu_seq_start(struct seq_file *seq, loff_t *pos)
570{
571 if (*pos == 0) /* One time is enough ;-) */
572 return seq;
573
574 return NULL;
575}
576
577static void *cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
578{
579 (*pos)++;
580
581 return cpu_seq_start(seq, pos);
582}
583
/* seq_file stop: nothing to release. */
static void cpu_seq_stop(struct seq_file *seq, void *v)
{
}
587
/* Single-record seq_file iterator (see cpu_seq_start()). */
static const struct seq_operations cpu_seq_ops = {
	.start		= cpu_seq_start,
	.next		= cpu_seq_next,
	.stop		= cpu_seq_stop,
	.show		= cpu_seq_show,
};
594
595static int cpu_seq_open(struct inode *inode, struct file *file)
596{
597 struct cpu_private *priv = inode->i_private;
598 struct seq_file *seq;
599 int err;
600
601 err = seq_open(file, &cpu_seq_ops);
602 if (!err) {
603 seq = file->private_data;
604 seq->private = priv;
605 }
606
607 return err;
608}
609
/* File operations for every register debugfs file (read-only seq_file). */
static const struct file_operations cpu_fops = {
	.open		= cpu_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
616
617static int cpu_create_file(unsigned cpu, unsigned type, unsigned reg,
618 unsigned file, struct dentry *dentry)
619{
620 struct cpu_private *priv = NULL;
621
622 /* Already intialized */
623 if (file == CPU_INDEX_BIT)
624 if (per_cpu(cpu_arr[type].init, cpu))
625 return 0;
626
627 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
628 if (priv == NULL)
629 return -ENOMEM;
630
631 priv->cpu = cpu;
632 priv->type = type;
633 priv->reg = reg;
634 priv->file = file;
635 mutex_lock(&cpu_debug_lock);
636 per_cpu(priv_arr[type], cpu) = priv;
637 per_cpu(cpu_priv_count, cpu)++;
638 mutex_unlock(&cpu_debug_lock);
639
640 if (file)
641 debugfs_create_file(cpu_file[file].name, S_IRUGO,
642 dentry, (void *)priv, &cpu_fops);
643 else {
644 debugfs_create_file(cpu_base[type].name, S_IRUGO,
645 per_cpu(cpu_arr[type].dentry, cpu),
646 (void *)priv, &cpu_fops);
647 mutex_lock(&cpu_debug_lock);
648 per_cpu(cpu_arr[type].init, cpu) = 1;
649 mutex_unlock(&cpu_debug_lock);
650 }
651
652 return 0;
653}
654
655static int cpu_init_regfiles(unsigned cpu, unsigned int type, unsigned reg,
656 struct dentry *dentry)
657{
658 unsigned file;
659 int err = 0;
660
661 for (file = 0; file < ARRAY_SIZE(cpu_file); file++) {
662 err = cpu_create_file(cpu, type, reg, file, dentry);
663 if (err)
664 return err;
665 }
666
667 return err;
668}
669
670static int cpu_init_msr(unsigned cpu, unsigned type, struct dentry *dentry)
671{
672 struct dentry *cpu_dentry = NULL;
673 unsigned reg, reg_min, reg_max;
674 int i, range, err = 0;
675 char reg_dir[12];
676 u32 low, high;
677
678 range = get_cpu_range_count(cpu);
679
680 for (i = 0; i < range; i++) {
681 if (!get_cpu_range(cpu, &reg_min, &reg_max, i,
682 cpu_base[type].flag))
683 continue;
684
685 for (reg = reg_min; reg <= reg_max; reg++) {
686 if (rdmsr_safe_on_cpu(cpu, reg, &low, &high))
687 continue;
688
689 sprintf(reg_dir, "0x%x", reg);
690 cpu_dentry = debugfs_create_dir(reg_dir, dentry);
691 err = cpu_init_regfiles(cpu, type, reg, cpu_dentry);
692 if (err)
693 return err;
694 }
695 }
696
697 return err;
698}
699
700static int cpu_init_allreg(unsigned cpu, struct dentry *dentry)
701{
702 struct dentry *cpu_dentry = NULL;
703 unsigned type;
704 int err = 0;
705
706 for (type = 0; type < ARRAY_SIZE(cpu_base) - 1; type++) {
707 if (!is_typeflag_valid(cpu, cpu_base[type].flag))
708 continue;
709 cpu_dentry = debugfs_create_dir(cpu_base[type].name, dentry);
710 per_cpu(cpu_arr[type].dentry, cpu) = cpu_dentry;
711
712 if (type < CPU_TSS_BIT)
713 err = cpu_init_msr(cpu, type, cpu_dentry);
714 else
715 err = cpu_create_file(cpu, type, 0, CPU_INDEX_BIT,
716 cpu_dentry);
717 if (err)
718 return err;
719 }
720
721 return err;
722}
723
/*
 * Create the cpu<N>/<type>/... debugfs hierarchy for every CPU with MSR
 * support, caching its model word and model-group flag along the way.
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): this walks ids 0..nr_cpu_ids-1 and reads cpu_data() for
 * each, including CPUs that may not be online — confirm this is safe
 * with respect to CPU hotplug.
 */
static int cpu_init_cpu(void)
{
	struct dentry *cpu_dentry = NULL;
	struct cpuinfo_x86 *cpui;
	char cpu_dir[12];
	unsigned cpu;
	int err = 0;

	for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
		cpui = &cpu_data(cpu);
		if (!cpu_has(cpui, X86_FEATURE_MSR))
			continue;
		/* Cache (vendor << 16) | (family << 8) | model */
		per_cpu(cpu_model, cpu) = ((cpui->x86_vendor << 16) |
					   (cpui->x86 << 8) |
					   (cpui->x86_model));
		per_cpu(cpu_modelflag, cpu) = get_cpu_modelflag(cpu);

		sprintf(cpu_dir, "cpu%d", cpu);
		cpu_dentry = debugfs_create_dir(cpu_dir, cpu_debugfs_dir);
		err = cpu_init_allreg(cpu, cpu_dentry);

		pr_info("cpu%d(%d) debug files %d\n",
			cpu, nr_cpu_ids, per_cpu(cpu_priv_count, cpu));
		/* Clamp so cpu_debug_exit() never walks past priv_arr[] */
		if (per_cpu(cpu_priv_count, cpu) > MAX_CPU_FILES) {
			pr_err("Register files count %d exceeds limit %d\n",
				per_cpu(cpu_priv_count, cpu), MAX_CPU_FILES);
			per_cpu(cpu_priv_count, cpu) = MAX_CPU_FILES;
			err = -ENFILE;
		}
		if (err)
			return err;
	}

	return err;
}
759
/* Module init: create the debugfs "cpu" dir and populate per-CPU trees. */
static int __init cpu_debug_init(void)
{
	cpu_debugfs_dir = debugfs_create_dir("cpu", arch_debugfs_dir);

	return cpu_init_cpu();
}
766
/*
 * Module exit: tear down the whole debugfs tree and free every
 * cpu_private recorded in priv_arr[] (kfree(NULL) is a no-op for
 * slots that were never filled).
 */
static void __exit cpu_debug_exit(void)
{
	int i, cpu;

	if (cpu_debugfs_dir)
		debugfs_remove_recursive(cpu_debugfs_dir);

	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
		for (i = 0; i < per_cpu(cpu_priv_count, cpu); i++)
			kfree(per_cpu(priv_arr[i], cpu));
}
778
/* Module entry/exit hooks and metadata */
module_init(cpu_debug_init);
module_exit(cpu_debug_exit);

MODULE_AUTHOR("Jaswinder Singh Rajput");
MODULE_DESCRIPTION("CPU Debug module");
MODULE_LICENSE("GPL");