blob: b8f3653dddbc2daccf1d0da1990e74b8cab57202 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
Ingo Molnarcdcf7722008-07-28 16:20:08 +02002 * Routines to identify caches on Intel CPU.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003 *
Ingo Molnarcdcf7722008-07-28 16:20:08 +02004 * Changes:
5 * Venkatesh Pallipadi : Adding cache identification through cpuid(4)
Alan Cox8bdbd962009-07-04 00:35:45 +01006 * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
Andi Kleen67cddd92007-07-21 17:10:03 +02007 * Andi Kleen / Andreas Herrmann : CPUID4 emulation on AMD.
Linus Torvalds1da177e2005-04-16 15:20:36 -07008 */
9
10#include <linux/init.h>
11#include <linux/slab.h>
12#include <linux/device.h>
13#include <linux/compiler.h>
14#include <linux/cpu.h>
Tim Schmielau4e57b682005-10-30 15:03:48 -080015#include <linux/sched.h>
Mark Langsdorfa24e8d32008-07-22 13:06:02 -050016#include <linux/pci.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070017
18#include <asm/processor.h>
Alan Cox8bdbd962009-07-04 00:35:45 +010019#include <linux/smp.h>
Andreas Herrmann23ac4ae2010-09-17 18:03:43 +020020#include <asm/amd_nb.h>
Borislav Petkovdcf39da2010-01-22 16:01:05 +010021#include <asm/smp.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070022
/* Internal cache-level tags used by cache_table[] below. */
23#define LVL_1_INST 1
24#define LVL_1_DATA 2
25#define LVL_2 3
26#define LVL_3 4
27#define LVL_TRACE 5
28
/* One cpuid(2) descriptor entry: descriptor byte, LVL_* tag, size in KB. */
Alan Cox8bdbd962009-07-04 00:35:45 +010029struct _cache_table {
Linus Torvalds1da177e2005-04-16 15:20:36 -070030 unsigned char descriptor;
31 char cache_type;
32 short size;
33};
34
/* Sizes in this file are in KB; MB(x) expresses x megabytes in KB. */
Dave Jones2ca49b22010-01-04 09:47:35 -050035#define MB(x) ((x) * 1024)
36
Alan Cox8bdbd962009-07-04 00:35:45 +010037/* All the cache descriptor types we care about (no TLB or
38 trace cache entries) */
39
/*
 * Descriptor byte -> {level tag, size in KB} table for legacy cpuid(2)
 * cache enumeration; terminated by the all-zero sentinel entry.
 */
Jan Beulich02dde8b2009-03-12 12:08:49 +000040static const struct _cache_table __cpuinitconst cache_table[] =
Linus Torvalds1da177e2005-04-16 15:20:36 -070041{
42 { 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */
43 { 0x08, LVL_1_INST, 16 }, /* 4-way set assoc, 32 byte line size */
Dave Jones9a8ecae2009-01-31 20:12:14 -050044 { 0x09, LVL_1_INST, 32 }, /* 4-way set assoc, 64 byte line size */
Linus Torvalds1da177e2005-04-16 15:20:36 -070045 { 0x0a, LVL_1_DATA, 8 }, /* 2 way set assoc, 32 byte line size */
46 { 0x0c, LVL_1_DATA, 16 }, /* 4-way set assoc, 32 byte line size */
Dave Jones9a8ecae2009-01-31 20:12:14 -050047 { 0x0d, LVL_1_DATA, 16 }, /* 4-way set assoc, 64 byte line size */
Dave Jonesfb87ec32011-01-19 20:20:56 -050048 { 0x0e, LVL_1_DATA, 24 }, /* 6-way set assoc, 64 byte line size */
Dave Jones9a8ecae2009-01-31 20:12:14 -050049 { 0x21, LVL_2, 256 }, /* 8-way set assoc, 64 byte line size */
Linus Torvalds1da177e2005-04-16 15:20:36 -070050 { 0x22, LVL_3, 512 }, /* 4-way set assoc, sectored cache, 64 byte line size */
Dave Jones2ca49b22010-01-04 09:47:35 -050051 { 0x23, LVL_3, MB(1) }, /* 8-way set assoc, sectored cache, 64 byte line size */
52 { 0x25, LVL_3, MB(2) }, /* 8-way set assoc, sectored cache, 64 byte line size */
53 { 0x29, LVL_3, MB(4) }, /* 8-way set assoc, sectored cache, 64 byte line size */
Linus Torvalds1da177e2005-04-16 15:20:36 -070054 { 0x2c, LVL_1_DATA, 32 }, /* 8-way set assoc, 64 byte line size */
55 { 0x30, LVL_1_INST, 32 }, /* 8-way set assoc, 64 byte line size */
56 { 0x39, LVL_2, 128 }, /* 4-way set assoc, sectored cache, 64 byte line size */
Dave Jones6fe8f472006-01-26 22:40:40 -080057 { 0x3a, LVL_2, 192 }, /* 6-way set assoc, sectored cache, 64 byte line size */
Linus Torvalds1da177e2005-04-16 15:20:36 -070058 { 0x3b, LVL_2, 128 }, /* 2-way set assoc, sectored cache, 64 byte line size */
59 { 0x3c, LVL_2, 256 }, /* 4-way set assoc, sectored cache, 64 byte line size */
Dave Jones6fe8f472006-01-26 22:40:40 -080060 { 0x3d, LVL_2, 384 }, /* 6-way set assoc, sectored cache, 64 byte line size */
61 { 0x3e, LVL_2, 512 }, /* 4-way set assoc, sectored cache, 64 byte line size */
Jason Gaston04fa11e2007-12-21 01:27:19 +010062 { 0x3f, LVL_2, 256 }, /* 2-way set assoc, 64 byte line size */
Linus Torvalds1da177e2005-04-16 15:20:36 -070063 { 0x41, LVL_2, 128 }, /* 4-way set assoc, 32 byte line size */
64 { 0x42, LVL_2, 256 }, /* 4-way set assoc, 32 byte line size */
65 { 0x43, LVL_2, 512 }, /* 4-way set assoc, 32 byte line size */
Dave Jones2ca49b22010-01-04 09:47:35 -050066 { 0x44, LVL_2, MB(1) }, /* 4-way set assoc, 32 byte line size */
67 { 0x45, LVL_2, MB(2) }, /* 4-way set assoc, 32 byte line size */
68 { 0x46, LVL_3, MB(4) }, /* 4-way set assoc, 64 byte line size */
69 { 0x47, LVL_3, MB(8) }, /* 8-way set assoc, 64 byte line size */
Dave Jonesfb87ec32011-01-19 20:20:56 -050070 { 0x48, LVL_2, MB(3) }, /* 12-way set assoc, 64 byte line size */
Dave Jones2ca49b22010-01-04 09:47:35 -050071 { 0x49, LVL_3, MB(4) }, /* 16-way set assoc, 64 byte line size */
72 { 0x4a, LVL_3, MB(6) }, /* 12-way set assoc, 64 byte line size */
73 { 0x4b, LVL_3, MB(8) }, /* 16-way set assoc, 64 byte line size */
74 { 0x4c, LVL_3, MB(12) }, /* 12-way set assoc, 64 byte line size */
75 { 0x4d, LVL_3, MB(16) }, /* 16-way set assoc, 64 byte line size */
76 { 0x4e, LVL_2, MB(6) }, /* 24-way set assoc, 64 byte line size */
Linus Torvalds1da177e2005-04-16 15:20:36 -070077 { 0x60, LVL_1_DATA, 16 }, /* 8-way set assoc, sectored cache, 64 byte line size */
78 { 0x66, LVL_1_DATA, 8 }, /* 4-way set assoc, sectored cache, 64 byte line size */
79 { 0x67, LVL_1_DATA, 16 }, /* 4-way set assoc, sectored cache, 64 byte line size */
80 { 0x68, LVL_1_DATA, 32 }, /* 4-way set assoc, sectored cache, 64 byte line size */
81 { 0x70, LVL_TRACE, 12 }, /* 8-way set assoc */
82 { 0x71, LVL_TRACE, 16 }, /* 8-way set assoc */
83 { 0x72, LVL_TRACE, 32 }, /* 8-way set assoc */
Dave Jones6fe8f472006-01-26 22:40:40 -080084 { 0x73, LVL_TRACE, 64 }, /* 8-way set assoc */
Dave Jones2ca49b22010-01-04 09:47:35 -050085 { 0x78, LVL_2, MB(1) }, /* 4-way set assoc, 64 byte line size */
86 { 0x79, LVL_2, 128 }, /* 8-way set assoc, sectored cache, 64 byte line size */
87 { 0x7a, LVL_2, 256 }, /* 8-way set assoc, sectored cache, 64 byte line size */
88 { 0x7b, LVL_2, 512 }, /* 8-way set assoc, sectored cache, 64 byte line size */
89 { 0x7c, LVL_2, MB(1) }, /* 8-way set assoc, sectored cache, 64 byte line size */
90 { 0x7d, LVL_2, MB(2) }, /* 8-way set assoc, 64 byte line size */
91 { 0x7f, LVL_2, 512 }, /* 2-way set assoc, 64 byte line size */
Dave Jonesfb87ec32011-01-19 20:20:56 -050092 { 0x80, LVL_2, 512 }, /* 8-way set assoc, 64 byte line size */
Dave Jones2ca49b22010-01-04 09:47:35 -050093 { 0x82, LVL_2, 256 }, /* 8-way set assoc, 32 byte line size */
94 { 0x83, LVL_2, 512 }, /* 8-way set assoc, 32 byte line size */
95 { 0x84, LVL_2, MB(1) }, /* 8-way set assoc, 32 byte line size */
96 { 0x85, LVL_2, MB(2) }, /* 8-way set assoc, 32 byte line size */
97 { 0x86, LVL_2, 512 }, /* 4-way set assoc, 64 byte line size */
98 { 0x87, LVL_2, MB(1) }, /* 8-way set assoc, 64 byte line size */
99 { 0xd0, LVL_3, 512 }, /* 4-way set assoc, 64 byte line size */
100 { 0xd1, LVL_3, MB(1) }, /* 4-way set assoc, 64 byte line size */
101 { 0xd2, LVL_3, MB(2) }, /* 4-way set assoc, 64 byte line size */
102 { 0xd6, LVL_3, MB(1) }, /* 8-way set assoc, 64 byte line size */
103 { 0xd7, LVL_3, MB(2) }, /* 8-way set assoc, 64 byte line size */
104 { 0xd8, LVL_3, MB(4) }, /* 12-way set assoc, 64 byte line size */
105 { 0xdc, LVL_3, MB(2) }, /* 12-way set assoc, 64 byte line size */
106 { 0xdd, LVL_3, MB(4) }, /* 12-way set assoc, 64 byte line size */
107 { 0xde, LVL_3, MB(8) }, /* 12-way set assoc, 64 byte line size */
108 { 0xe2, LVL_3, MB(2) }, /* 16-way set assoc, 64 byte line size */
109 { 0xe3, LVL_3, MB(4) }, /* 16-way set assoc, 64 byte line size */
110 { 0xe4, LVL_3, MB(8) }, /* 16-way set assoc, 64 byte line size */
111 { 0xea, LVL_3, MB(12) }, /* 24-way set assoc, 64 byte line size */
112 { 0xeb, LVL_3, MB(18) }, /* 24-way set assoc, 64 byte line size */
113 { 0xec, LVL_3, MB(24) }, /* 24-way set assoc, 64 byte line size */
/* Sentinel: descriptor 0x00 terminates lookups over this table. */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700114 { 0x00, 0, 0}
115};
116
117
/* Cache type field values as reported in CPUID.4 EAX[4:0]. */
Alan Cox8bdbd962009-07-04 00:35:45 +0100118enum _cache_type {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700119 CACHE_TYPE_NULL = 0,
120 CACHE_TYPE_DATA = 1,
121 CACHE_TYPE_INST = 2,
122 CACHE_TYPE_UNIFIED = 3
123};
124
/* Bitfield overlay of the CPUID.4 EAX register (deterministic cache params). */
125union _cpuid4_leaf_eax {
126 struct {
127 enum _cache_type type:5;
128 unsigned int level:3;
129 unsigned int is_self_initializing:1;
130 unsigned int is_fully_associative:1;
131 unsigned int reserved:4;
132 unsigned int num_threads_sharing:12;
133 unsigned int num_cores_on_die:6;
134 } split;
135 u32 full;
136};
137
/* Bitfield overlay of the CPUID.4 EBX register; all fields are value-minus-1. */
138union _cpuid4_leaf_ebx {
139 struct {
140 unsigned int coherency_line_size:12;
141 unsigned int physical_line_partition:10;
142 unsigned int ways_of_associativity:10;
143 } split;
144 u32 full;
145};
146
/* Bitfield overlay of the CPUID.4 ECX register (number of sets minus 1). */
147union _cpuid4_leaf_ecx {
148 struct {
149 unsigned int number_of_sets:32;
150 } split;
151 u32 full;
152};
153
/*
 * Raw cpuid(4) register values for one cache leaf, plus the computed total
 * size and (on AMD) the owning northbridge for L3 management.
 */
Mike Travisf9b90562009-01-10 21:58:10 -0800154struct _cpuid4_info_regs {
155 union _cpuid4_leaf_eax eax;
156 union _cpuid4_leaf_ebx ebx;
157 union _cpuid4_leaf_ecx ecx;
158 unsigned long size;
Thomas Gleixnerd2946042011-07-24 09:46:09 +0000159 struct amd_northbridge *nb;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700160};
161
/* Per-leaf info extended with the mask of CPUs sharing this cache. */
Thomas Gleixnerb7d11a72011-07-24 09:46:08 +0000162struct _cpuid4_info {
163 struct _cpuid4_info_regs base;
164 DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
165};
166
/* Number of cpuid(4) leaves, discovered once on the boot CPU. */
Andi Kleen240cd6a802006-06-26 13:56:13 +0200167unsigned short num_cache_leaves;
168
169/* AMD doesn't have CPUID4. Emulate it here to report the same
170 information to the user. This makes some assumptions about the machine:
Andi Kleen67cddd92007-07-21 17:10:03 +0200171 L2 not shared, no SMT etc. that is currently true on AMD CPUs.
Andi Kleen240cd6a802006-06-26 13:56:13 +0200172
173 In theory the TLBs could be reported as fake type (they are in "dummy").
174 Maybe later */
/* Layout of CPUID 0x80000005 ECX/EDX (AMD L1 data/instruction cache info). */
175union l1_cache {
176 struct {
Alan Cox8bdbd962009-07-04 00:35:45 +0100177 unsigned line_size:8;
178 unsigned lines_per_tag:8;
179 unsigned assoc:8;
180 unsigned size_in_kb:8;
Andi Kleen240cd6a802006-06-26 13:56:13 +0200181 };
182 unsigned val;
183};
184
/* Layout of CPUID 0x80000006 ECX (AMD L2 cache info). */
185union l2_cache {
186 struct {
Alan Cox8bdbd962009-07-04 00:35:45 +0100187 unsigned line_size:8;
188 unsigned lines_per_tag:4;
189 unsigned assoc:4;
190 unsigned size_in_kb:16;
Andi Kleen240cd6a802006-06-26 13:56:13 +0200191 };
192 unsigned val;
193};
194
/* Layout of CPUID 0x80000006 EDX (AMD L3; size_encoded is in 512 KB units). */
Andi Kleen67cddd92007-07-21 17:10:03 +0200195union l3_cache {
196 struct {
Alan Cox8bdbd962009-07-04 00:35:45 +0100197 unsigned line_size:8;
198 unsigned lines_per_tag:4;
199 unsigned assoc:4;
200 unsigned res:2;
201 unsigned size_encoded:14;
Andi Kleen67cddd92007-07-21 17:10:03 +0200202 };
203 unsigned val;
204};
205
/* Map the AMD 4-bit associativity encoding to an actual way count. */
Jan Beulich02dde8b2009-03-12 12:08:49 +0000206static const unsigned short __cpuinitconst assocs[] = {
Andreas Herrmann6265ff12009-04-09 15:47:10 +0200207 [1] = 1,
208 [2] = 2,
209 [4] = 4,
210 [6] = 8,
211 [8] = 16,
212 [0xa] = 32,
213 [0xb] = 48,
Andi Kleen67cddd92007-07-21 17:10:03 +0200214 [0xc] = 64,
Andreas Herrmann6265ff12009-04-09 15:47:10 +0200215 [0xd] = 96,
216 [0xe] = 128,
217 [0xf] = 0xffff /* fully associative - no way to show this currently */
Andi Kleen67cddd92007-07-21 17:10:03 +0200218};
219
/* Per-emulated-leaf cache level and CPUID.4 type, indexed by amd_cpuid4() leaf. */
Jan Beulich02dde8b2009-03-12 12:08:49 +0000220static const unsigned char __cpuinitconst levels[] = { 1, 1, 2, 3 };
221static const unsigned char __cpuinitconst types[] = { 1, 2, 3, 3 };
Andi Kleen240cd6a802006-06-26 13:56:13 +0200222
/*
 * Emulate CPUID leaf 4 on AMD from the 0x80000005/0x80000006 cache leaves.
 * @leaf selects which synthetic leaf to build: 0 = L1D, 1 = L1I, 2 = L2,
 * 3 = L3.  On unknown @leaf or an absent cache the output registers are
 * left zeroed (type stays CACHE_TYPE_NULL), which callers treat as "no leaf".
 */
Ingo Molnarcdcf7722008-07-28 16:20:08 +0200223static void __cpuinit
224amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
225 union _cpuid4_leaf_ebx *ebx,
226 union _cpuid4_leaf_ecx *ecx)
Andi Kleen240cd6a802006-06-26 13:56:13 +0200227{
228 unsigned dummy;
229 unsigned line_size, lines_per_tag, assoc, size_in_kb;
230 union l1_cache l1i, l1d;
231 union l2_cache l2;
Andi Kleen67cddd92007-07-21 17:10:03 +0200232 union l3_cache l3;
233 union l1_cache *l1 = &l1d;
Andi Kleen240cd6a802006-06-26 13:56:13 +0200234
235 eax->full = 0;
236 ebx->full = 0;
237 ecx->full = 0;
238
239 cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
Andi Kleen67cddd92007-07-21 17:10:03 +0200240 cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val);
Andi Kleen240cd6a802006-06-26 13:56:13 +0200241
Andi Kleen67cddd92007-07-21 17:10:03 +0200242 switch (leaf) {
243 case 1:
244 l1 = &l1i;
		/* fall through - case 0 and 1 share the l1 extraction code */
245 case 0:
246 if (!l1->val)
247 return;
Andreas Herrmanna326e942009-09-03 09:41:19 +0200248 assoc = assocs[l1->assoc];
Andi Kleen240cd6a802006-06-26 13:56:13 +0200249 line_size = l1->line_size;
250 lines_per_tag = l1->lines_per_tag;
251 size_in_kb = l1->size_in_kb;
Andi Kleen67cddd92007-07-21 17:10:03 +0200252 break;
253 case 2:
254 if (!l2.val)
255 return;
Andreas Herrmanna326e942009-09-03 09:41:19 +0200256 assoc = assocs[l2.assoc];
Andi Kleen240cd6a802006-06-26 13:56:13 +0200257 line_size = l2.line_size;
258 lines_per_tag = l2.lines_per_tag;
259 /* cpu_data has errata corrections for K7 applied */
Tejun Heo7b543a52010-12-18 16:30:05 +0100260 size_in_kb = __this_cpu_read(cpu_info.x86_cache_size);
Andi Kleen67cddd92007-07-21 17:10:03 +0200261 break;
262 case 3:
263 if (!l3.val)
264 return;
Andreas Herrmanna326e942009-09-03 09:41:19 +0200265 assoc = assocs[l3.assoc];
Andi Kleen67cddd92007-07-21 17:10:03 +0200266 line_size = l3.line_size;
267 lines_per_tag = l3.lines_per_tag;
268 size_in_kb = l3.size_encoded * 512;
Andreas Herrmanna326e942009-09-03 09:41:19 +0200269 if (boot_cpu_has(X86_FEATURE_AMD_DCM)) {
270 size_in_kb = size_in_kb >> 1;
271 assoc = assoc >> 1;
272 }
Andi Kleen67cddd92007-07-21 17:10:03 +0200273 break;
274 default:
275 return;
Andi Kleen240cd6a802006-06-26 13:56:13 +0200276 }
277
Andi Kleen67cddd92007-07-21 17:10:03 +0200278 eax->split.is_self_initializing = 1;
279 eax->split.type = types[leaf];
280 eax->split.level = levels[leaf];
Andreas Herrmanna326e942009-09-03 09:41:19 +0200281 eax->split.num_threads_sharing = 0;
Tejun Heo7b543a52010-12-18 16:30:05 +0100282 eax->split.num_cores_on_die = __this_cpu_read(cpu_info.x86_max_cores) - 1;
Andi Kleen67cddd92007-07-21 17:10:03 +0200283
284
/* 0xffff in assocs[] marks the fully-associative encoding (0xf). */
Andreas Herrmanna326e942009-09-03 09:41:19 +0200285 if (assoc == 0xffff)
Andi Kleen240cd6a802006-06-26 13:56:13 +0200286 eax->split.is_fully_associative = 1;
/* CPUID.4 EBX/ECX fields are all stored as value-minus-1. */
287 ebx->split.coherency_line_size = line_size - 1;
Andreas Herrmanna326e942009-09-03 09:41:19 +0200288 ebx->split.ways_of_associativity = assoc - 1;
Andi Kleen240cd6a802006-06-26 13:56:13 +0200289 ebx->split.physical_line_partition = lines_per_tag - 1;
290 ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
291 (ebx->split.ways_of_associativity + 1) - 1;
292}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700293
/* A sysfs attribute for a cache leaf, with show/store taking the leaf info. */
Borislav Petkovcb190602010-02-18 19:37:14 +0100294struct _cache_attr {
295 struct attribute attr;
Hans Rosenfeldcabb5bd2011-02-07 18:10:39 +0100296 ssize_t (*show)(struct _cpuid4_info *, char *, unsigned int);
297 ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count,
298 unsigned int);
Borislav Petkovcb190602010-02-18 19:37:14 +0100299};
300
Andreas Herrmann23ac4ae2010-09-17 18:03:43 +0200301#ifdef CONFIG_AMD_NB
Borislav Petkovba06edb2010-04-22 16:07:01 +0200302
303/*
304 * L3 cache descriptors
305 */
/*
 * Derive the number of usable L3 indices from the northbridge's subcache
 * enable bits (PCI config reg 0x1C4) and cache it in nb->l3_cache.
 */
Thomas Gleixnerd2946042011-07-24 09:46:09 +0000306static void __cpuinit amd_calc_l3_indices(struct amd_northbridge *nb)
Borislav Petkov048a8772010-01-22 16:01:07 +0100307{
Thomas Gleixnerd2946042011-07-24 09:46:09 +0000308 struct amd_l3_cache *l3 = &nb->l3_cache;
Borislav Petkov048a8772010-01-22 16:01:07 +0100309 unsigned int sc0, sc1, sc2, sc3;
Borislav Petkovcb190602010-02-18 19:37:14 +0100310 u32 val = 0;
Borislav Petkov048a8772010-01-22 16:01:07 +0100311
Thomas Gleixnerd2946042011-07-24 09:46:09 +0000312 pci_read_config_dword(nb->misc, 0x1C4, &val);
Borislav Petkov048a8772010-01-22 16:01:07 +0100313
314 /* calculate subcache sizes */
/* A clear bit means the subcache is enabled, hence the negations. */
Borislav Petkov9350f982010-04-22 16:07:00 +0200315 l3->subcaches[0] = sc0 = !(val & BIT(0));
316 l3->subcaches[1] = sc1 = !(val & BIT(4));
Frank Arnold77e75fc2011-05-18 11:32:10 +0200317
/* Family 0x15 has two extra enable bits for subcaches 0 and 1. */
318 if (boot_cpu_data.x86 == 0x15) {
319 l3->subcaches[0] = sc0 += !(val & BIT(1));
320 l3->subcaches[1] = sc1 += !(val & BIT(5));
321 }
322
Borislav Petkov9350f982010-04-22 16:07:00 +0200323 l3->subcaches[2] = sc2 = !(val & BIT(8)) + !(val & BIT(9));
324 l3->subcaches[3] = sc3 = !(val & BIT(12)) + !(val & BIT(13));
Borislav Petkov048a8772010-01-22 16:01:07 +0100325
/* indices = (largest subcache count * 1024) - 1. */
Hagen Paul Pfeifer732eacc2010-10-26 14:22:23 -0700326 l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1;
Borislav Petkovba06edb2010-04-22 16:07:01 +0200327}
328
/*
 * Attach the node's northbridge to an L3 cache leaf (index >= 3) and
 * compute its index count once.  Non-L3 leaves are ignored.
 */
Andreas Herrmann32c32332012-02-08 20:52:29 +0100329static void __cpuinit amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, int index)
Borislav Petkovba06edb2010-04-22 16:07:01 +0200330{
Borislav Petkovba06edb2010-04-22 16:07:01 +0200331 int node;
332
Hans Rosenfeldf658bcf2010-10-29 17:14:32 +0200333 /* only for L3, and not in virtualized environments */
Thomas Gleixnerd2946042011-07-24 09:46:09 +0000334 if (index < 3)
Frank Arnoldf2b20e42010-04-22 16:06:59 +0200335 return;
336
/* node_to_amd_nb() can return NULL (e.g. under a hypervisor) - checked below. */
Borislav Petkovba06edb2010-04-22 16:07:01 +0200337 node = amd_get_nb_id(smp_processor_id());
Thomas Gleixnerd2946042011-07-24 09:46:09 +0000338 this_leaf->nb = node_to_amd_nb(node);
339 if (this_leaf->nb && !this_leaf->nb->l3_cache.indices)
340 amd_calc_l3_indices(this_leaf->nb);
Mark Langsdorf8cb22bc2008-07-18 16:03:52 -0500341}
342
Borislav Petkov8cc11762010-06-02 18:18:40 +0200343/*
344 * check whether a slot used for disabling an L3 index is occupied.
345 * @nb: AMD northbridge containing the L3 cache
346 * @slot: slot number (0..1)
347 *
348 * @returns: the disabled index if used or negative value if slot free.
349 */
Thomas Gleixnerd2946042011-07-24 09:46:09 +0000350int amd_get_l3_disable_slot(struct amd_northbridge *nb, unsigned slot)
Borislav Petkov8cc11762010-06-02 18:18:40 +0200351{
352 unsigned int reg = 0;
353
/* D18F3x1BC/x1C0 are the two L3 index-disable registers. */
Thomas Gleixnerd2946042011-07-24 09:46:09 +0000354 pci_read_config_dword(nb->misc, 0x1BC + slot * 4, &reg);
Borislav Petkov8cc11762010-06-02 18:18:40 +0200355
356 /* check whether this slot is activated already */
357 if (reg & (3UL << 30))
358 return reg & 0xfff;
359
360 return -1;
361}
362
/*
 * sysfs show: print the index disabled in @slot, or "FREE" if the slot
 * is unused.  -EINVAL if the leaf has no northbridge or the feature is absent.
 */
Borislav Petkovcb190602010-02-18 19:37:14 +0100363static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
Borislav Petkov59d3b382010-04-22 16:07:02 +0200364 unsigned int slot)
Borislav Petkovcb190602010-02-18 19:37:14 +0100365{
Borislav Petkov8cc11762010-06-02 18:18:40 +0200366 int index;
Borislav Petkovcb190602010-02-18 19:37:14 +0100367
Thomas Gleixnerd2946042011-07-24 09:46:09 +0000368 if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
Borislav Petkovcb190602010-02-18 19:37:14 +0100369 return -EINVAL;
370
Thomas Gleixnerd2946042011-07-24 09:46:09 +0000371 index = amd_get_l3_disable_slot(this_leaf->base.nb, slot);
Borislav Petkov8cc11762010-06-02 18:18:40 +0200372 if (index >= 0)
373 return sprintf(buf, "%d\n", index);
Borislav Petkovcb190602010-02-18 19:37:14 +0100374
Borislav Petkov8cc11762010-06-02 18:18:40 +0200375 return sprintf(buf, "FREE\n");
Borislav Petkovcb190602010-02-18 19:37:14 +0100376}
377
/* Generate the per-slot sysfs show wrappers show_cache_disable_{0,1}(). */
Borislav Petkov59d3b382010-04-22 16:07:02 +0200378#define SHOW_CACHE_DISABLE(slot) \
Borislav Petkovcb190602010-02-18 19:37:14 +0100379static ssize_t \
Hans Rosenfeldcabb5bd2011-02-07 18:10:39 +0100380show_cache_disable_##slot(struct _cpuid4_info *this_leaf, char *buf, \
381 unsigned int cpu) \
Borislav Petkovcb190602010-02-18 19:37:14 +0100382{ \
Borislav Petkov59d3b382010-04-22 16:07:02 +0200383 return show_cache_disable(this_leaf, buf, slot); \
Borislav Petkovcb190602010-02-18 19:37:14 +0100384}
385SHOW_CACHE_DISABLE(0)
386SHOW_CACHE_DISABLE(1)
387
/*
 * Program disable-slot @slot of the L3 behind @nb to disable cache index
 * @idx in every populated subcache, flushing via WBINVD on @cpu before
 * committing each write with the enable bit (bit 31).
 */
Thomas Gleixnerd2946042011-07-24 09:46:09 +0000388static void amd_l3_disable_index(struct amd_northbridge *nb, int cpu,
Borislav Petkov59d3b382010-04-22 16:07:02 +0200389 unsigned slot, unsigned long idx)
390{
391 int i;
392
/* Bit 30 marks the slot as in use. */
393 idx |= BIT(30);
394
395 /*
396 * disable index in all 4 subcaches
397 */
398 for (i = 0; i < 4; i++) {
/* Bits 21:20 select the subcache the write targets. */
399 u32 reg = idx | (i << 20);
400
Thomas Gleixnerd2946042011-07-24 09:46:09 +0000401 if (!nb->l3_cache.subcaches[i])
Borislav Petkov59d3b382010-04-22 16:07:02 +0200402 continue;
403
Thomas Gleixnerd2946042011-07-24 09:46:09 +0000404 pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);
Borislav Petkov59d3b382010-04-22 16:07:02 +0200405
406 /*
407 * We need to WBINVD on a core on the node containing the L3
408 * cache which indices we disable therefore a simple wbinvd()
409 * is not sufficient.
410 */
411 wbinvd_on_cpu(cpu);
412
/* Second write with bit 31 set actually activates the disable. */
413 reg |= BIT(31);
Thomas Gleixnerd2946042011-07-24 09:46:09 +0000414 pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);
Borislav Petkov59d3b382010-04-22 16:07:02 +0200415 }
416}
417
Borislav Petkov8cc11762010-06-02 18:18:40 +0200418/*
419 * disable a L3 cache index by using a disable-slot
420 *
421 * @nb: AMD northbridge containing the L3 cache
422 * @cpu: A CPU on the node containing the L3 cache
423 * @slot: slot number (0..1)
424 * @index: index to disable
425 *
426 * @return: 0 on success, error status on failure
427 */
Thomas Gleixnerd2946042011-07-24 09:46:09 +0000428int amd_set_l3_disable_slot(struct amd_northbridge *nb, int cpu, unsigned slot,
Borislav Petkov8cc11762010-06-02 18:18:40 +0200429 unsigned long index)
Borislav Petkovcb190602010-02-18 19:37:14 +0100430{
Borislav Petkov8cc11762010-06-02 18:18:40 +0200431 int ret = 0;
Borislav Petkovcb190602010-02-18 19:37:14 +0100432
Frank Arnold42be4502011-05-16 15:39:47 +0200433 /* check if @slot is already used or the index is already disabled */
Thomas Gleixnerd2946042011-07-24 09:46:09 +0000434 ret = amd_get_l3_disable_slot(nb, slot);
Borislav Petkov8cc11762010-06-02 18:18:40 +0200435 if (ret >= 0)
Srivatsa S. Bhata720b2d2012-04-19 12:35:08 +0200436 return -EEXIST;
Borislav Petkovcb190602010-02-18 19:37:14 +0100437
Thomas Gleixnerd2946042011-07-24 09:46:09 +0000438 if (index > nb->l3_cache.indices)
Borislav Petkov8cc11762010-06-02 18:18:40 +0200439 return -EINVAL;
440
Frank Arnold42be4502011-05-16 15:39:47 +0200441 /* check whether the other slot has disabled the same index already */
Thomas Gleixnerd2946042011-07-24 09:46:09 +0000442 if (index == amd_get_l3_disable_slot(nb, !slot))
Srivatsa S. Bhata720b2d2012-04-19 12:35:08 +0200443 return -EEXIST;
Borislav Petkov8cc11762010-06-02 18:18:40 +0200444
Thomas Gleixnerd2946042011-07-24 09:46:09 +0000445 amd_l3_disable_index(nb, cpu, slot, index);
Borislav Petkov8cc11762010-06-02 18:18:40 +0200446
447 return 0;
448}
449
/*
 * sysfs store: parse a decimal index from @buf and disable it via @slot.
 * Requires CAP_SYS_ADMIN and northbridge L3 index-disable support.
 * Returns @count on success or a negative errno.
 */
450static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
451 const char *buf, size_t count,
452 unsigned int slot)
453{
454 unsigned long val = 0;
455 int cpu, err = 0;
456
Borislav Petkovcb190602010-02-18 19:37:14 +0100457 if (!capable(CAP_SYS_ADMIN))
458 return -EPERM;
459
Thomas Gleixnerd2946042011-07-24 09:46:09 +0000460 if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
Borislav Petkovcb190602010-02-18 19:37:14 +0100461 return -EINVAL;
462
/* Any CPU sharing this cache sits on the right node for the disable. */
Borislav Petkov8cc11762010-06-02 18:18:40 +0200463 cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
464
Borislav Petkovcb190602010-02-18 19:37:14 +0100465 if (strict_strtoul(buf, 10, &val) < 0)
466 return -EINVAL;
467
Thomas Gleixnerd2946042011-07-24 09:46:09 +0000468 err = amd_set_l3_disable_slot(this_leaf->base.nb, cpu, slot, val);
Borislav Petkov8cc11762010-06-02 18:18:40 +0200469 if (err) {
470 if (err == -EEXIST)
Srivatsa S. Bhata720b2d2012-04-19 12:35:08 +0200471 pr_warning("L3 slot %d in use/index already disabled!\n",
472 slot);
Borislav Petkov8cc11762010-06-02 18:18:40 +0200473 return err;
474 }
Borislav Petkovcb190602010-02-18 19:37:14 +0100475 return count;
476}
477
/* Generate the per-slot sysfs store wrappers store_cache_disable_{0,1}(). */
Borislav Petkov59d3b382010-04-22 16:07:02 +0200478#define STORE_CACHE_DISABLE(slot) \
Borislav Petkovcb190602010-02-18 19:37:14 +0100479static ssize_t \
Borislav Petkov59d3b382010-04-22 16:07:02 +0200480store_cache_disable_##slot(struct _cpuid4_info *this_leaf, \
Hans Rosenfeldcabb5bd2011-02-07 18:10:39 +0100481 const char *buf, size_t count, \
482 unsigned int cpu) \
Borislav Petkovcb190602010-02-18 19:37:14 +0100483{ \
Borislav Petkov59d3b382010-04-22 16:07:02 +0200484 return store_cache_disable(this_leaf, buf, count, slot); \
Borislav Petkovcb190602010-02-18 19:37:14 +0100485}
486STORE_CACHE_DISABLE(0)
487STORE_CACHE_DISABLE(1)
488
/* sysfs attribute objects for the two L3 index-disable slots. */
489static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
490 show_cache_disable_0, store_cache_disable_0);
491static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
492 show_cache_disable_1, store_cache_disable_1);
493
/* sysfs show: print @cpu's L3 subcache assignment mask in hex. */
Hans Rosenfeldcabb5bd2011-02-07 18:10:39 +0100494static ssize_t
495show_subcaches(struct _cpuid4_info *this_leaf, char *buf, unsigned int cpu)
496{
Thomas Gleixnerd2946042011-07-24 09:46:09 +0000497 if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
Hans Rosenfeldcabb5bd2011-02-07 18:10:39 +0100498 return -EINVAL;
499
500 return sprintf(buf, "%x\n", amd_get_subcaches(cpu));
501}
502
/*
 * sysfs store: parse a hex subcache mask and apply it to @cpu.
 * Requires CAP_SYS_ADMIN and L3 partitioning support on the northbridge.
 */
503static ssize_t
504store_subcaches(struct _cpuid4_info *this_leaf, const char *buf, size_t count,
505 unsigned int cpu)
506{
507 unsigned long val;
508
509 if (!capable(CAP_SYS_ADMIN))
510 return -EPERM;
511
Thomas Gleixnerd2946042011-07-24 09:46:09 +0000512 if (!this_leaf->base.nb || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
Hans Rosenfeldcabb5bd2011-02-07 18:10:39 +0100513 return -EINVAL;
514
515 if (strict_strtoul(buf, 16, &val) < 0)
516 return -EINVAL;
517
518 if (amd_set_subcaches(cpu, val))
519 return -EINVAL;
520
521 return count;
522}
523
/* sysfs attribute object for the L3 subcache partitioning interface. */
524static struct _cache_attr subcaches =
525 __ATTR(subcaches, 0644, show_subcaches, store_subcaches);
526
Andreas Herrmann23ac4ae2010-09-17 18:03:43 +0200527#else /* CONFIG_AMD_NB */
/* Without AMD northbridge support, L3 leaf init is a no-op. */
Hans Rosenfeldf658bcf2010-10-29 17:14:32 +0200528#define amd_init_l3_cache(x, y)
Andreas Herrmann23ac4ae2010-09-17 18:03:43 +0200529#endif /* CONFIG_AMD_NB */
Borislav Petkovcb190602010-02-18 19:37:14 +0100530
/*
 * Fill @this_leaf with cpuid(4) data for cache leaf @index: native CPUID.4
 * on Intel, emulated via amd_cpuid4() on AMD (plus L3 northbridge setup).
 * Returns 0 on success or -EIO when the leaf does not exist (NULL type).
 */
Ingo Molnar7a4983b2008-07-21 13:34:21 +0200531static int
Mike Travisf9b90562009-01-10 21:58:10 -0800532__cpuinit cpuid4_cache_lookup_regs(int index,
533 struct _cpuid4_info_regs *this_leaf)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700534{
Hans Rosenfeldcabb5bd2011-02-07 18:10:39 +0100535 union _cpuid4_leaf_eax eax;
536 union _cpuid4_leaf_ebx ebx;
537 union _cpuid4_leaf_ecx ecx;
Andi Kleen240cd6a802006-06-26 13:56:13 +0200538 unsigned edx;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700539
Mark Langsdorf8cb22bc2008-07-18 16:03:52 -0500540 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
Andi Kleen240cd6a802006-06-26 13:56:13 +0200541 amd_cpuid4(index, &eax, &ebx, &ecx);
Hans Rosenfeldf658bcf2010-10-29 17:14:32 +0200542 amd_init_l3_cache(this_leaf, index);
Ingo Molnar7a4983b2008-07-21 13:34:21 +0200543 } else {
544 cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
545 }
546
Andi Kleen240cd6a802006-06-26 13:56:13 +0200547 if (eax.split.type == CACHE_TYPE_NULL)
Andi Kleene2cac782005-07-28 21:15:46 -0700548 return -EIO; /* better error ? */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700549
Andi Kleen240cd6a802006-06-26 13:56:13 +0200550 this_leaf->eax = eax;
551 this_leaf->ebx = ebx;
552 this_leaf->ecx = ecx;
/* Total size = sets * line size * partitions * ways (fields are minus-1). */
Ingo Molnar7a4983b2008-07-21 13:34:21 +0200553 this_leaf->size = (ecx.split.number_of_sets + 1) *
554 (ebx.split.coherency_line_size + 1) *
555 (ebx.split.physical_line_partition + 1) *
556 (ebx.split.ways_of_associativity + 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700557 return 0;
558}
559
Adrian Bunk61d488d2007-07-21 04:37:39 -0700560static int __cpuinit find_num_cache_leaves(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700561{
562 unsigned int eax, ebx, ecx, edx;
563 union _cpuid4_leaf_eax cache_eax;
Siddha, Suresh Bd16aafff2005-10-30 14:59:30 -0800564 int i = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700565
Siddha, Suresh Bd16aafff2005-10-30 14:59:30 -0800566 do {
567 ++i;
568 /* Do cpuid(4) loop to find out num_cache_leaves */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700569 cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
570 cache_eax.full = eax;
Siddha, Suresh Bd16aafff2005-10-30 14:59:30 -0800571 } while (cache_eax.split.type != CACHE_TYPE_NULL);
572 return i;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700573}
574
Ashok Raj1aa1a9f2005-10-30 14:59:50 -0800575unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700576{
Alan Cox8bdbd962009-07-04 00:35:45 +0100577 /* Cache sizes */
578 unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700579 unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
580 unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -0800581 unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
James Bottomley96c52742006-06-27 02:53:49 -0700582#ifdef CONFIG_X86_HT
Mike Travis92cb7612007-10-19 20:35:04 +0200583 unsigned int cpu = c->cpu_index;
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -0800584#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -0700585
Shaohua Lif2d0d262006-03-23 02:59:52 -0800586 if (c->cpuid_level > 3) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700587 static int is_initialized;
588
589 if (is_initialized == 0) {
590 /* Init num_cache_leaves from boot CPU */
591 num_cache_leaves = find_num_cache_leaves();
592 is_initialized++;
593 }
594
595 /*
596 * Whenever possible use cpuid(4), deterministic cache
597 * parameters cpuid leaf to find the cache details
598 */
599 for (i = 0; i < num_cache_leaves; i++) {
Mike Travisf9b90562009-01-10 21:58:10 -0800600 struct _cpuid4_info_regs this_leaf;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700601 int retval;
602
Mike Travisf9b90562009-01-10 21:58:10 -0800603 retval = cpuid4_cache_lookup_regs(i, &this_leaf);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700604 if (retval >= 0) {
Alan Cox8bdbd962009-07-04 00:35:45 +0100605 switch (this_leaf.eax.split.level) {
606 case 1:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700607 if (this_leaf.eax.split.type ==
608 CACHE_TYPE_DATA)
609 new_l1d = this_leaf.size/1024;
610 else if (this_leaf.eax.split.type ==
611 CACHE_TYPE_INST)
612 new_l1i = this_leaf.size/1024;
613 break;
Alan Cox8bdbd962009-07-04 00:35:45 +0100614 case 2:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700615 new_l2 = this_leaf.size/1024;
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -0800616 num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
617 index_msb = get_count_order(num_threads_sharing);
618 l2_id = c->apicid >> index_msb;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700619 break;
Alan Cox8bdbd962009-07-04 00:35:45 +0100620 case 3:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700621 new_l3 = this_leaf.size/1024;
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -0800622 num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
Alan Cox8bdbd962009-07-04 00:35:45 +0100623 index_msb = get_count_order(
624 num_threads_sharing);
Siddha, Suresh B1e9f28f2006-03-27 01:15:22 -0800625 l3_id = c->apicid >> index_msb;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700626 break;
Alan Cox8bdbd962009-07-04 00:35:45 +0100627 default:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700628 break;
629 }
630 }
631 }
632 }
Shaohua Lib06be9122006-03-27 01:15:24 -0800633 /*
634 * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
635 * trace cache
636 */
637 if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700638 /* supports eax=2 call */
Harvey Harrisonc1666e62008-01-31 22:05:43 +0100639 int j, n;
640 unsigned int regs[4];
Linus Torvalds1da177e2005-04-16 15:20:36 -0700641 unsigned char *dp = (unsigned char *)regs;
Shaohua Lib06be9122006-03-27 01:15:24 -0800642 int only_trace = 0;
643
644 if (num_cache_leaves != 0 && c->x86 == 15)
645 only_trace = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700646
647 /* Number of times to iterate */
648 n = cpuid_eax(2) & 0xFF;
649
Alan Cox8bdbd962009-07-04 00:35:45 +0100650 for (i = 0 ; i < n ; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700651 cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);
652
653 /* If bit 31 is set, this is an unknown format */
Alan Cox8bdbd962009-07-04 00:35:45 +0100654 for (j = 0 ; j < 3 ; j++)
655 if (regs[j] & (1 << 31))
656 regs[j] = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700657
658 /* Byte 0 is level count, not a descriptor */
Alan Cox8bdbd962009-07-04 00:35:45 +0100659 for (j = 1 ; j < 16 ; j++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700660 unsigned char des = dp[j];
661 unsigned char k = 0;
662
663 /* look up this descriptor in the table */
Alan Cox8bdbd962009-07-04 00:35:45 +0100664 while (cache_table[k].descriptor != 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700665 if (cache_table[k].descriptor == des) {
Shaohua Lib06be9122006-03-27 01:15:24 -0800666 if (only_trace && cache_table[k].cache_type != LVL_TRACE)
667 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700668 switch (cache_table[k].cache_type) {
669 case LVL_1_INST:
670 l1i += cache_table[k].size;
671 break;
672 case LVL_1_DATA:
673 l1d += cache_table[k].size;
674 break;
675 case LVL_2:
676 l2 += cache_table[k].size;
677 break;
678 case LVL_3:
679 l3 += cache_table[k].size;
680 break;
681 case LVL_TRACE:
682 trace += cache_table[k].size;
683 break;
684 }
685
686 break;
687 }
688
689 k++;
690 }
691 }
692 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700693 }
694
Shaohua Lib06be9122006-03-27 01:15:24 -0800695 if (new_l1d)
696 l1d = new_l1d;
697
698 if (new_l1i)
699 l1i = new_l1i;
700
701 if (new_l2) {
702 l2 = new_l2;
James Bottomley96c52742006-06-27 02:53:49 -0700703#ifdef CONFIG_X86_HT
Mike Travisb6278472007-10-19 20:35:03 +0200704 per_cpu(cpu_llc_id, cpu) = l2_id;
Shaohua Lib06be9122006-03-27 01:15:24 -0800705#endif
706 }
707
708 if (new_l3) {
709 l3 = new_l3;
James Bottomley96c52742006-06-27 02:53:49 -0700710#ifdef CONFIG_X86_HT
Mike Travisb6278472007-10-19 20:35:03 +0200711 per_cpu(cpu_llc_id, cpu) = l3_id;
Shaohua Lib06be9122006-03-27 01:15:24 -0800712#endif
713 }
714
Shaohua Lib06be9122006-03-27 01:15:24 -0800715 c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));
716
Linus Torvalds1da177e2005-04-16 15:20:36 -0700717 return l2;
718}
719
Ingo Molnarba1d7552008-10-18 21:24:45 +0200720#ifdef CONFIG_SYSFS
721
Linus Torvalds1da177e2005-04-16 15:20:36 -0700722/* pointer to _cpuid4_info array (for each cache leaf) */
Tejun Heo0fe1e002009-10-29 22:34:14 +0900723static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info);
724#define CPUID4_INFO_IDX(x, y) (&((per_cpu(ici_cpuid4_info, x))[y]))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700725
726#ifdef CONFIG_SMP
Andreas Herrmann32c32332012-02-08 20:52:29 +0100727
728static int __cpuinit cache_shared_amd_cpu_map_setup(unsigned int cpu, int index)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700729{
Andreas Herrmann32c32332012-02-08 20:52:29 +0100730 struct _cpuid4_info *this_leaf;
731 int ret, i, sibling;
Mike Travis92cb7612007-10-19 20:35:04 +0200732 struct cpuinfo_x86 *c = &cpu_data(cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700733
Andreas Herrmann32c32332012-02-08 20:52:29 +0100734 ret = 0;
735 if (index == 3) {
736 ret = 1;
Yinghai Lub3d73362011-01-21 15:29:44 -0800737 for_each_cpu(i, cpu_llc_shared_mask(cpu)) {
Tejun Heo0fe1e002009-10-29 22:34:14 +0900738 if (!per_cpu(ici_cpuid4_info, i))
Andreas Herrmanna326e942009-09-03 09:41:19 +0200739 continue;
Andreas Herrmanna326e942009-09-03 09:41:19 +0200740 this_leaf = CPUID4_INFO_IDX(i, index);
Yinghai Lub3d73362011-01-21 15:29:44 -0800741 for_each_cpu(sibling, cpu_llc_shared_mask(cpu)) {
Prarit Bhargavaebb682f2009-12-09 13:36:45 -0500742 if (!cpu_online(sibling))
743 continue;
744 set_bit(sibling, this_leaf->shared_cpu_map);
745 }
Andreas Herrmanna326e942009-09-03 09:41:19 +0200746 }
Andreas Herrmann32c32332012-02-08 20:52:29 +0100747 } else if ((c->x86 == 0x15) && ((index == 1) || (index == 2))) {
748 ret = 1;
749 for_each_cpu(i, cpu_sibling_mask(cpu)) {
750 if (!per_cpu(ici_cpuid4_info, i))
751 continue;
752 this_leaf = CPUID4_INFO_IDX(i, index);
753 for_each_cpu(sibling, cpu_sibling_mask(cpu)) {
754 if (!cpu_online(sibling))
755 continue;
756 set_bit(sibling, this_leaf->shared_cpu_map);
757 }
758 }
Andreas Herrmanna326e942009-09-03 09:41:19 +0200759 }
Andreas Herrmann32c32332012-02-08 20:52:29 +0100760
761 return ret;
762}
763
764static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
765{
766 struct _cpuid4_info *this_leaf, *sibling_leaf;
767 unsigned long num_threads_sharing;
768 int index_msb, i;
769 struct cpuinfo_x86 *c = &cpu_data(cpu);
770
771 if (c->x86_vendor == X86_VENDOR_AMD) {
772 if (cache_shared_amd_cpu_map_setup(cpu, index))
773 return;
774 }
775
Linus Torvalds1da177e2005-04-16 15:20:36 -0700776 this_leaf = CPUID4_INFO_IDX(cpu, index);
Thomas Gleixnerb7d11a72011-07-24 09:46:08 +0000777 num_threads_sharing = 1 + this_leaf->base.eax.split.num_threads_sharing;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700778
779 if (num_threads_sharing == 1)
Mike Travisf9b90562009-01-10 21:58:10 -0800780 cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map));
Siddha, Suresh B2b091872005-11-05 17:25:54 +0100781 else {
782 index_msb = get_count_order(num_threads_sharing);
783
784 for_each_online_cpu(i) {
Mike Travis92cb7612007-10-19 20:35:04 +0200785 if (cpu_data(i).apicid >> index_msb ==
786 c->apicid >> index_msb) {
Mike Travisf9b90562009-01-10 21:58:10 -0800787 cpumask_set_cpu(i,
788 to_cpumask(this_leaf->shared_cpu_map));
Tejun Heo0fe1e002009-10-29 22:34:14 +0900789 if (i != cpu && per_cpu(ici_cpuid4_info, i)) {
Mike Travisf9b90562009-01-10 21:58:10 -0800790 sibling_leaf =
791 CPUID4_INFO_IDX(i, index);
792 cpumask_set_cpu(cpu, to_cpumask(
793 sibling_leaf->shared_cpu_map));
Siddha, Suresh B2b091872005-11-05 17:25:54 +0100794 }
795 }
796 }
797 }
798}
Chuck Ebbert3bc9b762006-03-23 02:59:33 -0800799static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
Siddha, Suresh B2b091872005-11-05 17:25:54 +0100800{
801 struct _cpuid4_info *this_leaf, *sibling_leaf;
802 int sibling;
803
804 this_leaf = CPUID4_INFO_IDX(cpu, index);
Mike Travisf9b90562009-01-10 21:58:10 -0800805 for_each_cpu(sibling, to_cpumask(this_leaf->shared_cpu_map)) {
Ingo Molnarcdcf7722008-07-28 16:20:08 +0200806 sibling_leaf = CPUID4_INFO_IDX(sibling, index);
Mike Travisf9b90562009-01-10 21:58:10 -0800807 cpumask_clear_cpu(cpu,
808 to_cpumask(sibling_leaf->shared_cpu_map));
Siddha, Suresh B2b091872005-11-05 17:25:54 +0100809 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700810}
811#else
Alan Cox8bdbd962009-07-04 00:35:45 +0100812static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
813{
814}
815
816static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
817{
818}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700819#endif
820
Adrian Bunkf22d9bc2007-12-04 17:19:07 +0100821static void __cpuinit free_cache_attributes(unsigned int cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700822{
Akinobu Mitaef1d7152007-10-18 03:05:16 -0700823 int i;
824
825 for (i = 0; i < num_cache_leaves; i++)
826 cache_remove_shared_cpu_map(cpu, i);
827
Tejun Heo0fe1e002009-10-29 22:34:14 +0900828 kfree(per_cpu(ici_cpuid4_info, cpu));
829 per_cpu(ici_cpuid4_info, cpu) = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700830}
831
Sergio Luis60928482008-12-28 04:12:26 -0300832static void __cpuinit get_cpu_leaves(void *_retval)
Mike Travisb2bb8552008-12-16 17:34:03 -0800833{
834 int j, *retval = _retval, cpu = smp_processor_id();
835
836 /* Do cpuid and store the results */
837 for (j = 0; j < num_cache_leaves; j++) {
Thomas Gleixnerb7d11a72011-07-24 09:46:08 +0000838 struct _cpuid4_info *this_leaf = CPUID4_INFO_IDX(cpu, j);
839
840 *retval = cpuid4_cache_lookup_regs(j, &this_leaf->base);
Mike Travisb2bb8552008-12-16 17:34:03 -0800841 if (unlikely(*retval < 0)) {
842 int i;
843
844 for (i = 0; i < j; i++)
845 cache_remove_shared_cpu_map(cpu, i);
846 break;
847 }
848 cache_shared_cpu_map_setup(cpu, j);
849 }
850}
851
Ashok Raj1aa1a9f2005-10-30 14:59:50 -0800852static int __cpuinit detect_cache_attributes(unsigned int cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700853{
Akinobu Mitaef1d7152007-10-18 03:05:16 -0700854 int retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700855
856 if (num_cache_leaves == 0)
857 return -ENOENT;
858
Tejun Heo0fe1e002009-10-29 22:34:14 +0900859 per_cpu(ici_cpuid4_info, cpu) = kzalloc(
Linus Torvalds1da177e2005-04-16 15:20:36 -0700860 sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
Tejun Heo0fe1e002009-10-29 22:34:14 +0900861 if (per_cpu(ici_cpuid4_info, cpu) == NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700862 return -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700863
Mike Travisb2bb8552008-12-16 17:34:03 -0800864 smp_call_function_single(cpu, get_cpu_leaves, &retval, true);
Akinobu Mitaef1d7152007-10-18 03:05:16 -0700865 if (retval) {
Tejun Heo0fe1e002009-10-29 22:34:14 +0900866 kfree(per_cpu(ici_cpuid4_info, cpu));
867 per_cpu(ici_cpuid4_info, cpu) = NULL;
Akinobu Mitaef1d7152007-10-18 03:05:16 -0700868 }
869
Andi Kleene2cac782005-07-28 21:15:46 -0700870 return retval;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700871}
872
Linus Torvalds1da177e2005-04-16 15:20:36 -0700873#include <linux/kobject.h>
874#include <linux/sysfs.h>
Kay Sievers8a25a2f2011-12-21 14:29:42 -0800875#include <linux/cpu.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -0700876
/* pointer to kobject for cpuX/cache */
static DEFINE_PER_CPU(struct kobject *, ici_cache_kobject);

/* sysfs object representing one cache leaf (cpuX/cache/indexY) */
struct _index_kobject {
	struct kobject kobj;	/* embedded kobject registered with sysfs */
	unsigned int cpu;	/* CPU this leaf belongs to */
	unsigned short index;	/* cache leaf number (CPUID.4 subleaf) */
};

/* pointer to array of kobjects for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct _index_kobject *, ici_index_kobject);
#define INDEX_KOBJECT_PTR(x, y) (&((per_cpu(ici_index_kobject, x))[y]))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700889
/*
 * Generate a sysfs show routine printing one numeric field of the
 * cached CPUID(4) data.  'val' compensates for fields that CPUID
 * encodes 0-based but sysfs reports 1-based (line size, ways, ...).
 */
#define show_one_plus(file_name, object, val) \
static ssize_t show_##file_name(struct _cpuid4_info *this_leaf, char *buf, \
				 unsigned int cpu) \
{ \
	return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \
}

show_one_plus(level, base.eax.split.level, 0);
show_one_plus(coherency_line_size, base.ebx.split.coherency_line_size, 1);
show_one_plus(physical_line_partition, base.ebx.split.physical_line_partition, 1);
show_one_plus(ways_of_associativity, base.ebx.split.ways_of_associativity, 1);
show_one_plus(number_of_sets, base.ecx.split.number_of_sets, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700902
Hans Rosenfeldcabb5bd2011-02-07 18:10:39 +0100903static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf,
904 unsigned int cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700905{
Thomas Gleixnerb7d11a72011-07-24 09:46:08 +0000906 return sprintf(buf, "%luK\n", this_leaf->base.size / 1024);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700907}
908
Mike Travisfb0f3302008-04-08 11:43:02 -0700909static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
910 int type, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700911{
Mike Travisfb0f3302008-04-08 11:43:02 -0700912 ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
Mike Travis6b6309b2008-03-25 15:06:56 -0700913 int n = 0;
Mike Travis6b6309b2008-03-25 15:06:56 -0700914
Mike Travisfb0f3302008-04-08 11:43:02 -0700915 if (len > 1) {
Mike Travisf9b90562009-01-10 21:58:10 -0800916 const struct cpumask *mask;
Mike Travisfb0f3302008-04-08 11:43:02 -0700917
Mike Travisf9b90562009-01-10 21:58:10 -0800918 mask = to_cpumask(this_leaf->shared_cpu_map);
Alan Cox8bdbd962009-07-04 00:35:45 +0100919 n = type ?
Rusty Russell29c01772008-12-13 21:20:25 +1030920 cpulist_scnprintf(buf, len-2, mask) :
921 cpumask_scnprintf(buf, len-2, mask);
Mike Travisfb0f3302008-04-08 11:43:02 -0700922 buf[n++] = '\n';
923 buf[n] = '\0';
Mike Travis6b6309b2008-03-25 15:06:56 -0700924 }
925 return n;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700926}
927
Hans Rosenfeldcabb5bd2011-02-07 18:10:39 +0100928static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf,
929 unsigned int cpu)
Mike Travisfb0f3302008-04-08 11:43:02 -0700930{
931 return show_shared_cpu_map_func(leaf, 0, buf);
932}
933
Hans Rosenfeldcabb5bd2011-02-07 18:10:39 +0100934static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf,
935 unsigned int cpu)
Mike Travisfb0f3302008-04-08 11:43:02 -0700936{
937 return show_shared_cpu_map_func(leaf, 1, buf);
938}
939
Hans Rosenfeldcabb5bd2011-02-07 18:10:39 +0100940static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf,
941 unsigned int cpu)
Jiri Slaby4385cec2008-11-29 22:33:16 +0100942{
Thomas Gleixnerb7d11a72011-07-24 09:46:08 +0000943 switch (this_leaf->base.eax.split.type) {
Jiri Slaby4385cec2008-11-29 22:33:16 +0100944 case CACHE_TYPE_DATA:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700945 return sprintf(buf, "Data\n");
Jiri Slaby4385cec2008-11-29 22:33:16 +0100946 case CACHE_TYPE_INST:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700947 return sprintf(buf, "Instruction\n");
Jiri Slaby4385cec2008-11-29 22:33:16 +0100948 case CACHE_TYPE_UNIFIED:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700949 return sprintf(buf, "Unified\n");
Jiri Slaby4385cec2008-11-29 22:33:16 +0100950 default:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700951 return sprintf(buf, "Unknown\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700952 }
953}
954
/* Map an embedded kobject/attribute back to our wrapper structures. */
#define to_object(k) container_of(k, struct _index_kobject, kobj)
#define to_attr(a) container_of(a, struct _cache_attr, attr)

/* Declare a read-only (0444) cache attribute backed by show_<name>(). */
#define define_one_ro(_name) \
static struct _cache_attr _name = \
	__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(level);
define_one_ro(type);
define_one_ro(coherency_line_size);
define_one_ro(physical_line_partition);
define_one_ro(ways_of_associativity);
define_one_ro(number_of_sets);
define_one_ro(size);
define_one_ro(shared_cpu_map);
define_one_ro(shared_cpu_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700971
/* Attributes present on every cpuX/cache/indexY directory. */
static struct attribute *default_attrs[] = {
	&type.attr,
	&level.attr,
	&coherency_line_size.attr,
	&physical_line_partition.attr,
	&ways_of_associativity.attr,
	&number_of_sets.attr,
	&size.attr,
	&shared_cpu_map.attr,
	&shared_cpu_list.attr,
	NULL
};
984
Andreas Herrmann23ac4ae2010-09-17 18:03:43 +0200985#ifdef CONFIG_AMD_NB
Hans Rosenfeldf658bcf2010-10-29 17:14:32 +0200986static struct attribute ** __cpuinit amd_l3_attrs(void)
987{
988 static struct attribute **attrs;
989 int n;
990
991 if (attrs)
992 return attrs;
993
994 n = sizeof (default_attrs) / sizeof (struct attribute *);
995
996 if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
997 n += 2;
998
Hans Rosenfeldcabb5bd2011-02-07 18:10:39 +0100999 if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
1000 n += 1;
1001
Hans Rosenfeldf658bcf2010-10-29 17:14:32 +02001002 attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
1003 if (attrs == NULL)
1004 return attrs = default_attrs;
1005
1006 for (n = 0; default_attrs[n]; n++)
1007 attrs[n] = default_attrs[n];
1008
1009 if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
1010 attrs[n++] = &cache_disable_0.attr;
1011 attrs[n++] = &cache_disable_1.attr;
1012 }
1013
Hans Rosenfeldcabb5bd2011-02-07 18:10:39 +01001014 if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
1015 attrs[n++] = &subcaches.attr;
1016
Hans Rosenfeldf658bcf2010-10-29 17:14:32 +02001017 return attrs;
1018}
Borislav Petkovcb190602010-02-18 19:37:14 +01001019#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001020
Alan Cox8bdbd962009-07-04 00:35:45 +01001021static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001022{
1023 struct _cache_attr *fattr = to_attr(attr);
1024 struct _index_kobject *this_leaf = to_object(kobj);
1025 ssize_t ret;
1026
1027 ret = fattr->show ?
1028 fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
Hans Rosenfeldcabb5bd2011-02-07 18:10:39 +01001029 buf, this_leaf->cpu) :
Ingo Molnarcdcf7722008-07-28 16:20:08 +02001030 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001031 return ret;
1032}
1033
Alan Cox8bdbd962009-07-04 00:35:45 +01001034static ssize_t store(struct kobject *kobj, struct attribute *attr,
1035 const char *buf, size_t count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001036{
Mark Langsdorf8cb22bc2008-07-18 16:03:52 -05001037 struct _cache_attr *fattr = to_attr(attr);
1038 struct _index_kobject *this_leaf = to_object(kobj);
1039 ssize_t ret;
1040
Ingo Molnarcdcf7722008-07-28 16:20:08 +02001041 ret = fattr->store ?
1042 fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
Hans Rosenfeldcabb5bd2011-02-07 18:10:39 +01001043 buf, count, this_leaf->cpu) :
Mark Langsdorf8cb22bc2008-07-18 16:03:52 -05001044 0;
1045 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001046}
1047
/* Shared show/store dispatchers for both kobject types below. */
static const struct sysfs_ops sysfs_ops = {
	.show = show,
	.store = store,
};

/* Type of the per-leaf indexY kobjects; default_attrs may be swapped
 * to amd_l3_attrs() at registration time in cache_add_dev(). */
static struct kobj_type ktype_cache = {
	.sysfs_ops = &sysfs_ops,
	.default_attrs = default_attrs,
};

/* Type of the bare cpuX/cache directory kobject (no attributes). */
static struct kobj_type ktype_percpu_entry = {
	.sysfs_ops = &sysfs_ops,
};
1061
Akinobu Mitaef1d7152007-10-18 03:05:16 -07001062static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001063{
Tejun Heo0fe1e002009-10-29 22:34:14 +09001064 kfree(per_cpu(ici_cache_kobject, cpu));
1065 kfree(per_cpu(ici_index_kobject, cpu));
1066 per_cpu(ici_cache_kobject, cpu) = NULL;
1067 per_cpu(ici_index_kobject, cpu) = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001068 free_cache_attributes(cpu);
1069}
1070
Ashok Raj1aa1a9f2005-10-30 14:59:50 -08001071static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001072{
Akinobu Mitaef1d7152007-10-18 03:05:16 -07001073 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001074
1075 if (num_cache_leaves == 0)
1076 return -ENOENT;
1077
Akinobu Mitaef1d7152007-10-18 03:05:16 -07001078 err = detect_cache_attributes(cpu);
1079 if (err)
1080 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001081
1082 /* Allocate all required memory */
Tejun Heo0fe1e002009-10-29 22:34:14 +09001083 per_cpu(ici_cache_kobject, cpu) =
Mike Travis6b6309b2008-03-25 15:06:56 -07001084 kzalloc(sizeof(struct kobject), GFP_KERNEL);
Tejun Heo0fe1e002009-10-29 22:34:14 +09001085 if (unlikely(per_cpu(ici_cache_kobject, cpu) == NULL))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001086 goto err_out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001087
Tejun Heo0fe1e002009-10-29 22:34:14 +09001088 per_cpu(ici_index_kobject, cpu) = kzalloc(
Alan Cox8bdbd962009-07-04 00:35:45 +01001089 sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
Tejun Heo0fe1e002009-10-29 22:34:14 +09001090 if (unlikely(per_cpu(ici_index_kobject, cpu) == NULL))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001091 goto err_out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001092
1093 return 0;
1094
1095err_out:
1096 cpuid4_cache_sysfs_exit(cpu);
1097 return -ENOMEM;
1098}
1099
/* CPUs whose cache sysfs hierarchy was fully registered. */
static DECLARE_BITMAP(cache_dev_map, NR_CPUS);
Akinobu Mitaef1d7152007-10-18 03:05:16 -07001101
Linus Torvalds1da177e2005-04-16 15:20:36 -07001102/* Add/Remove cache interface for CPU device */
/*
 * Create the cpuX/cache sysfs hierarchy for the CPU behind 'dev':
 * one "cache" directory kobject plus one "indexY" kobject per leaf.
 * On any failure everything registered so far is unwound.
 * Returns 0 on success or a negative errno.
 */
static int __cpuinit cache_add_dev(struct device *dev)
{
	unsigned int cpu = dev->id;
	unsigned long i, j;
	struct _index_kobject *this_object;
	struct _cpuid4_info *this_leaf;
	int retval;

	retval = cpuid4_cache_sysfs_init(cpu);
	if (unlikely(retval < 0))
		return retval;

	/* Register the parent cpuX/cache directory. */
	retval = kobject_init_and_add(per_cpu(ici_cache_kobject, cpu),
				      &ktype_percpu_entry,
				      &dev->kobj, "%s", "cache");
	if (retval < 0) {
		cpuid4_cache_sysfs_exit(cpu);
		return retval;
	}

	for (i = 0; i < num_cache_leaves; i++) {
		this_object = INDEX_KOBJECT_PTR(cpu, i);
		this_object->cpu = cpu;
		this_object->index = i;

		this_leaf = CPUID4_INFO_IDX(cpu, i);

		/* Extend the attribute set for AMD L3 caches with a
		 * northbridge descriptor; otherwise use the defaults. */
		ktype_cache.default_attrs = default_attrs;
#ifdef CONFIG_AMD_NB
		if (this_leaf->base.nb)
			ktype_cache.default_attrs = amd_l3_attrs();
#endif
		retval = kobject_init_and_add(&(this_object->kobj),
					      &ktype_cache,
					      per_cpu(ici_cache_kobject, cpu),
					      "index%1lu", i);
		if (unlikely(retval)) {
			/* Unwind the leaves added so far, then the parent. */
			for (j = 0; j < i; j++)
				kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
			kobject_put(per_cpu(ici_cache_kobject, cpu));
			cpuid4_cache_sysfs_exit(cpu);
			return retval;
		}
		kobject_uevent(&(this_object->kobj), KOBJ_ADD);
	}
	/* Mark full registration so cache_remove_dev() knows to tear down. */
	cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));

	kobject_uevent(per_cpu(ici_cache_kobject, cpu), KOBJ_ADD);
	return 0;
}
1153
Kay Sievers8a25a2f2011-12-21 14:29:42 -08001154static void __cpuinit cache_remove_dev(struct device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001155{
Kay Sievers8a25a2f2011-12-21 14:29:42 -08001156 unsigned int cpu = dev->id;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001157 unsigned long i;
1158
Tejun Heo0fe1e002009-10-29 22:34:14 +09001159 if (per_cpu(ici_cpuid4_info, cpu) == NULL)
Andi Kleen2966c6a2007-09-11 14:02:11 +02001160 return;
Mike Travisf9b90562009-01-10 21:58:10 -08001161 if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
Akinobu Mitaef1d7152007-10-18 03:05:16 -07001162 return;
Mike Travisf9b90562009-01-10 21:58:10 -08001163 cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map));
Akinobu Mitaef1d7152007-10-18 03:05:16 -07001164
1165 for (i = 0; i < num_cache_leaves; i++)
Alan Cox8bdbd962009-07-04 00:35:45 +01001166 kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
Tejun Heo0fe1e002009-10-29 22:34:14 +09001167 kobject_put(per_cpu(ici_cache_kobject, cpu));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001168 cpuid4_cache_sysfs_exit(cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001169}
1170
Chandra Seetharaman9c7b2162006-06-27 02:54:07 -07001171static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
Ashok Raj1aa1a9f2005-10-30 14:59:50 -08001172 unsigned long action, void *hcpu)
1173{
1174 unsigned int cpu = (unsigned long)hcpu;
Kay Sievers8a25a2f2011-12-21 14:29:42 -08001175 struct device *dev;
Ashok Raj1aa1a9f2005-10-30 14:59:50 -08001176
Kay Sievers8a25a2f2011-12-21 14:29:42 -08001177 dev = get_cpu_device(cpu);
Ashok Raj1aa1a9f2005-10-30 14:59:50 -08001178 switch (action) {
1179 case CPU_ONLINE:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07001180 case CPU_ONLINE_FROZEN:
Kay Sievers8a25a2f2011-12-21 14:29:42 -08001181 cache_add_dev(dev);
Ashok Raj1aa1a9f2005-10-30 14:59:50 -08001182 break;
1183 case CPU_DEAD:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07001184 case CPU_DEAD_FROZEN:
Kay Sievers8a25a2f2011-12-21 14:29:42 -08001185 cache_remove_dev(dev);
Ashok Raj1aa1a9f2005-10-30 14:59:50 -08001186 break;
1187 }
1188 return NOTIFY_OK;
1189}
1190
/* Hotplug hook keeping the per-CPU cache sysfs entries in sync. */
static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = {
	.notifier_call = cacheinfo_cpu_callback,
};
1194
Ashok Raj1aa1a9f2005-10-30 14:59:50 -08001195static int __cpuinit cache_sysfs_init(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001196{
Ashok Raj1aa1a9f2005-10-30 14:59:50 -08001197 int i;
1198
Linus Torvalds1da177e2005-04-16 15:20:36 -07001199 if (num_cache_leaves == 0)
1200 return 0;
1201
Ashok Raj1aa1a9f2005-10-30 14:59:50 -08001202 for_each_online_cpu(i) {
Akinobu Mitaef1d7152007-10-18 03:05:16 -07001203 int err;
Kay Sievers8a25a2f2011-12-21 14:29:42 -08001204 struct device *dev = get_cpu_device(i);
Satyam Sharmac789c032007-10-17 18:04:40 +02001205
Kay Sievers8a25a2f2011-12-21 14:29:42 -08001206 err = cache_add_dev(dev);
Akinobu Mitaef1d7152007-10-18 03:05:16 -07001207 if (err)
1208 return err;
Ashok Raj1aa1a9f2005-10-30 14:59:50 -08001209 }
Akinobu Mitaef1d7152007-10-18 03:05:16 -07001210 register_hotcpu_notifier(&cacheinfo_cpu_notifier);
Ashok Raj1aa1a9f2005-10-30 14:59:50 -08001211 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001212}
1213
/* Runs at device initcall level, after the driver core is available. */
device_initcall(cache_sysfs_init);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001215
1216#endif