/*
 * Handle caching attributes in page tables (PAT)
 *
 * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *          Suresh B Siddha <suresh.b.siddha@intel.com>
 *
 * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen.
 */

#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/fs.h>

#include <asm/msr.h>
#include <asm/tlbflush.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/pat.h>
#include <asm/e820.h>
#include <asm/cacheflush.h>
#include <asm/fcntl.h>
#include <asm/mtrr.h>

int pat_wc_enabled = 1;

static u64 __read_mostly boot_pat_state;

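/*
 * "nopat" kernel parameter: booting with "nopat" on the command line
 * disables PAT support before it is ever enabled.
 */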
static int nopat(char *str)
{
	pat_wc_enabled = 0;
	printk(KERN_INFO "x86: PAT support disabled.\n");

	return 0;
}
early_param("nopat", nopat);

static int pat_known_cpu(void)
{
	if (!pat_wc_enabled)
		return 0;

	if (cpu_has_pat)
		return 1;

	pat_wc_enabled = 0;
	printk(KERN_INFO "CPU and/or kernel does not support PAT.\n");
	return 0;
}

enum {
	PAT_UC = 0,		/* uncached */
	PAT_WC = 1,		/* Write combining */
	PAT_WT = 4,		/* Write Through */
	PAT_WP = 5,		/* Write Protected */
	PAT_WB = 6,		/* Write Back (default) */
	PAT_UC_MINUS = 7,	/* UC, but can be overridden by MTRR */
};

#define PAT(x,y) ((u64)PAT_ ## y << ((x)*8))
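/*
 * PAT(x, y) places memory type PAT_y into byte x of the 64-bit
 * IA32_PAT MSR image. For example, PAT(1, WC) expands to
 * (u64)1 << 8, i.e. PAT entry 1 set to Write Combining.
 */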

void pat_init(void)
{
	u64 pat;

#ifndef CONFIG_X86_PAT
	nopat(NULL);
#endif

	/* Boot CPU enables PAT based on CPU feature */
	if (!smp_processor_id() && !pat_known_cpu())
		return;

	/* APs enable PAT iff boot CPU has enabled it before */
	if (smp_processor_id() && !pat_wc_enabled)
		return;

	/*
	 * Program the PWT-only entries (1 and 5) to Write-Combining;
	 * all other PAT entries keep their power-on defaults.
	 *
	 * PTE encoding used in Linux:
	 *      PAT
	 *      |PCD
	 *      ||PWT
	 *      |||
	 *      000 WB		_PAGE_CACHE_WB
	 *      001 WC		_PAGE_CACHE_WC
	 *      010 UC-		_PAGE_CACHE_UC_MINUS
	 *      011 UC		_PAGE_CACHE_UC
	 * PAT bit unused
	 */
	pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
	      PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC);
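	/*
	 * With the PAT_* values above (WB = 6, WC = 1, UC_MINUS = 7,
	 * UC = 0), the expression above works out to the MSR value
	 * 0x0007010600070106.
	 */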

	/* Boot CPU check */
	if (!smp_processor_id())
		rdmsrl(MSR_IA32_CR_PAT, boot_pat_state);

	wrmsrl(MSR_IA32_CR_PAT, pat);
	printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n",
	       smp_processor_id(), boot_pat_state, pat);
}

#undef PAT

static char *cattr_name(unsigned long flags)
{
	switch (flags & _PAGE_CACHE_MASK) {
	case _PAGE_CACHE_UC:		return "uncached";
	case _PAGE_CACHE_UC_MINUS:	return "uncached-minus";
	case _PAGE_CACHE_WB:		return "write-back";
	case _PAGE_CACHE_WC:		return "write-combining";
	default:			return "broken";
	}
}

/*
 * The global memtype list keeps track of the memory type used for specific
 * physical memory areas. Conflicting memory types in different
 * mappings can cause CPU cache corruption, so we keep track of them here.
 *
 * The list is sorted by starting address and can contain multiple
 * entries for the same address (this allows reference counting of
 * overlapping areas). All aliases of an area have the same cache
 * attributes, of course. Zero attributes are represented as holes.
 *
 * Currently the data structure is a list because the number of mappings
 * is expected to be relatively small. If this should become a problem,
 * it could be changed to an rbtree or similar.
 *
 * memtype_lock protects the whole list.
 */

struct memtype {
	u64 start;
	u64 end;
	unsigned long type;
	struct list_head nd;
};

static LIST_HEAD(memtype_list);
static DEFINE_SPINLOCK(memtype_lock);	/* protects memtype list */

/*
 * Intersects the PAT memory type with the MTRR memory type and returns
 * the resulting memory type as PAT understands it.
 * (PAT and MTRR do not use the same numeric values for a given type.)
 * The intersection is based on the "Effective Memory Type" tables in the
 * IA-32 SDM, vol 3a.
 */
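/*
 * Summary of what the code below computes, for the MTRR types it
 * handles (requested PAT type down, MTRR type across):
 *
 *	PAT \ MTRR	UC	WC	WB
 *	WB		UC	WC	WB
 *	WC		WC	WC	WC
 *	UC-		UC-	UC-	UC-
 *	UC		UC	UC	UC
 */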
static int pat_x_mtrr_type(u64 start, u64 end, unsigned long prot,
				unsigned long *ret_prot)
{
	unsigned long pat_type;
	u8 mtrr_type;

	mtrr_type = mtrr_type_lookup(start, end);
	if (mtrr_type == 0xFF) {		/* MTRR not enabled */
		*ret_prot = prot;
		return 0;
	}
	if (mtrr_type == 0xFE) {		/* MTRR match error */
		*ret_prot = _PAGE_CACHE_UC;
		return -1;
	}
	if (mtrr_type != MTRR_TYPE_UNCACHABLE &&
	    mtrr_type != MTRR_TYPE_WRBACK &&
	    mtrr_type != MTRR_TYPE_WRCOMB) {	/* MTRR type unhandled */
		*ret_prot = _PAGE_CACHE_UC;
		return -1;
	}

	pat_type = prot & _PAGE_CACHE_MASK;
	prot &= (~_PAGE_CACHE_MASK);

	/* Currently doing intersection by hand. Optimize it later. */
	if (pat_type == _PAGE_CACHE_WC) {
		*ret_prot = prot | _PAGE_CACHE_WC;
	} else if (pat_type == _PAGE_CACHE_UC_MINUS) {
		*ret_prot = prot | _PAGE_CACHE_UC_MINUS;
	} else if (pat_type == _PAGE_CACHE_UC ||
		   mtrr_type == MTRR_TYPE_UNCACHABLE) {
		*ret_prot = prot | _PAGE_CACHE_UC;
	} else if (mtrr_type == MTRR_TYPE_WRCOMB) {
		*ret_prot = prot | _PAGE_CACHE_WC;
	} else {
		*ret_prot = prot | _PAGE_CACHE_WB;
	}

	return 0;
}

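/*
 * reserve_memtype - reserve a physical memory range with a memory type
 * @start, @end:  range of physical addresses ([start, end))
 * @req_type:     requested _PAGE_CACHE_* memory type
 * @ret_type:     if non-NULL, returns the type actually granted, which
 *                may differ from @req_type when the range overlaps an
 *                already tracked entry of a different type
 *
 * Returns 0 on success, -EBUSY on conflict with an existing mapping,
 * -EINVAL or -ENOMEM on other failures.
 *
 * A sketch of a hypothetical caller wanting a write-combining mapping
 * (names and the surrounding mapping code are illustrative only):
 *
 *	unsigned long type;
 *
 *	if (!reserve_memtype(base, base + size, _PAGE_CACHE_WC, &type)) {
 *		... establish the mapping using "type" ...
 *		free_memtype(base, base + size);
 *	}
 */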
int reserve_memtype(u64 start, u64 end, unsigned long req_type,
			unsigned long *ret_type)
{
	struct memtype *new_entry = NULL;
	struct memtype *parse;
	unsigned long actual_type;
	int err = 0;

	/* Only track when pat_wc_enabled */
	if (!pat_wc_enabled) {
		if (ret_type)
			*ret_type = req_type;

		return 0;
	}

	/* Low ISA region is always mapped WB in the page table. No need to track */
	if (start >= ISA_START_ADDRESS && (end - 1) <= ISA_END_ADDRESS) {
		if (ret_type)
			*ret_type = _PAGE_CACHE_WB;

		return 0;
	}

	req_type &= _PAGE_CACHE_MASK;
	err = pat_x_mtrr_type(start, end, req_type, &actual_type);
	if (err) {
		if (ret_type)
			*ret_type = actual_type;

		return -EINVAL;
	}

	new_entry = kmalloc(sizeof(struct memtype), GFP_KERNEL);
	if (!new_entry)
		return -ENOMEM;

	new_entry->start = start;
	new_entry->end = end;
	new_entry->type = actual_type;

	if (ret_type)
		*ret_type = actual_type;

	spin_lock(&memtype_lock);

	/* Search for existing mapping that overlaps the current range */
	list_for_each_entry(parse, &memtype_list, nd) {
		struct memtype *saved_ptr;

		if (parse->start >= end) {
			printk(KERN_DEBUG "New Entry\n");
			list_add(&new_entry->nd, parse->nd.prev);
			new_entry = NULL;
			break;
		}

		if (start <= parse->start && end >= parse->start) {
			if (actual_type != parse->type && ret_type) {
				actual_type = parse->type;
				*ret_type = actual_type;
				new_entry->type = actual_type;
			}

			if (actual_type != parse->type) {
				printk(KERN_INFO
				"%s:%d conflicting memory types %Lx-%Lx %s<->%s\n",
					current->comm, current->pid,
					start, end,
					cattr_name(actual_type),
					cattr_name(parse->type));
				err = -EBUSY;
				break;
			}

			saved_ptr = parse;
			/*
			 * Check to see whether the request overlaps more
			 * than one entry in the list
			 */
			list_for_each_entry_continue(parse, &memtype_list, nd) {
				if (end <= parse->start)
					break;

				if (actual_type != parse->type) {
					printk(KERN_INFO
				"%s:%d conflicting memory types %Lx-%Lx %s<->%s\n",
						current->comm, current->pid,
						start, end,
						cattr_name(actual_type),
						cattr_name(parse->type));
					err = -EBUSY;
					break;
				}
			}

			if (err)
				break;

			printk(KERN_DEBUG "Overlap at 0x%Lx-0x%Lx\n",
				saved_ptr->start, saved_ptr->end);
			/* No conflict. Go ahead and add this new entry */
			list_add(&new_entry->nd, saved_ptr->nd.prev);
			new_entry = NULL;
			break;
		}

		if (start < parse->end) {
			if (actual_type != parse->type && ret_type) {
				actual_type = parse->type;
				*ret_type = actual_type;
				new_entry->type = actual_type;
			}

			if (actual_type != parse->type) {
				printk(KERN_INFO
				"%s:%d conflicting memory types %Lx-%Lx %s<->%s\n",
					current->comm, current->pid,
					start, end,
					cattr_name(actual_type),
					cattr_name(parse->type));
				err = -EBUSY;
				break;
			}

			saved_ptr = parse;
			/*
			 * Check to see whether the request overlaps more
			 * than one entry in the list
			 */
			list_for_each_entry_continue(parse, &memtype_list, nd) {
				if (end <= parse->start)
					break;

				if (actual_type != parse->type) {
					printk(KERN_INFO
				"%s:%d conflicting memory types %Lx-%Lx %s<->%s\n",
						current->comm, current->pid,
						start, end,
						cattr_name(actual_type),
						cattr_name(parse->type));
					err = -EBUSY;
					break;
				}
			}

			if (err)
				break;

			printk(KERN_DEBUG "Overlap at 0x%Lx-0x%Lx\n",
				saved_ptr->start, saved_ptr->end);
			/* No conflict. Go ahead and add this new entry */
			list_add(&new_entry->nd, &saved_ptr->nd);
			new_entry = NULL;
			break;
		}
	}

	if (err) {
		printk(KERN_DEBUG
		"reserve_memtype failed 0x%Lx-0x%Lx, track %s, req %s\n",
			start, end, cattr_name(new_entry->type),
			cattr_name(req_type));
		kfree(new_entry);
		spin_unlock(&memtype_lock);
		return err;
	}

	if (new_entry) {
		/* No conflict. Not yet added to the list. Add to the tail */
		list_add_tail(&new_entry->nd, &memtype_list);
		printk(KERN_DEBUG "New Entry\n");
	}

	if (ret_type) {
		printk(KERN_DEBUG
		"reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n",
			start, end, cattr_name(actual_type),
			cattr_name(req_type), cattr_name(*ret_type));
	} else {
		printk(KERN_DEBUG
		"reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s\n",
			start, end, cattr_name(actual_type),
			cattr_name(req_type));
	}

	spin_unlock(&memtype_lock);
	return err;
}

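/*
 * free_memtype - release a range previously reserved with reserve_memtype()
 * @start, @end: must match the exact range that was reserved
 *
 * Returns 0 on success (or when tracking is not needed), -EINVAL if no
 * matching entry is found.
 */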
int free_memtype(u64 start, u64 end)
{
	struct memtype *ml;
	int err = -EINVAL;

	/* Only track when pat_wc_enabled */
	if (!pat_wc_enabled)
		return 0;

	/* Low ISA region is always mapped WB. No need to track */
	if (start >= ISA_START_ADDRESS && (end - 1) <= ISA_END_ADDRESS)
		return 0;

	spin_lock(&memtype_lock);
	list_for_each_entry(ml, &memtype_list, nd) {
		if (ml->start == start && ml->end == end) {
			list_del(&ml->nd);
			kfree(ml);
			err = 0;
			break;
		}
	}
	spin_unlock(&memtype_lock);

	if (err) {
		printk(KERN_DEBUG "%s:%d freeing invalid memtype %Lx-%Lx\n",
			current->comm, current->pid, start, end);
	}

	printk(KERN_DEBUG "free_memtype request 0x%Lx-0x%Lx\n", start, end);
	return err;
}