/*
 * Handle caching attributes in page tables (PAT)
 *
 * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *          Suresh B Siddha <suresh.b.siddha@intel.com>
 *
 * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen.
 */

#include <linux/seq_file.h>
#include <linux/bootmem.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/rbtree.h>

#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/x86_init.h>
#include <asm/pgtable.h>
#include <asm/fcntl.h>
#include <asm/e820.h>
#include <asm/mtrr.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/pat.h>
#include <asm/io.h>

#include "pat_internal.h"

#ifdef CONFIG_X86_PAT
int __read_mostly pat_enabled = 1;

static inline void pat_disable(const char *reason)
{
	pat_enabled = 0;
	printk(KERN_INFO "%s\n", reason);
}

static int __init nopat(char *str)
{
	pat_disable("PAT support disabled.");
	return 0;
}
early_param("nopat", nopat);
#else
static inline void pat_disable(const char *reason)
{
	(void)reason;
}
#endif


int pat_debug_enable;

static int __init pat_debug_setup(char *str)
{
	pat_debug_enable = 1;
	return 0;
}
__setup("debugpat", pat_debug_setup);
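
/*
 * Note (added commentary, not in the original file): booting with
 * "debugpat" on the kernel command line sets pat_debug_enable, which
 * turns the dprintk() calls below (defined in pat_internal.h) into
 * real printk(KERN_INFO) output.
 */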

static u64 __read_mostly boot_pat_state;

enum {
	PAT_UC = 0,		/* uncached */
	PAT_WC = 1,		/* Write combining */
	PAT_WT = 4,		/* Write Through */
	PAT_WP = 5,		/* Write Protected */
	PAT_WB = 6,		/* Write Back (default) */
	PAT_UC_MINUS = 7,	/* UC, but can be overridden by MTRR */
};

#define PAT(x, y) ((u64)PAT_ ## y << ((x)*8))

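/*
 * Worked example (added commentary): PAT(1, WC) expands to
 * (u64)PAT_WC << 8, i.e. 0x01ULL << 8, so each PAT(x, y) term fills
 * byte x of the 64-bit IA32_PAT MSR image with the encoding for type y.
 */
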
void pat_init(void)
{
	u64 pat;
	bool boot_cpu = !boot_pat_state;

	if (!pat_enabled)
		return;

	if (!cpu_has_pat) {
		if (!boot_pat_state) {
			pat_disable("PAT not supported by CPU.");
			return;
		} else {
			/*
			 * If this happens we are on a secondary CPU, but
			 * switched to PAT on the boot CPU. We have no way to
			 * undo PAT.
			 */
			printk(KERN_ERR "PAT enabled, "
			       "but not supported by secondary CPU\n");
			BUG();
		}
	}

	/* Set PWT to Write-Combining. All other bits stay the same */
	/*
	 * PTE encoding used in Linux:
	 *      PAT
	 *      |PCD
	 *      ||PWT
	 *      |||
	 *      000 WB		_PAGE_CACHE_WB
	 *      001 WC		_PAGE_CACHE_WC
	 *      010 UC-		_PAGE_CACHE_UC_MINUS
	 *      011 UC		_PAGE_CACHE_UC
	 * PAT bit unused
	 */
	pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
	      PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC);

	/* Boot CPU check */
	if (!boot_pat_state)
		rdmsrl(MSR_IA32_CR_PAT, boot_pat_state);

	wrmsrl(MSR_IA32_CR_PAT, pat);

	if (boot_cpu)
		printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n",
		       smp_processor_id(), boot_pat_state, pat);
}
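
/*
 * Illustrative boot output (added commentary; example values, not
 * captured from a real log): with the layout above, the boot CPU
 * typically prints
 *
 *	x86 PAT enabled: cpu 0, old 0x7040600070406, new 0x7010600070106
 *
 * where "old" is the power-on default PAT and "new" is the value
 * composed from the PAT() terms in pat_init().
 */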

#undef PAT

static DEFINE_SPINLOCK(memtype_lock);	/* protects memtype accesses */

/*
 * Does intersection of PAT memory type and MTRR memory type and returns
 * the resulting memory type as PAT understands it.
 * (PAT and MTRR types do not use the same numeric encoding.)
 * The intersection is based on the "Effective Memory Type" tables in
 * IA-32 SDM vol 3a.
 */
static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
{
	/*
	 * Look for MTRR hint to get the effective type in case where PAT
	 * request is for WB.
	 */
	if (req_type == _PAGE_CACHE_WB) {
		u8 mtrr_type;

		mtrr_type = mtrr_type_lookup(start, end);
		if (mtrr_type != MTRR_TYPE_WRBACK)
			return _PAGE_CACHE_UC_MINUS;

		return _PAGE_CACHE_WB;
	}

	return req_type;
}

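/*
 * Example (added commentary): a _PAGE_CACHE_WB request for a range an
 * MTRR marks WC or UC comes back as _PAGE_CACHE_UC_MINUS from the
 * helper above; WC, UC- and UC requests pass through unchanged, since
 * those already take precedence over the MTRR type.
 */
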
struct pagerange_state {
	unsigned long	cur_pfn;
	int		ram;
	int		not_ram;
};

static int
pagerange_is_ram_callback(unsigned long initial_pfn, unsigned long total_nr_pages, void *arg)
{
	struct pagerange_state *state = arg;

	state->not_ram |= initial_pfn > state->cur_pfn;
	state->ram |= total_nr_pages > 0;
	state->cur_pfn = initial_pfn + total_nr_pages;

	return state->ram && state->not_ram;
}

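/*
 * Return convention (added commentary): 1 if the whole range is RAM,
 * 0 if none of it is, -1 if it mixes RAM and non-RAM pages and can
 * therefore not be tracked with a single memtype.
 */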
static int pat_pagerange_is_ram(resource_size_t start, resource_size_t end)
{
	int ret = 0;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long end_pfn = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct pagerange_state state = {start_pfn, 0, 0};

	/*
	 * For legacy reasons, physical address range in the legacy ISA
	 * region is tracked as non-RAM. This will allow users of /dev/mem
	 * to map portions of the legacy ISA region, even when some of
	 * those portions are listed (or not even listed) with different
	 * e820 types (RAM/reserved/...).
	 */
	if (start_pfn < ISA_END_ADDRESS >> PAGE_SHIFT)
		start_pfn = ISA_END_ADDRESS >> PAGE_SHIFT;

	if (start_pfn < end_pfn) {
		ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn,
				&state, pagerange_is_ram_callback);
	}

	return (ret > 0) ? -1 : (state.ram ? 1 : 0);
}

/*
 * For RAM pages, we use page flags to mark the pages with appropriate type.
 * Here we do two passes:
 * - Find the memtype of all the pages in the range, look for any conflicts
 * - In case of no conflicts, set the new memtype for pages in the range
 */
static int reserve_ram_pages_type(u64 start, u64 end, unsigned long req_type,
				  unsigned long *new_type)
{
	struct page *page;
	u64 pfn;

	if (req_type == _PAGE_CACHE_UC) {
		/* We do not support strong UC */
		WARN_ON_ONCE(1);
		req_type = _PAGE_CACHE_UC_MINUS;
	}

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		unsigned long type;

		page = pfn_to_page(pfn);
		type = get_page_memtype(page);
		if (type != -1) {
			printk(KERN_INFO "reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%lx, req 0x%lx\n",
			       start, end - 1, type, req_type);
			if (new_type)
				*new_type = type;

			return -EBUSY;
		}
	}

	if (new_type)
		*new_type = req_type;

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		page = pfn_to_page(pfn);
		set_page_memtype(page, req_type);
	}
	return 0;
}

static int free_ram_pages_type(u64 start, u64 end)
{
	struct page *page;
	u64 pfn;

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		page = pfn_to_page(pfn);
		set_page_memtype(page, -1);
	}
	return 0;
}

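/*
 * Note (added commentary, an assumption based on pat_internal.h and
 * asm/cacheflush.h of this kernel generation): get_page_memtype() and
 * set_page_memtype() encode the memtype in a pair of page->flags bits,
 * so RAM tracking needs neither an allocation nor the memtype rbtree;
 * -1 denotes the untracked, default WB state.
 */
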
/*
 * req_type typically holds one of:
 * - _PAGE_CACHE_WB
 * - _PAGE_CACHE_WC
 * - _PAGE_CACHE_UC_MINUS
 * - _PAGE_CACHE_UC
 *
 * If new_type is NULL, the function returns an error if it cannot reserve
 * the region with req_type. If new_type is non-NULL, the type actually
 * available for the region is returned in *new_type on success. On any
 * error a negative value is returned.
 */
int reserve_memtype(u64 start, u64 end, unsigned long req_type,
		    unsigned long *new_type)
{
	struct memtype *new;
	unsigned long actual_type;
	int is_range_ram;
	int err = 0;

	BUG_ON(start >= end); /* end is exclusive */

	if (!pat_enabled) {
		/* This is identical to page table setting without PAT */
		if (new_type) {
			if (req_type == _PAGE_CACHE_WC)
				*new_type = _PAGE_CACHE_UC_MINUS;
			else
				*new_type = req_type & _PAGE_CACHE_MASK;
		}
		return 0;
	}

	/* Low ISA region is always mapped WB in page table. No need to track */
	if (x86_platform.is_untracked_pat_range(start, end)) {
		if (new_type)
			*new_type = _PAGE_CACHE_WB;
		return 0;
	}

	/*
	 * Call mtrr_lookup to get the type hint. This is an
	 * optimization for /dev/mem mmap'ers into WB memory (BIOS
	 * tools and ACPI tools). Use WB request for WB memory and use
	 * UC_MINUS otherwise.
	 */
	actual_type = pat_x_mtrr_type(start, end, req_type & _PAGE_CACHE_MASK);

	if (new_type)
		*new_type = actual_type;

	is_range_ram = pat_pagerange_is_ram(start, end);
	if (is_range_ram == 1) {
		err = reserve_ram_pages_type(start, end, req_type, new_type);
		return err;
	} else if (is_range_ram < 0) {
		return -EINVAL;
	}

	new = kzalloc(sizeof(struct memtype), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	new->start = start;
	new->end = end;
	new->type = actual_type;

	spin_lock(&memtype_lock);

	err = rbt_memtype_check_insert(new, new_type);
	if (err) {
		printk(KERN_INFO "reserve_memtype failed [mem %#010Lx-%#010Lx], track %s, req %s\n",
		       start, end - 1,
		       cattr_name(new->type), cattr_name(req_type));
		kfree(new);
		spin_unlock(&memtype_lock);

		return err;
	}

	spin_unlock(&memtype_lock);

	dprintk("reserve_memtype added [mem %#010Lx-%#010Lx], track %s, req %s, ret %s\n",
		start, end - 1, cattr_name(new->type), cattr_name(req_type),
		new_type ? cattr_name(*new_type) : "-");

	return err;
}

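/*
 * Usage sketch (added commentary; a hypothetical caller, mirroring what
 * io_reserve_memtype() below does for MMIO ranges):
 *
 *	unsigned long new_type;
 *
 *	if (!reserve_memtype(paddr, paddr + size, _PAGE_CACHE_WC, &new_type)) {
 *		... map using new_type: it may have been demoted, e.g.
 *		... to _PAGE_CACHE_UC_MINUS by a conflicting MTRR
 *		free_memtype(paddr, paddr + size);
 *	}
 */
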
int free_memtype(u64 start, u64 end)
{
	int err = -EINVAL;
	int is_range_ram;
	struct memtype *entry;

	if (!pat_enabled)
		return 0;

	/* Low ISA region is always mapped WB. No need to track */
	if (x86_platform.is_untracked_pat_range(start, end))
		return 0;

	is_range_ram = pat_pagerange_is_ram(start, end);
	if (is_range_ram == 1) {
		err = free_ram_pages_type(start, end);
		return err;
	} else if (is_range_ram < 0) {
		return -EINVAL;
	}

	spin_lock(&memtype_lock);
	entry = rbt_memtype_erase(start, end);
	spin_unlock(&memtype_lock);

	if (!entry) {
		printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
		       current->comm, current->pid, start, end - 1);
		return -EINVAL;
	}

	kfree(entry);

	dprintk("free_memtype request [mem %#010Lx-%#010Lx]\n", start, end - 1);

	return 0;
}


/**
 * lookup_memtype - Looks up the memory type for a physical address
 * @paddr: physical address of which memory type needs to be looked up
 *
 * Only to be called when PAT is enabled
 *
 * Returns _PAGE_CACHE_WB, _PAGE_CACHE_WC, _PAGE_CACHE_UC_MINUS or
 * _PAGE_CACHE_UC
 */
static unsigned long lookup_memtype(u64 paddr)
{
	int rettype = _PAGE_CACHE_WB;
	struct memtype *entry;

	if (x86_platform.is_untracked_pat_range(paddr, paddr + PAGE_SIZE))
		return rettype;

	if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) {
		struct page *page;

		page = pfn_to_page(paddr >> PAGE_SHIFT);
		rettype = get_page_memtype(page);
		/*
		 * -1 from get_page_memtype() implies RAM page is in its
		 * default state and not reserved, and hence of type WB
		 */
		if (rettype == -1)
			rettype = _PAGE_CACHE_WB;

		return rettype;
	}

	spin_lock(&memtype_lock);

	entry = rbt_memtype_lookup(paddr);
	if (entry != NULL)
		rettype = entry->type;
	else
		rettype = _PAGE_CACHE_UC_MINUS;

	spin_unlock(&memtype_lock);
	return rettype;
}

/**
 * io_reserve_memtype - Request a memory type mapping for a region of memory
 * @start: start (physical address) of the region
 * @end: end (physical address) of the region
 * @type: A pointer to memtype, with requested type. On success, requested
 * or any other compatible type that was available for the region is returned
 *
 * On success, returns 0
 * On failure, returns non-zero
 */
int io_reserve_memtype(resource_size_t start, resource_size_t end,
			unsigned long *type)
{
	resource_size_t size = end - start;
	unsigned long req_type = *type;
	unsigned long new_type;
	int ret;

	WARN_ON_ONCE(iomem_map_sanity_check(start, size));

	ret = reserve_memtype(start, end, req_type, &new_type);
	if (ret)
		goto out_err;

	if (!is_new_memtype_allowed(start, size, req_type, new_type))
		goto out_free;

	if (kernel_map_sync_memtype(start, size, new_type) < 0)
		goto out_free;

	*type = new_type;
	return 0;

out_free:
	free_memtype(start, end);
	ret = -EBUSY;
out_err:
	return ret;
}

/**
 * io_free_memtype - Release a memory type mapping for a region of memory
 * @start: start (physical address) of the region
 * @end: end (physical address) of the region
 */
void io_free_memtype(resource_size_t start, resource_size_t end)
{
	free_memtype(start, end);
}

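/*
 * Usage sketch for the pair above (added commentary; "res" is a
 * hypothetical struct resource describing an MMIO window):
 *
 *	unsigned long type = _PAGE_CACHE_UC_MINUS;
 *
 *	if (io_reserve_memtype(res->start, res->end, &type))
 *		return -EBUSY;
 *	... create the mapping with "type", use it ...
 *	io_free_memtype(res->start, res->end);
 */
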
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				unsigned long size, pgprot_t vma_prot)
{
	return vma_prot;
}

#ifdef CONFIG_STRICT_DEVMEM
/* This check is done in drivers/char/mem.c in case of STRICT_DEVMEM */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#else
/* This check is needed to avoid cache aliasing when PAT is enabled */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	if (!pat_enabled)
		return 1;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn)) {
			printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
			       current->comm, from, to - 1);
			return 0;
		}
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#endif /* CONFIG_STRICT_DEVMEM */

int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
				unsigned long size, pgprot_t *vma_prot)
{
	unsigned long flags = _PAGE_CACHE_WB;

	if (!range_is_allowed(pfn, size))
		return 0;

	if (file->f_flags & O_DSYNC)
		flags = _PAGE_CACHE_UC_MINUS;

#ifdef CONFIG_X86_32
	/*
	 * On the PPro and successors, the MTRRs are used to set
	 * memory types for physical addresses outside main memory,
	 * so blindly setting UC or PWT on those pages is wrong.
	 * For Pentiums and earlier, the surround logic should disable
	 * caching for the high addresses through the KEN pin, but
	 * we maintain the tradition of paranoia in this code.
	 */
	if (!pat_enabled &&
	    !(boot_cpu_has(X86_FEATURE_MTRR) ||
	      boot_cpu_has(X86_FEATURE_K6_MTRR) ||
	      boot_cpu_has(X86_FEATURE_CYRIX_ARR) ||
	      boot_cpu_has(X86_FEATURE_CENTAUR_MCR)) &&
	    (pfn << PAGE_SHIFT) >= __pa(high_memory)) {
		flags = _PAGE_CACHE_UC;
	}
#endif

	*vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
			     flags);
	return 1;
}

/*
 * Change the memory type for the physical address range in kernel identity
 * mapping space if that range is a part of identity map.
 */
int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
{
	unsigned long id_sz;

	if (base > __pa(high_memory-1))
		return 0;

	/*
	 * some areas in the middle of the kernel identity range
	 * are not mapped, like the PCI space.
	 */
	if (!page_is_ram(base >> PAGE_SHIFT))
		return 0;

	id_sz = (__pa(high_memory-1) <= base + size) ?
				__pa(high_memory) - base :
				size;

	if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
		printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
			"for [mem %#010Lx-%#010Lx]\n",
			current->comm, current->pid,
			cattr_name(flags),
			base, (unsigned long long)(base + size-1));
		return -EINVAL;
	}
	return 0;
}

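/*
 * Example (added commentary): a caller that has reserved
 * [paddr, paddr + size) as WC keeps the kernel identity map coherent
 * with the new mapping via
 *
 *	if (kernel_map_sync_memtype(paddr, size, _PAGE_CACHE_WC) < 0)
 *		... back out, e.g. free_memtype(paddr, paddr + size) ...
 *
 * exactly as io_reserve_memtype() above and reserve_pfn_range() below do.
 */
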
/*
 * Internal interface to reserve a range of physical memory with prot.
 * Reserves non-RAM regions only; after a successful reserve_memtype, it
 * also keeps the identity mapping (if any) in sync with the new prot.
 */
static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
				int strict_prot)
{
	int is_ram = 0;
	int ret;
	unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);
	unsigned long flags = want_flags;

	is_ram = pat_pagerange_is_ram(paddr, paddr + size);

	/*
	 * reserve_pfn_range() for RAM pages. We do not refcount to keep
	 * track of the number of mappings of RAM pages. We can assert that
	 * the type requested matches the type of the first page in the range.
	 */
	if (is_ram) {
		if (!pat_enabled)
			return 0;

		flags = lookup_memtype(paddr);
		if (want_flags != flags) {
			printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
				current->comm, current->pid,
				cattr_name(want_flags),
				(unsigned long long)paddr,
				(unsigned long long)(paddr + size - 1),
				cattr_name(flags));
			*vma_prot = __pgprot((pgprot_val(*vma_prot) &
					      (~_PAGE_CACHE_MASK)) |
					     flags);
		}
		return 0;
	}

	ret = reserve_memtype(paddr, paddr + size, want_flags, &flags);
	if (ret)
		return ret;

	if (flags != want_flags) {
		if (strict_prot ||
		    !is_new_memtype_allowed(paddr, size, want_flags, flags)) {
			free_memtype(paddr, paddr + size);
			printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
				" for [mem %#010Lx-%#010Lx], got %s\n",
				current->comm, current->pid,
				cattr_name(want_flags),
				(unsigned long long)paddr,
				(unsigned long long)(paddr + size - 1),
				cattr_name(flags));
			return -EINVAL;
		}
		/*
		 * We allow returning different type than the one requested in
		 * non strict case.
		 */
		*vma_prot = __pgprot((pgprot_val(*vma_prot) &
				      (~_PAGE_CACHE_MASK)) |
				     flags);
	}

	if (kernel_map_sync_memtype(paddr, size, flags) < 0) {
		free_memtype(paddr, paddr + size);
		return -EINVAL;
	}
	return 0;
}

/*
 * Internal interface to free a range of physical memory.
 * Frees non-RAM regions only.
 */
static void free_pfn_range(u64 paddr, unsigned long size)
{
	int is_ram;

	is_ram = pat_pagerange_is_ram(paddr, paddr + size);
	if (is_ram == 0)
		free_memtype(paddr, paddr + size);
}

/*
 * track_pfn_copy is called when the vma covering the pfnmap gets copied
 * through copy_page_range().
 *
 * If the vma has a linear pfn mapping for the entire range, we get the prot
 * from the pte and reserve the entire vma range with a single
 * reserve_pfn_range call.
 */
int track_pfn_copy(struct vm_area_struct *vma)
{
	resource_size_t paddr;
	unsigned long prot;
	unsigned long vma_size = vma->vm_end - vma->vm_start;
	pgprot_t pgprot;

	if (vma->vm_flags & VM_PAT) {
		/*
		 * reserve the whole chunk covered by vma. We need the
		 * starting address and protection from pte.
		 */
		if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
			WARN_ON_ONCE(1);
			return -EINVAL;
		}
		pgprot = __pgprot(prot);
		return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
	}

	return 0;
}

/*
 * prot is passed in as a parameter for the new mapping. If the vma has
 * a linear pfn mapping for the entire range, reserve the entire vma
 * range with a single reserve_pfn_range call.
 */
int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
		    unsigned long pfn, unsigned long addr, unsigned long size)
{
	resource_size_t paddr = (resource_size_t)pfn << PAGE_SHIFT;
	unsigned long flags;

	/* reserve the whole chunk starting from paddr */
	if (addr == vma->vm_start && size == (vma->vm_end - vma->vm_start)) {
		int ret;

		ret = reserve_pfn_range(paddr, size, prot, 0);
		if (!ret)
			vma->vm_flags |= VM_PAT;
		return ret;
	}

	if (!pat_enabled)
		return 0;

	/*
	 * For anything smaller than the vma size we set prot based on the
	 * lookup.
	 */
	flags = lookup_memtype(paddr);

	/* Check memtype for the remaining pages */
	while (size > PAGE_SIZE) {
		size -= PAGE_SIZE;
		paddr += PAGE_SIZE;
		if (flags != lookup_memtype(paddr))
			return -EINVAL;
	}

	*prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) |
			 flags);

	return 0;
}

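/*
 * Call-chain note (added commentary, simplified): track_pfn_remap()
 * above is invoked from remap_pfn_range() before the ptes are
 * installed, so a driver mmap handler that remaps its whole vma gets
 * the range reserved and VM_PAT set here; track_pfn_insert() below
 * serves the single-pfn vm_insert_pfn() path.
 */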
int track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
		     unsigned long pfn)
{
	unsigned long flags;

	if (!pat_enabled)
		return 0;

	/* Set prot based on lookup */
	flags = lookup_memtype((resource_size_t)pfn << PAGE_SHIFT);
	*prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) |
			 flags);

	return 0;
}

/*
 * untrack_pfn is called while unmapping a pfnmap for a region.
 * untrack can be called for a specific region indicated by pfn and size or
 * can be for the entire vma (in which case pfn, size are zero).
 */
void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
		 unsigned long size)
{
	resource_size_t paddr;
	unsigned long prot;

	if (!(vma->vm_flags & VM_PAT))
		return;

	/* free the chunk starting from pfn or the whole chunk */
	paddr = (resource_size_t)pfn << PAGE_SHIFT;
	if (!paddr && !size) {
		if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
			WARN_ON_ONCE(1);
			return;
		}

		size = vma->vm_end - vma->vm_start;
	}
	free_pfn_range(paddr, size);
	vma->vm_flags &= ~VM_PAT;
}

pgprot_t pgprot_writecombine(pgprot_t prot)
{
	if (pat_enabled)
		return __pgprot(pgprot_val(prot) | _PAGE_CACHE_WC);
	else
		return pgprot_noncached(prot);
}
EXPORT_SYMBOL_GPL(pgprot_writecombine);

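/*
 * Example (added commentary): a framebuffer driver can request WC for
 * its mmap with
 *
 *	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 *
 * and transparently falls back to an uncached mapping when PAT is
 * disabled.
 */
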
#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT)

static struct memtype *memtype_get_idx(loff_t pos)
{
	struct memtype *print_entry;
	int ret;

	print_entry = kzalloc(sizeof(struct memtype), GFP_KERNEL);
	if (!print_entry)
		return NULL;

	spin_lock(&memtype_lock);
	ret = rbt_memtype_copy_nth_element(print_entry, pos);
	spin_unlock(&memtype_lock);

	if (!ret) {
		return print_entry;
	} else {
		kfree(print_entry);
		return NULL;
	}
}

static void *memtype_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos == 0) {
		++*pos;
		seq_printf(seq, "PAT memtype list:\n");
	}

	return memtype_get_idx(*pos);
}

static void *memtype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return memtype_get_idx(*pos);
}

static void memtype_seq_stop(struct seq_file *seq, void *v)
{
}

static int memtype_seq_show(struct seq_file *seq, void *v)
{
	struct memtype *print_entry = (struct memtype *)v;

	seq_printf(seq, "%s @ 0x%Lx-0x%Lx\n", cattr_name(print_entry->type),
			print_entry->start, print_entry->end);
	kfree(print_entry);

	return 0;
}

static const struct seq_operations memtype_seq_ops = {
	.start = memtype_seq_start,
	.next = memtype_seq_next,
	.stop = memtype_seq_stop,
	.show = memtype_seq_show,
};

static int memtype_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &memtype_seq_ops);
}

static const struct file_operations memtype_fops = {
	.open = memtype_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static int __init pat_memtype_list_init(void)
{
	if (pat_enabled) {
		debugfs_create_file("pat_memtype_list", S_IRUSR,
				    arch_debugfs_dir, NULL, &memtype_fops);
	}
	return 0;
}

late_initcall(pat_memtype_list_init);
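
/*
 * Illustrative output (added commentary; addresses are made up):
 *
 *	# cat /sys/kernel/debug/x86/pat_memtype_list
 *	PAT memtype list:
 *	uncached-minus @ 0xfed00000-0xfed01000
 *	write-combining @ 0xd0000000-0xd8000000
 *
 * Entries come from rbt_memtype_copy_nth_element() via the seq_file
 * iterator above; the type names are produced by cattr_name().
 */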

#endif /* CONFIG_DEBUG_FS && CONFIG_X86_PAT */