/*
 *    Copyright IBM Corp. 2007,2011
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

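/*
 * A combined region/segment table (crst) spans multiple pages: order 1
 * (8K) on 31-bit, order 2 (16K) on 64-bit. Page tables, by contrast,
 * are allocated as fragments of a single 4K page: four 1K tables on
 * 31-bit, two 2K tables on 64-bit. FRAG_MASK has one bit per fragment
 * and is used to track them in page->_mapcount.
 */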
#ifndef CONFIG_64BIT
#define ALLOC_ORDER	1
#define FRAG_MASK	0x0f
#else
#define ALLOC_ORDER	2
#define FRAG_MASK	0x03
#endif

unsigned long VMALLOC_START = VMALLOC_END - VMALLOC_SIZE;
EXPORT_SYMBOL(VMALLOC_START);

static int __init parse_vmalloc(char *arg)
{
	if (!arg)
		return -EINVAL;
	VMALLOC_START = (VMALLOC_END - memparse(arg, &arg)) & PAGE_MASK;
	return 0;
}
early_param("vmalloc", parse_vmalloc);

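/*
 * Allocate one combined region/segment table. The table is returned
 * uninitialized; callers set it up with crst_table_init().
 */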
unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);

	if (!page)
		return NULL;
	return (unsigned long *) page_to_phys(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	free_pages((unsigned long) table, ALLOC_ORDER);
}

#ifdef CONFIG_64BIT
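/*
 * Grow the address space of a 64-bit mm by stacking additional region
 * tables on top of the current page table root: from a 2G (segment
 * table) to a 4T (region-third) and on to an 8P (region-second) limit.
 * Used when a mapping request does not fit below the current
 * asce_limit.
 */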
int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
{
	unsigned long *table, *pgd;
	unsigned long entry;

	BUG_ON(limit > (1UL << 53));
repeat:
	table = crst_table_alloc(mm);
	if (!table)
		return -ENOMEM;
	spin_lock_bh(&mm->page_table_lock);
	if (mm->context.asce_limit < limit) {
		pgd = (unsigned long *) mm->pgd;
		if (mm->context.asce_limit <= (1UL << 31)) {
			entry = _REGION3_ENTRY_EMPTY;
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
		} else {
			entry = _REGION2_ENTRY_EMPTY;
			mm->context.asce_limit = 1UL << 53;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION2;
		}
		crst_table_init(table, entry);
		pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
		mm->pgd = (pgd_t *) table;
		mm->task_size = mm->context.asce_limit;
		table = NULL;
	}
	spin_unlock_bh(&mm->page_table_lock);
	if (table)
		crst_table_free(mm, table);
	if (mm->context.asce_limit < limit)
		goto repeat;
	update_mm(mm, current);
	return 0;
}

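/*
 * Shrink the address space again by removing region tables from the
 * top of the hierarchy until asce_limit is at or below the requested
 * limit. The caller must make sure that no user mappings exist above
 * the new limit.
 */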
void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
{
	pgd_t *pgd;

	if (mm->context.asce_limit <= limit)
		return;
	__tlb_flush_mm(mm);
	while (mm->context.asce_limit > limit) {
		pgd = mm->pgd;
		switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
		case _REGION_ENTRY_TYPE_R2:
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
			break;
		case _REGION_ENTRY_TYPE_R3:
			mm->context.asce_limit = 1UL << 31;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_SEGMENT;
			break;
		default:
			BUG();
		}
		mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
		mm->task_size = mm->context.asce_limit;
		crst_table_free(mm, (unsigned long *) pgd);
	}
	update_mm(mm, current);
}
#endif

#ifdef CONFIG_PGSTE

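/*
 * With CONFIG_PGSTE a guest address space (gmap) can be attached to an
 * mm. The gmap owns a private four level region-1 table whose segment
 * entries point to the page tables of the parent mm, so host and guest
 * mappings share the pte level.
 */
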
/**
 * gmap_alloc - allocate a guest address space
 * @mm: pointer to the parent mm_struct
 *
 * Returns a guest address space structure, or NULL if out of memory.
 */
struct gmap *gmap_alloc(struct mm_struct *mm)
{
	struct gmap *gmap;
	struct page *page;
	unsigned long *table;

	gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
	if (!gmap)
		goto out;
	INIT_LIST_HEAD(&gmap->crst_list);
	gmap->mm = mm;
	page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
	if (!page)
		goto out_free;
	list_add(&page->lru, &gmap->crst_list);
	table = (unsigned long *) page_to_phys(page);
	crst_table_init(table, _REGION1_ENTRY_EMPTY);
	gmap->table = table;
	gmap->asce = _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH |
		     _ASCE_USER_BITS | __pa(table);
	list_add(&gmap->list, &mm->context.gmap_list);
	return gmap;

out_free:
	kfree(gmap);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(gmap_alloc);

static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table)
{
	struct gmap_pgtable *mp;
	struct gmap_rmap *rmap;
	struct page *page;

	if (*table & _SEGMENT_ENTRY_INV)
		return 0;
	page = pfn_to_page(*table >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	list_for_each_entry(rmap, &mp->mapper, list) {
		if (rmap->entry != table)
			continue;
		list_del(&rmap->list);
		kfree(rmap);
		break;
	}
	*table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
	return 1;
}

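/*
 * Flush all TLB entries for the guest address space, by IDTE if the
 * machine supports it and by a full global flush otherwise.
 */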
static void gmap_flush_tlb(struct gmap *gmap)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte((unsigned long) gmap->table |
				 _ASCE_TYPE_REGION1);
	else
		__tlb_flush_global();
}

/**
 * gmap_free - free a guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_free(struct gmap *gmap)
{
	struct page *page, *next;
	unsigned long *table;
	int i;

	/* Flush tlb. */
	gmap_flush_tlb(gmap);

	/* Free all segment & region tables. */
	down_read(&gmap->mm->mmap_sem);
	spin_lock(&gmap->mm->page_table_lock);
	list_for_each_entry_safe(page, next, &gmap->crst_list, lru) {
		table = (unsigned long *) page_to_phys(page);
		if ((*table & _REGION_ENTRY_TYPE_MASK) == 0)
			/* Remove gmap rmap structures for segment table. */
			for (i = 0; i < PTRS_PER_PMD; i++, table++)
				gmap_unlink_segment(gmap, table);
		__free_pages(page, ALLOC_ORDER);
	}
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	list_del(&gmap->list);
	kfree(gmap);
}
EXPORT_SYMBOL_GPL(gmap_free);

/**
 * gmap_enable - switch primary space to the guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_enable(struct gmap *gmap)
{
	S390_lowcore.gmap = (unsigned long) gmap;
}
EXPORT_SYMBOL_GPL(gmap_enable);

/**
 * gmap_disable - switch back to the standard primary address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_disable(struct gmap *gmap)
{
	S390_lowcore.gmap = 0UL;
}
EXPORT_SYMBOL_GPL(gmap_disable);

/*
 * gmap_alloc_table is assumed to be called with mmap_sem held
 */
static int gmap_alloc_table(struct gmap *gmap,
			    unsigned long *table, unsigned long init)
{
	struct page *page;
	unsigned long *new;

	page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
	if (!page)
		return -ENOMEM;
	new = (unsigned long *) page_to_phys(page);
	crst_table_init(new, init);
	if (*table & _REGION_ENTRY_INV) {
		list_add(&page->lru, &gmap->crst_list);
		*table = (unsigned long) new | _REGION_ENTRY_LENGTH |
			(*table & _REGION_ENTRY_TYPE_MASK);
	} else
		__free_pages(page, ALLOC_ORDER);
	return 0;
}

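/*
 * A guest address is translated by indexing the four gmap table levels
 * with the address bits above 53 (region-first), 42 (region-second),
 * 31 (region-third) and 20 (segment); each table has 2048 entries,
 * hence the "& 0x7ff" in the table walks below.
 */
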
/**
 * gmap_unmap_segment - unmap segment from the guest address space
 * @gmap: pointer to the guest address space structure
 * @to: address in the guest address space
 * @len: length of the memory area to unmap
 *
 * Returns 0 if the unmap succeeded, -EINVAL if not.
 */
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
{
	unsigned long *table;
	unsigned long off;
	int flush;

	if ((to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || to + len < to)
		return -EINVAL;

	flush = 0;
	down_read(&gmap->mm->mmap_sem);
	spin_lock(&gmap->mm->page_table_lock);
	for (off = 0; off < len; off += PMD_SIZE) {
		/* Walk the guest addr space page table */
		table = gmap->table + (((to + off) >> 53) & 0x7ff);
		if (*table & _REGION_ENTRY_INV)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 42) & 0x7ff);
		if (*table & _REGION_ENTRY_INV)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 31) & 0x7ff);
		if (*table & _REGION_ENTRY_INV)
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 20) & 0x7ff);

		/* Clear segment table entry in guest address space. */
		flush |= gmap_unlink_segment(gmap, table);
		*table = _SEGMENT_ENTRY_INV;
	}
out:
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	return 0;
}
EXPORT_SYMBOL_GPL(gmap_unmap_segment);

/**
 * gmap_map_segment - map a segment to the guest address space
 * @gmap: pointer to the guest address space structure
 * @from: source address in the parent address space
 * @to: target address in the guest address space
 * @len: length of the memory area to map
 *
 * Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not.
 */
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long len)
{
	unsigned long *table;
	unsigned long off;
	int flush;

	if ((from | to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || from + len > PGDIR_SIZE ||
	    from + len < from || to + len < to)
		return -EINVAL;

	flush = 0;
	down_read(&gmap->mm->mmap_sem);
	spin_lock(&gmap->mm->page_table_lock);
	for (off = 0; off < len; off += PMD_SIZE) {
		/* Walk the gmap address space page table */
		table = gmap->table + (((to + off) >> 53) & 0x7ff);
		if ((*table & _REGION_ENTRY_INV) &&
		    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY))
			goto out_unmap;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 42) & 0x7ff);
		if ((*table & _REGION_ENTRY_INV) &&
		    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY))
			goto out_unmap;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 31) & 0x7ff);
		if ((*table & _REGION_ENTRY_INV) &&
		    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY))
			goto out_unmap;
		table = (unsigned long *) (*table & _REGION_ENTRY_ORIGIN);
		table = table + (((to + off) >> 20) & 0x7ff);

		/* Store 'from' address in an invalid segment table entry. */
		flush |= gmap_unlink_segment(gmap, table);
		*table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | (from + off);
	}
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	return 0;

out_unmap:
	spin_unlock(&gmap->mm->page_table_lock);
	up_read(&gmap->mm->mmap_sem);
	gmap_unmap_segment(gmap, to, len);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(gmap_map_segment);

/*
 * __gmap_fault is assumed to be called with mmap_sem held
 */
unsigned long __gmap_fault(unsigned long address, struct gmap *gmap)
{
	unsigned long *table, vmaddr, segment;
	struct mm_struct *mm;
	struct gmap_pgtable *mp;
	struct gmap_rmap *rmap;
	struct vm_area_struct *vma;
	struct page *page;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	current->thread.gmap_addr = address;
	mm = gmap->mm;
	/* Walk the gmap address space page table */
	table = gmap->table + ((address >> 53) & 0x7ff);
	if (unlikely(*table & _REGION_ENTRY_INV))
		return -EFAULT;
	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	table = table + ((address >> 42) & 0x7ff);
	if (unlikely(*table & _REGION_ENTRY_INV))
		return -EFAULT;
	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	table = table + ((address >> 31) & 0x7ff);
	if (unlikely(*table & _REGION_ENTRY_INV))
		return -EFAULT;
	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	table = table + ((address >> 20) & 0x7ff);

	/* Convert the gmap address to an mm address. */
	segment = *table;
	if (likely(!(segment & _SEGMENT_ENTRY_INV))) {
		page = pfn_to_page(segment >> PAGE_SHIFT);
		mp = (struct gmap_pgtable *) page->index;
		return mp->vmaddr | (address & ~PMD_MASK);
	} else if (segment & _SEGMENT_ENTRY_RO) {
		vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
		vma = find_vma(mm, vmaddr);
		if (!vma || vma->vm_start > vmaddr)
			return -EFAULT;

		/* Walk the parent mm page table */
		pgd = pgd_offset(mm, vmaddr);
		pud = pud_alloc(mm, pgd, vmaddr);
		if (!pud)
			return -ENOMEM;
		pmd = pmd_alloc(mm, pud, vmaddr);
		if (!pmd)
			return -ENOMEM;
		if (!pmd_present(*pmd) &&
		    __pte_alloc(mm, vma, pmd, vmaddr))
			return -ENOMEM;
		/* pmd now points to a valid segment table entry. */
		rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT);
		if (!rmap)
			return -ENOMEM;
		/* Link gmap segment table entry location to page table. */
		page = pmd_page(*pmd);
		mp = (struct gmap_pgtable *) page->index;
		rmap->entry = table;
		spin_lock(&mm->page_table_lock);
		list_add(&rmap->list, &mp->mapper);
		spin_unlock(&mm->page_table_lock);
		/* Set gmap segment table entry to page table. */
		*table = pmd_val(*pmd) & PAGE_MASK;
		return vmaddr | (address & ~PMD_MASK);
	}
	return -EFAULT;
}

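/**
 * gmap_fault - resolve a fault on a guest address
 * @address: guest address in the gmap address space
 * @gmap: pointer to the guest address space structure
 *
 * Returns the parent mm address that backs the guest address, or
 * -EFAULT/-ENOMEM as pseudo error codes if the translation fails.
 */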
unsigned long gmap_fault(unsigned long address, struct gmap *gmap)
{
	unsigned long rc;

	down_read(&gmap->mm->mmap_sem);
	rc = __gmap_fault(address, gmap);
	up_read(&gmap->mm->mmap_sem);

	return rc;
}
EXPORT_SYMBOL_GPL(gmap_fault);

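/**
 * gmap_discard - discard the pages backing a guest address range
 * @from: first guest address to discard
 * @to: guest address one past the last byte to discard
 * @gmap: pointer to the guest address space structure
 *
 * Zaps the parent mm page ranges that back the mapped segments in the
 * given guest address range.
 */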
void gmap_discard(unsigned long from, unsigned long to, struct gmap *gmap)
{
	unsigned long *table, address, size;
	struct vm_area_struct *vma;
	struct gmap_pgtable *mp;
	struct page *page;

	down_read(&gmap->mm->mmap_sem);
	address = from;
	while (address < to) {
		/* Walk the gmap address space page table */
		table = gmap->table + ((address >> 53) & 0x7ff);
		if (unlikely(*table & _REGION_ENTRY_INV)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + ((address >> 42) & 0x7ff);
		if (unlikely(*table & _REGION_ENTRY_INV)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + ((address >> 31) & 0x7ff);
		if (unlikely(*table & _REGION_ENTRY_INV)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		table = table + ((address >> 20) & 0x7ff);
		if (unlikely(*table & _SEGMENT_ENTRY_INV)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}
		page = pfn_to_page(*table >> PAGE_SHIFT);
		mp = (struct gmap_pgtable *) page->index;
		vma = find_vma(gmap->mm, mp->vmaddr);
		size = min(to - address, PMD_SIZE - (address & ~PMD_MASK));
		zap_page_range(vma, mp->vmaddr | (address & ~PMD_MASK),
			       size, NULL);
		address = (address + PMD_SIZE) & PMD_MASK;
	}
	up_read(&gmap->mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(gmap_discard);

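/*
 * Called when a page table is removed from the parent mm. All gmap
 * segment table entries that still point to the page table are reset
 * to invalid and the TLB is flushed.
 */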
void gmap_unmap_notifier(struct mm_struct *mm, unsigned long *table)
{
	struct gmap_rmap *rmap, *next;
	struct gmap_pgtable *mp;
	struct page *page;
	int flush;

	flush = 0;
	spin_lock(&mm->page_table_lock);
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	list_for_each_entry_safe(rmap, next, &mp->mapper, list) {
		*rmap->entry =
			_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
		list_del(&rmap->list);
		kfree(rmap);
		flush = 1;
	}
	spin_unlock(&mm->page_table_lock);
	if (flush)
		__tlb_flush_global();
}

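/*
 * A pgste page table occupies a full 4K page: the lower half holds the
 * 256 ptes, the upper half the corresponding page status table
 * extensions (pgstes) needed by KVM. page->index carries the
 * gmap_pgtable descriptor and page->_mapcount is set to 3 to mark the
 * page as fully allocated.
 */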
static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
						    unsigned long vmaddr)
{
	struct page *page;
	unsigned long *table;
	struct gmap_pgtable *mp;

	page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
	if (!page)
		return NULL;
	mp = kmalloc(sizeof(*mp), GFP_KERNEL|__GFP_REPEAT);
	if (!mp) {
		__free_page(page);
		return NULL;
	}
	pgtable_page_ctor(page);
	mp->vmaddr = vmaddr & PMD_MASK;
	INIT_LIST_HEAD(&mp->mapper);
	page->index = (unsigned long) mp;
	atomic_set(&page->_mapcount, 3);
	table = (unsigned long *) page_to_phys(page);
	clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2);
	clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
	return table;
}

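/*
 * Release a pgste page table. The page must no longer be linked into
 * any gmap, i.e. the mapper list has to be empty.
 */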
static inline void page_table_free_pgste(unsigned long *table)
{
	struct page *page;
	struct gmap_pgtable *mp;

	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	mp = (struct gmap_pgtable *) page->index;
	BUG_ON(!list_empty(&mp->mapper));
	pgtable_page_dtor(page);
	atomic_set(&page->_mapcount, -1);
	kfree(mp);
	__free_page(page);
}

#else /* CONFIG_PGSTE */

static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
						    unsigned long vmaddr)
{
	return NULL;
}

static inline void page_table_free_pgste(unsigned long *table)
{
}

static inline void gmap_unmap_notifier(struct mm_struct *mm,
					unsigned long *table)
{
}

#endif /* CONFIG_PGSTE */

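/*
 * Atomically toggle the given bits in *v and return the new value.
 * Used on page->_mapcount to track which 1K/2K page table fragments
 * of a 4K page are in use.
 */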
static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
	unsigned int old, new;

	do {
		old = atomic_read(v);
		new = old ^ bits;
	} while (atomic_cmpxchg(v, old, new) != old);
	return new;
}

/*
 * Page table entry allocation/free routines. A 4K page normally holds
 * several page table fragments (see FRAG_MASK above); the low bits of
 * page->_mapcount record which fragments are allocated, and pages with
 * free fragments are kept on mm->context.pgtable_list.
 */
unsigned long *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr)
{
	struct page *page;
	unsigned long *table;
	unsigned int mask, bit;

	if (mm_has_pgste(mm))
		return page_table_alloc_pgste(mm, vmaddr);
	/* Allocate fragments of a 4K page as 1K/2K page table */
	spin_lock_bh(&mm->context.list_lock);
	mask = FRAG_MASK;
	if (!list_empty(&mm->context.pgtable_list)) {
		page = list_first_entry(&mm->context.pgtable_list,
					struct page, lru);
		table = (unsigned long *) page_to_phys(page);
		mask = atomic_read(&page->_mapcount);
		mask = mask | (mask >> 4);
	}
	if ((mask & FRAG_MASK) == FRAG_MASK) {
		spin_unlock_bh(&mm->context.list_lock);
		page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
		if (!page)
			return NULL;
		pgtable_page_ctor(page);
		atomic_set(&page->_mapcount, 1);
		table = (unsigned long *) page_to_phys(page);
		clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
		spin_lock_bh(&mm->context.list_lock);
		list_add(&page->lru, &mm->context.pgtable_list);
	} else {
		for (bit = 1; mask & bit; bit <<= 1)
			table += PTRS_PER_PTE;
		mask = atomic_xor_bits(&page->_mapcount, bit);
		if ((mask & FRAG_MASK) == FRAG_MASK)
			list_del(&page->lru);
	}
	spin_unlock_bh(&mm->context.list_lock);
	return table;
}

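/*
 * Return a 1K/2K page table fragment to its 4K page. The page stays
 * on the mm's pgtable_list while it has free fragments and is freed
 * once the last fragment is released.
 */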
void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned int bit, mask;

	if (mm_has_pgste(mm)) {
		gmap_unmap_notifier(mm, table);
		return page_table_free_pgste(table);
	}
	/* Free 1K/2K page table fragment of a 4K page */
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	bit = 1 << ((__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t)));
	spin_lock_bh(&mm->context.list_lock);
	if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
		list_del(&page->lru);
	mask = atomic_xor_bits(&page->_mapcount, bit);
	if (mask & FRAG_MASK)
		list_add(&page->lru, &mm->context.pgtable_list);
	spin_unlock_bh(&mm->context.list_lock);
	if (mask == 0) {
		pgtable_page_dtor(page);
		atomic_set(&page->_mapcount, -1);
		__free_page(page);
	}
}

#ifdef CONFIG_HAVE_RCU_TABLE_FREE

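/*
 * Page tables that may still be reachable by a concurrent lockless
 * page table walk are freed via tlb_remove_table() and an RCU grace
 * period. The table type is encoded in the low bits of the table
 * pointer: FRAG_MASK marks a pgste page table, bit << 4 a fragment
 * bit, and 0 a crst table.
 */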
static void __page_table_free_rcu(void *table, unsigned bit)
{
	struct page *page;

	if (bit == FRAG_MASK)
		return page_table_free_pgste(table);
	/* Free 1K/2K page table fragment of a 4K page */
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (atomic_xor_bits(&page->_mapcount, bit) == 0) {
		pgtable_page_dtor(page);
		atomic_set(&page->_mapcount, -1);
		__free_page(page);
	}
}

void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table)
{
	struct mm_struct *mm;
	struct page *page;
	unsigned int bit, mask;

	mm = tlb->mm;
	if (mm_has_pgste(mm)) {
		gmap_unmap_notifier(mm, table);
		table = (unsigned long *) (__pa(table) | FRAG_MASK);
		tlb_remove_table(tlb, table);
		return;
	}
	bit = 1 << ((__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t)));
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	spin_lock_bh(&mm->context.list_lock);
	if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
		list_del(&page->lru);
	mask = atomic_xor_bits(&page->_mapcount, bit | (bit << 4));
	if (mask & FRAG_MASK)
		list_add_tail(&page->lru, &mm->context.pgtable_list);
	spin_unlock_bh(&mm->context.list_lock);
	table = (unsigned long *) (__pa(table) | (bit << 4));
	tlb_remove_table(tlb, table);
}

void __tlb_remove_table(void *_table)
{
	const unsigned long mask = (FRAG_MASK << 4) | FRAG_MASK;
	void *table = (void *)((unsigned long) _table & ~mask);
	unsigned type = (unsigned long) _table & mask;

	if (type)
		__page_table_free_rcu(table, type);
	else
		free_pages((unsigned long) table, ALLOC_ORDER);
}

#endif

/*
 * Switch on pgstes for the userspace process (needed for kvm).
 */
int s390_enable_sie(void)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm, *old_mm;

	/* Do we have a switched amode? If not, we cannot do sie */
	if (user_mode == HOME_SPACE_MODE)
		return -EINVAL;

	/* Do we have pgstes? If yes, we are done */
	if (mm_has_pgste(tsk->mm))
		return 0;

	/* Let's check if we are allowed to replace the mm */
	task_lock(tsk);
	if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
#ifdef CONFIG_AIO
	    !hlist_empty(&tsk->mm->ioctx_list) ||
#endif
	    tsk->mm != tsk->active_mm) {
		task_unlock(tsk);
		return -EINVAL;
	}
	task_unlock(tsk);

	/* We copy the mm and let dup_mm create the page tables with pgstes */
	tsk->mm->context.alloc_pgste = 1;
	mm = dup_mm(tsk);
	tsk->mm->context.alloc_pgste = 0;
	if (!mm)
		return -ENOMEM;

	/* Now let's check again if something happened */
	task_lock(tsk);
	if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
#ifdef CONFIG_AIO
	    !hlist_empty(&tsk->mm->ioctx_list) ||
#endif
	    tsk->mm != tsk->active_mm) {
		mmput(mm);
		task_unlock(tsk);
		return -EINVAL;
	}

	/* Ok, we are alone. No ptrace, no threads, etc. */
	old_mm = tsk->mm;
	tsk->mm = tsk->active_mm = mm;
	preempt_disable();
	update_mm(mm, tsk);
	atomic_inc(&mm->context.attach_count);
	atomic_dec(&old_mm->context.attach_count);
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
	preempt_enable();
	task_unlock(tsk);
	mmput(old_mm);
	return 0;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);

#if defined(CONFIG_DEBUG_PAGEALLOC) && defined(CONFIG_HIBERNATION)
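/*
 * Test whether a kernel page is mapped, using the load real address
 * (lra) instruction: condition code 0 means the translation exists.
 * Needed by the hibernation core when CONFIG_DEBUG_PAGEALLOC unmaps
 * free pages.
 */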
bool kernel_page_present(struct page *page)
{
	unsigned long addr;
	int cc;

	addr = page_to_phys(page);
	asm volatile(
		"	lra	%1,0(%1)\n"
		"	ipm	%0\n"
		"	srl	%0,28"
		: "=d" (cc), "+a" (addr) : : "cc");
	return cc == 0;
}
#endif /* CONFIG_DEBUG_PAGEALLOC && CONFIG_HIBERNATION */