/*
 * KVM guest address space mapping code
 *
 * Copyright IBM Corp. 2007, 2016
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/ksm.h>
#include <linux/mman.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>

/**
 * gmap_alloc - allocate a guest address space
 * @mm: pointer to the parent mm_struct
 * @limit: maximum address of the gmap address space
 *
 * Returns a guest address space structure.
 */
struct gmap *gmap_alloc(struct mm_struct *mm, unsigned long limit)
{
	struct gmap *gmap;
	struct page *page;
	unsigned long *table;
	unsigned long etype, atype;

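	/*
	 * Pick the smallest address space type (segment table or
	 * region-3/2/1 table) whose range covers the requested limit,
	 * and round the limit up to the end of that range.
	 */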
	if (limit < (1UL << 31)) {
		limit = (1UL << 31) - 1;
		atype = _ASCE_TYPE_SEGMENT;
		etype = _SEGMENT_ENTRY_EMPTY;
	} else if (limit < (1UL << 42)) {
		limit = (1UL << 42) - 1;
		atype = _ASCE_TYPE_REGION3;
		etype = _REGION3_ENTRY_EMPTY;
	} else if (limit < (1UL << 53)) {
		limit = (1UL << 53) - 1;
		atype = _ASCE_TYPE_REGION2;
		etype = _REGION2_ENTRY_EMPTY;
	} else {
		limit = -1UL;
		atype = _ASCE_TYPE_REGION1;
		etype = _REGION1_ENTRY_EMPTY;
	}
	gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
	if (!gmap)
		goto out;
	INIT_LIST_HEAD(&gmap->crst_list);
	INIT_RADIX_TREE(&gmap->guest_to_host, GFP_KERNEL);
	INIT_RADIX_TREE(&gmap->host_to_guest, GFP_ATOMIC);
	spin_lock_init(&gmap->guest_table_lock);
	gmap->mm = mm;
	page = alloc_pages(GFP_KERNEL, 2);
	if (!page)
		goto out_free;
	page->index = 0;
	list_add(&page->lru, &gmap->crst_list);
	table = (unsigned long *) page_to_phys(page);
	crst_table_init(table, etype);
	gmap->table = table;
	gmap->asce = atype | _ASCE_TABLE_LENGTH |
		_ASCE_USER_BITS | __pa(table);
	gmap->asce_end = limit;
	spin_lock(&mm->context.gmap_lock);
	list_add_rcu(&gmap->list, &mm->context.gmap_list);
	spin_unlock(&mm->context.gmap_lock);
	return gmap;

out_free:
	kfree(gmap);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(gmap_alloc);

static void gmap_flush_tlb(struct gmap *gmap)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_asce(gmap->mm, gmap->asce);
	else
		__tlb_flush_global();
}

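/*
 * gmap_radix_tree_free - remove all entries from a radix tree
 * @root: pointer to the root of the radix tree
 *
 * Indices are collected in batches of up to 16 and deleted outside of
 * the iteration, so the tree is not modified while it is being walked.
 */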
static void gmap_radix_tree_free(struct radix_tree_root *root)
{
	struct radix_tree_iter iter;
	unsigned long indices[16];
	unsigned long index;
	void **slot;
	int i, nr;

	/* A radix tree is freed by deleting all of its entries */
	index = 0;
	do {
		nr = 0;
		radix_tree_for_each_slot(slot, root, &iter, index) {
			indices[nr] = iter.index;
			if (++nr == 16)
				break;
		}
		for (i = 0; i < nr; i++) {
			index = indices[i];
			radix_tree_delete(root, index);
		}
	} while (nr > 0);
}

/**
 * gmap_free - free a guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_free(struct gmap *gmap)
{
	struct page *page, *next;

	/* Flush tlb. */
	if (MACHINE_HAS_IDTE)
		__tlb_flush_asce(gmap->mm, gmap->asce);
	else
		__tlb_flush_global();

	spin_lock(&gmap->mm->context.gmap_lock);
	list_del_rcu(&gmap->list);
	spin_unlock(&gmap->mm->context.gmap_lock);
	synchronize_rcu();

	/* Free all segment & region tables. */
	list_for_each_entry_safe(page, next, &gmap->crst_list, lru)
		__free_pages(page, 2);
	gmap_radix_tree_free(&gmap->guest_to_host);
	gmap_radix_tree_free(&gmap->host_to_guest);
	kfree(gmap);
}
EXPORT_SYMBOL_GPL(gmap_free);

/**
 * gmap_enable - switch primary space to the guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_enable(struct gmap *gmap)
{
	S390_lowcore.gmap = (unsigned long) gmap;
}
EXPORT_SYMBOL_GPL(gmap_enable);

/**
 * gmap_disable - switch back to the standard primary address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_disable(struct gmap *gmap)
{
	S390_lowcore.gmap = 0UL;
}
EXPORT_SYMBOL_GPL(gmap_disable);

/*
 * gmap_alloc_table is assumed to be called with mmap_sem held
 */
static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
			    unsigned long init, unsigned long gaddr)
{
	struct page *page;
	unsigned long *new;

	/* since we don't free the gmap table until gmap_free we can unlock */
	page = alloc_pages(GFP_KERNEL, 2);
	if (!page)
		return -ENOMEM;
	new = (unsigned long *) page_to_phys(page);
	crst_table_init(new, init);
	spin_lock(&gmap->mm->page_table_lock);
	if (*table & _REGION_ENTRY_INVALID) {
		list_add(&page->lru, &gmap->crst_list);
		*table = (unsigned long) new | _REGION_ENTRY_LENGTH |
			(*table & _REGION_ENTRY_TYPE_MASK);
		page->index = gaddr;
		page = NULL;
	}
	spin_unlock(&gmap->mm->page_table_lock);
	if (page)
		__free_pages(page, 2);
	return 0;
}

/**
 * __gmap_segment_gaddr - find virtual address from segment pointer
 * @entry: pointer to a segment table entry in the guest address space
 *
 * Returns the virtual address in the guest address space for the segment
 */
static unsigned long __gmap_segment_gaddr(unsigned long *entry)
{
	struct page *page;
	unsigned long offset, mask;

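	/*
	 * The index field of the CRST page backing the segment table holds
	 * the guest address mapped by the first entry of that table (set up
	 * in gmap_alloc_table).
	 */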
	offset = (unsigned long) entry / sizeof(unsigned long);
	offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE;
	mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
	page = virt_to_page((void *)((unsigned long) entry & mask));
	return page->index + offset;
}

/**
 * __gmap_unlink_by_vmaddr - unlink a single segment via a host address
 * @gmap: pointer to the guest address space structure
 * @vmaddr: address in the host process address space
 *
 * Returns 1 if a TLB flush is required
 */
static int __gmap_unlink_by_vmaddr(struct gmap *gmap, unsigned long vmaddr)
{
	unsigned long *entry;
	int flush = 0;

	spin_lock(&gmap->guest_table_lock);
	entry = radix_tree_delete(&gmap->host_to_guest, vmaddr >> PMD_SHIFT);
	if (entry) {
		flush = (*entry != _SEGMENT_ENTRY_INVALID);
		*entry = _SEGMENT_ENTRY_INVALID;
	}
	spin_unlock(&gmap->guest_table_lock);
	return flush;
}

/**
 * __gmap_unmap_by_gaddr - unmap a single segment via a guest address
 * @gmap: pointer to the guest address space structure
 * @gaddr: address in the guest address space
 *
 * Returns 1 if a TLB flush is required
 */
static int __gmap_unmap_by_gaddr(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;

	vmaddr = (unsigned long) radix_tree_delete(&gmap->guest_to_host,
						   gaddr >> PMD_SHIFT);
	return vmaddr ? __gmap_unlink_by_vmaddr(gmap, vmaddr) : 0;
}

/**
 * gmap_unmap_segment - unmap segment from the guest address space
 * @gmap: pointer to the guest address space structure
 * @to: address in the guest address space
 * @len: length of the memory area to unmap
 *
 * Returns 0 if the unmap succeeded, -EINVAL if not.
 */
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
{
	unsigned long off;
	int flush;

	if ((to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || to + len < to)
		return -EINVAL;

	flush = 0;
	down_write(&gmap->mm->mmap_sem);
	for (off = 0; off < len; off += PMD_SIZE)
		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
	up_write(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	return 0;
}
EXPORT_SYMBOL_GPL(gmap_unmap_segment);

/**
 * gmap_map_segment - map a segment to the guest address space
 * @gmap: pointer to the guest address space structure
 * @from: source address in the parent address space
 * @to: target address in the guest address space
 * @len: length of the memory area to map
 *
 * Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not.
 */
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long len)
{
	unsigned long off;
	int flush;

	if ((from | to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || from + len < from || to + len < to ||
	    from + len - 1 > TASK_MAX_SIZE || to + len - 1 > gmap->asce_end)
		return -EINVAL;

	flush = 0;
	down_write(&gmap->mm->mmap_sem);
	for (off = 0; off < len; off += PMD_SIZE) {
		/* Remove old translation */
		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
		/* Store new translation */
		if (radix_tree_insert(&gmap->guest_to_host,
				      (to + off) >> PMD_SHIFT,
				      (void *) from + off))
			break;
	}
	up_write(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	if (off >= len)
		return 0;
	gmap_unmap_segment(gmap, to, len);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(gmap_map_segment);

/**
 * __gmap_translate - translate a guest address to a user space address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 */
unsigned long __gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;

	vmaddr = (unsigned long)
		radix_tree_lookup(&gmap->guest_to_host, gaddr >> PMD_SHIFT);
	return vmaddr ? (vmaddr | (gaddr & ~PMD_MASK)) : -EFAULT;
}
EXPORT_SYMBOL_GPL(__gmap_translate);

/**
 * gmap_translate - translate a guest address to a user space address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 */
unsigned long gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long rc;

	down_read(&gmap->mm->mmap_sem);
	rc = __gmap_translate(gmap, gaddr);
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_translate);

/**
 * gmap_unlink - disconnect a page table from the gmap shadow tables
 * @mm: pointer to the parent mm_struct
 * @table: pointer to the host page table
 * @vmaddr: vm address associated with the host page table
 */
void gmap_unlink(struct mm_struct *mm, unsigned long *table,
		 unsigned long vmaddr)
{
	struct gmap *gmap;
	int flush;

	rcu_read_lock();
	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
		flush = __gmap_unlink_by_vmaddr(gmap, vmaddr);
		if (flush)
			gmap_flush_tlb(gmap);
	}
	rcu_read_unlock();
}

/**
 * __gmap_link - set up shadow page tables to connect a host to a guest address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 * @vmaddr: vm address
 *
 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
 * if the vm address is already mapped to a different guest segment.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 */
int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
{
	struct mm_struct *mm;
	unsigned long *table;
	spinlock_t *ptl;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	int rc;

	/* Create higher level tables in the gmap page table */
	table = gmap->table;
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION1) {
		table += (gaddr >> 53) & 0x7ff;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY,
				     gaddr & 0xffe0000000000000UL))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION2) {
		table += (gaddr >> 42) & 0x7ff;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY,
				     gaddr & 0xfffffc0000000000UL))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION3) {
		table += (gaddr >> 31) & 0x7ff;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY,
				     gaddr & 0xffffffff80000000UL))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
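	/* Index into the segment table; each entry maps a 1MB segment. */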
	table += (gaddr >> 20) & 0x7ff;
	/* Walk the parent mm page table */
	mm = gmap->mm;
	pgd = pgd_offset(mm, vmaddr);
	VM_BUG_ON(pgd_none(*pgd));
	pud = pud_offset(pgd, vmaddr);
	VM_BUG_ON(pud_none(*pud));
	pmd = pmd_offset(pud, vmaddr);
	VM_BUG_ON(pmd_none(*pmd));
	/* large pmds cannot yet be handled */
	if (pmd_large(*pmd))
		return -EFAULT;
	/* Link gmap segment table entry location to page table. */
	rc = radix_tree_preload(GFP_KERNEL);
	if (rc)
		return rc;
	ptl = pmd_lock(mm, pmd);
	spin_lock(&gmap->guest_table_lock);
	if (*table == _SEGMENT_ENTRY_INVALID) {
		rc = radix_tree_insert(&gmap->host_to_guest,
				       vmaddr >> PMD_SHIFT, table);
		if (!rc)
			*table = pmd_val(*pmd);
	} else
		rc = 0;
	spin_unlock(&gmap->guest_table_lock);
	spin_unlock(ptl);
	radix_tree_preload_end();
	return rc;
}

/**
 * gmap_fault - resolve a fault on a guest address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 * @fault_flags: flags to pass down to handle_mm_fault()
 *
 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
 * if the vm address is already mapped to a different guest segment.
 */
int gmap_fault(struct gmap *gmap, unsigned long gaddr,
	       unsigned int fault_flags)
{
	unsigned long vmaddr;
	int rc;
	bool unlocked;

	down_read(&gmap->mm->mmap_sem);

retry:
	unlocked = false;
	vmaddr = __gmap_translate(gmap, gaddr);
	if (IS_ERR_VALUE(vmaddr)) {
		rc = vmaddr;
		goto out_up;
	}
	if (fixup_user_fault(current, gmap->mm, vmaddr, fault_flags,
			     &unlocked)) {
		rc = -EFAULT;
		goto out_up;
	}
	/*
	 * In case fixup_user_fault unlocked the mmap_sem during the
	 * fault-in, redo __gmap_translate to avoid racing with a
	 * map/unmap_segment.
	 */
	if (unlocked)
		goto retry;

	rc = __gmap_link(gmap, gaddr, vmaddr);
out_up:
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_fault);

/**
 * __gmap_zap - zap the page table entry of an unused page for a guest address
 * @gmap: pointer to the guest address space structure
 * @gaddr: guest address
 *
 * This function is assumed to be called with mmap_sem held.
 */
void __gmap_zap(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;
	spinlock_t *ptl;
	pte_t *ptep;

	/* Find the vm address for the guest address */
	vmaddr = (unsigned long) radix_tree_lookup(&gmap->guest_to_host,
						   gaddr >> PMD_SHIFT);
	if (vmaddr) {
		vmaddr |= gaddr & ~PMD_MASK;
		/* Get pointer to the page table entry */
		ptep = get_locked_pte(gmap->mm, vmaddr, &ptl);
		if (likely(ptep))
			ptep_zap_unused(gmap->mm, vmaddr, ptep, 0);
		pte_unmap_unlock(ptep, ptl);
	}
}
EXPORT_SYMBOL_GPL(__gmap_zap);

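/**
 * gmap_discard - discard the host pages backing a guest address range
 * @gmap: pointer to the guest address space structure
 * @from: first guest address of the range
 * @to: guest address after the last one of the range
 *
 * The host pages that back the guest range are zapped from the parent
 * address space.
 */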
void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
{
	unsigned long gaddr, vmaddr, size;
	struct vm_area_struct *vma;

	down_read(&gmap->mm->mmap_sem);
	for (gaddr = from; gaddr < to;
	     gaddr = (gaddr + PMD_SIZE) & PMD_MASK) {
		/* Find the vm address for the guest address */
		vmaddr = (unsigned long)
			radix_tree_lookup(&gmap->guest_to_host,
					  gaddr >> PMD_SHIFT);
		if (!vmaddr)
			continue;
		vmaddr |= gaddr & ~PMD_MASK;
		/* Find vma in the parent mm */
		vma = find_vma(gmap->mm, vmaddr);
		size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
		zap_page_range(vma, vmaddr, size, NULL);
	}
	up_read(&gmap->mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(gmap_discard);

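/*
 * List of registered pte invalidation notifier blocks. Additions and
 * removals are serialized by gmap_notifier_lock.
 */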
static LIST_HEAD(gmap_notifier_list);
static DEFINE_SPINLOCK(gmap_notifier_lock);

/**
 * gmap_register_ipte_notifier - register a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_register_ipte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_add_rcu(&nb->list, &gmap_notifier_list);
	spin_unlock(&gmap_notifier_lock);
}
EXPORT_SYMBOL_GPL(gmap_register_ipte_notifier);

/**
 * gmap_unregister_ipte_notifier - remove a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_unregister_ipte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_del_rcu(&nb->list);
	spin_unlock(&gmap_notifier_lock);
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(gmap_unregister_ipte_notifier);

/**
 * gmap_call_notifier - call all registered invalidation callbacks
 * @gmap: pointer to guest mapping meta data structure
 * @start: start virtual address in the guest address space
 * @end: end virtual address in the guest address space
 */
static void gmap_call_notifier(struct gmap *gmap, unsigned long start,
			       unsigned long end)
{
	struct gmap_notifier *nb;

	list_for_each_entry(nb, &gmap_notifier_list, list)
		nb->notifier_call(gmap, start, end);
}

/**
 * gmap_ipte_notify - mark a range of ptes for invalidation notification
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @len: size of area
 *
 * Returns 0 if for each page in the given range a gmap mapping exists and
 * the invalidation notification could be set. If the gmap mapping is missing
 * for one or more pages -EFAULT is returned. If no memory could be allocated
 * -ENOMEM is returned. This function establishes missing page table entries.
 */
int gmap_ipte_notify(struct gmap *gmap, unsigned long gaddr, unsigned long len)
{
	unsigned long addr;
	spinlock_t *ptl;
	pte_t *ptep;
	bool unlocked;
	int rc = 0;

	if ((gaddr & ~PAGE_MASK) || (len & ~PAGE_MASK))
		return -EINVAL;
	down_read(&gmap->mm->mmap_sem);
	while (len) {
		unlocked = false;
		/* Convert gmap address and connect the page tables */
		addr = __gmap_translate(gmap, gaddr);
		if (IS_ERR_VALUE(addr)) {
			rc = addr;
			break;
		}
		/* Get the page mapped */
		if (fixup_user_fault(current, gmap->mm, addr, FAULT_FLAG_WRITE,
				     &unlocked)) {
			rc = -EFAULT;
			break;
		}
		/* While trying to map mmap_sem got unlocked. Let us retry */
		if (unlocked)
			continue;
		rc = __gmap_link(gmap, gaddr, addr);
		if (rc)
			break;
		/* Walk the process page table, lock and get pte pointer */
		ptep = get_locked_pte(gmap->mm, addr, &ptl);
		VM_BUG_ON(!ptep);
		/* Set notification bit in the pgste of the pte */
		if ((pte_val(*ptep) & (_PAGE_INVALID | _PAGE_PROTECT)) == 0) {
			ptep_set_notify(gmap->mm, addr, ptep);
			gaddr += PAGE_SIZE;
			len -= PAGE_SIZE;
		}
		pte_unmap_unlock(ptep, ptl);
	}
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_ipte_notify);

/**
 * ptep_notify - call all invalidation callbacks for a specific pte.
 * @mm: pointer to the process mm_struct
 * @vmaddr: virtual address in the process address space
 * @pte: pointer to the page table entry
 *
 * This function is assumed to be called with the page table lock held
 * for the pte to notify.
 */
void ptep_notify(struct mm_struct *mm, unsigned long vmaddr, pte_t *pte)
{
	unsigned long offset, gaddr;
	unsigned long *table;
	struct gmap *gmap;

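	/*
	 * Derive the page offset within the 1MB segment from the position
	 * of the pte within its 2K page table.
	 */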
	offset = ((unsigned long) pte) & (255 * sizeof(pte_t));
	offset = offset * (4096 / sizeof(pte_t));
	rcu_read_lock();
	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
		spin_lock(&gmap->guest_table_lock);
		table = radix_tree_lookup(&gmap->host_to_guest,
					  vmaddr >> PMD_SHIFT);
		if (table)
			gaddr = __gmap_segment_gaddr(table) + offset;
		spin_unlock(&gmap->guest_table_lock);
		if (table)
			gmap_call_notifier(gmap, gaddr, gaddr + PAGE_SIZE - 1);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(ptep_notify);

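/* Split all THP mappings of @mm and disable THP for all future mappings. */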
static inline void thp_split_mm(struct mm_struct *mm)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct vm_area_struct *vma;
	unsigned long addr;

	for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
		for (addr = vma->vm_start;
		     addr < vma->vm_end;
		     addr += PAGE_SIZE)
			follow_page(vma, addr, FOLL_SPLIT);
		vma->vm_flags &= ~VM_HUGEPAGE;
		vma->vm_flags |= VM_NOHUGEPAGE;
	}
	mm->def_flags |= VM_NOHUGEPAGE;
#endif
}

/*
 * Switch on pgstes for the userspace process (for kvm).
 */
int s390_enable_sie(void)
{
	struct mm_struct *mm = current->mm;

	/* Do we have pgstes? If yes, we are done */
	if (mm_has_pgste(mm))
		return 0;
	/* Fail if the page tables are 2K */
	if (!mm_alloc_pgste(mm))
		return -EINVAL;
	down_write(&mm->mmap_sem);
	mm->context.has_pgste = 1;
	/* split thp mappings and disable thp for future mappings */
	thp_split_mm(mm);
	up_write(&mm->mmap_sem);
	return 0;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);

/*
 * Enable storage key handling from now on and initialize the storage
 * keys with the default key.
 */
static int __s390_enable_skey(pte_t *pte, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	/*
	 * Remove all zero page mappings; once a policy to forbid zero page
	 * mappings has been established, subsequent faults on those pages
	 * will get fresh anonymous pages.
	 */
	if (is_zero_pfn(pte_pfn(*pte)))
		ptep_xchg_direct(walk->mm, addr, pte, __pte(_PAGE_INVALID));
	/* Clear storage key */
	ptep_zap_key(walk->mm, addr, pte);
	return 0;
}

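/*
 * s390_enable_skey - enable storage key handling for the current process
 *
 * Unmerge all KSM pages and clear the storage keys of all pages mapped
 * by the current mm.
 */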
int s390_enable_skey(void)
{
	struct mm_walk walk = { .pte_entry = __s390_enable_skey };
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int rc = 0;

	down_write(&mm->mmap_sem);
	if (mm_use_skey(mm))
		goto out_up;

	mm->context.use_skey = 1;
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (ksm_madvise(vma, vma->vm_start, vma->vm_end,
				MADV_UNMERGEABLE, &vma->vm_flags)) {
			mm->context.use_skey = 0;
			rc = -ENOMEM;
			goto out_up;
		}
	}
	mm->def_flags &= ~VM_MERGEABLE;

	walk.mm = mm;
	walk_page_range(0, TASK_SIZE, &walk);

out_up:
	up_write(&mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(s390_enable_skey);

/*
 * Reset CMMA state, make all pages stable again.
 */
static int __s390_reset_cmma(pte_t *pte, unsigned long addr,
			     unsigned long next, struct mm_walk *walk)
{
	ptep_zap_unused(walk->mm, addr, pte, 1);
	return 0;
}

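/* Walk the complete user address space of @mm and reset the CMMA state. */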
void s390_reset_cmma(struct mm_struct *mm)
{
	struct mm_walk walk = { .pte_entry = __s390_reset_cmma };

	down_write(&mm->mmap_sem);
	walk.mm = mm;
	walk_page_range(0, TASK_SIZE, &walk);
	up_write(&mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(s390_reset_cmma);