blob: 532525ec88c1ebce808da0e5b6dc06630b4d997f [file] [log] [blame]
/*
 * IBM System z Huge TLB Page Support for Kernel.
 *
 * Copyright IBM Corp. 2007
 * Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
 */
7
#include <linux/mm.h>
#include <linux/hugetlb.h>

/*
 * Install a huge-page mapping: write @pteval into the segment (pmd) entry
 * that @pteptr actually points at — on s390 a huge pte lives in a pmd slot.
 *
 * On machines without hardware huge-page support (!MACHINE_HAS_HPAGE) the
 * entry is rewritten to point at the shadow pte table prepared by
 * arch_prepare_hugepage() (stashed in page[1].index), preserving the
 * invalid/read-only protection bits of the original pte.
 */
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *pteptr, pte_t pteval)
{
	pmd_t *pmdp = (pmd_t *) pteptr;	/* huge ptes are stored in pmd entries */
	unsigned long mask;

	if (!MACHINE_HAS_HPAGE) {
		/* Fetch the emulation pte table saved by arch_prepare_hugepage(). */
		pteptr = (pte_t *) pte_page(pteval)[1].index;
		/* Carry over the protection bits from the incoming pte. */
		mask = pte_val(pteval) &
			(_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO);
		/* Point the segment entry at the pte table's physical address. */
		pte_val(pteval) = (_SEGMENT_ENTRY + __pa(pteptr)) | mask;
	}

	pmd_val(*pmdp) = pte_val(pteval);
}
27
/*
 * Prepare a compound page for use as a huge page.
 *
 * On hardware with native huge-page support this is a no-op.  Otherwise,
 * allocate a full pte table covering the huge page, fill it with
 * consecutive writable ptes, and stash the table pointer in page[1].index
 * so set_huge_pte_at()/arch_release_hugepage() can find it later.
 *
 * Returns 0 on success, -ENOMEM if the pte table cannot be allocated.
 */
int arch_prepare_hugepage(struct page *page)
{
	unsigned long addr = page_to_phys(page);
	pte_t pte;
	pte_t *ptep;
	int i;

	if (MACHINE_HAS_HPAGE)
		return 0;

	ptep = (pte_t *) pte_alloc_one(&init_mm, addr);
	if (!ptep)
		return -ENOMEM;

	/* Map the huge page with PTRS_PER_PTE consecutive small-page ptes. */
	pte = mk_pte(page, PAGE_RW);
	for (i = 0; i < PTRS_PER_PTE; i++) {
		set_pte_at(&init_mm, addr + i * PAGE_SIZE, ptep + i, pte);
		pte_val(pte) += PAGE_SIZE;	/* advance to the next small page */
	}
	/* Remember the shadow pte table in the second tail page. */
	page[1].index = (unsigned long) ptep;
	return 0;
}
50
/*
 * Release the shadow pte table attached to a huge page by
 * arch_prepare_hugepage().  No-op on hardware with native huge-page
 * support, or when no table was ever attached.
 */
void arch_release_hugepage(struct page *page)
{
	pte_t *ptep;

	if (MACHINE_HAS_HPAGE)
		return;

	ptep = (pte_t *) page[1].index;
	if (!ptep)
		return;
	/* Wipe the table before handing it back to the allocator. */
	clear_table((unsigned long *) ptep, _PAGE_TYPE_EMPTY,
		    PTRS_PER_PTE * sizeof(pte_t));
	page_table_free(&init_mm, (unsigned long *) ptep);
	page[1].index = 0;	/* forget the stale pointer */
}
66
Andi Kleena5516432008-07-23 21:27:41 -070067pte_t *huge_pte_alloc(struct mm_struct *mm,
68 unsigned long addr, unsigned long sz)
Gerald Schaefer53492b12008-04-30 13:38:46 +020069{
70 pgd_t *pgdp;
71 pud_t *pudp;
72 pmd_t *pmdp = NULL;
73
74 pgdp = pgd_offset(mm, addr);
75 pudp = pud_alloc(mm, pgdp, addr);
76 if (pudp)
77 pmdp = pmd_alloc(mm, pudp, addr);
78 return (pte_t *) pmdp;
79}
80
81pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
82{
83 pgd_t *pgdp;
84 pud_t *pudp;
85 pmd_t *pmdp = NULL;
86
87 pgdp = pgd_offset(mm, addr);
88 if (pgd_present(*pgdp)) {
89 pudp = pud_offset(pgdp, addr);
90 if (pud_present(*pudp))
91 pmdp = pmd_offset(pudp, addr);
92 }
93 return (pte_t *) pmdp;
94}
95
96int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
97{
98 return 0;
99}
100
101struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
102 int write)
103{
104 return ERR_PTR(-EINVAL);
105}
106
107int pmd_huge(pmd_t pmd)
108{
109 if (!MACHINE_HAS_HPAGE)
110 return 0;
111
112 return !!(pmd_val(pmd) & _SEGMENT_ENTRY_LARGE);
113}
114
Andi Kleenceb86872008-07-23 21:27:50 -0700115int pud_huge(pud_t pud)
116{
117 return 0;
118}
119
Gerald Schaefer53492b12008-04-30 13:38:46 +0200120struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
121 pmd_t *pmdp, int write)
122{
123 struct page *page;
124
125 if (!MACHINE_HAS_HPAGE)
126 return NULL;
127
128 page = pmd_page(*pmdp);
129 if (page)
130 page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT);
131 return page;
132}