/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008, 2009 Cavium Networks, Inc.
 */

#ifndef __ASM_HUGETLB_H
#define __ASM_HUGETLB_H

#include <asm/page.h>
#include <asm-generic/hugetlb.h>

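/* MIPS has no address ranges that are reserved exclusively for huge pages. */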
static inline int is_hugepage_only_range(struct mm_struct *mm,
					 unsigned long addr,
					 unsigned long len)
{
	return 0;
}

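/*
 * Sanity-check a proposed huge-page mapping: the address and length must
 * both be aligned to the huge page size, and the range must fit below the
 * task's address-space limit.
 */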
static inline int prepare_hugepage_range(struct file *file,
					 unsigned long addr,
					 unsigned long len)
{
	unsigned long task_size = STACK_TOP;
	struct hstate *h = hstate_file(file);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (addr & ~huge_page_mask(h))
		return -EINVAL;
	if (len > task_size)
		return -ENOMEM;
	if (task_size - len < addr)
		return -EINVAL;
	return 0;
}

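/*
 * Huge pages use the regular page-table layout on MIPS, so the generic
 * page-table teardown can be used as-is.
 */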
static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
					  unsigned long addr,
					  unsigned long end,
					  unsigned long floor,
					  unsigned long ceiling)
{
	free_pgd_range(tlb, addr, end, floor, ceiling);
}

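/* Installing a huge PTE is no different from installing a normal one. */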
static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, pte_t pte)
{
	set_pte_at(mm, addr, ptep, pte);
}

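/*
 * Clear a huge PTE and return its old value.  An empty entry is encoded
 * as a pointer to invalid_pte_table, the same convention MIPS uses for
 * empty mid-level page-table entries, rather than as zero.
 */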
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep)
{
	pte_t clear;
	pte_t pte = *ptep;

	pte_val(clear) = (unsigned long)invalid_pte_table;
	set_pte_at(mm, addr, ptep, clear);
	return pte;
}

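/*
 * Flush the TLB entry for the huge page containing @addr; the address is
 * first rounded down to the huge-page boundary.
 */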
static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep)
{
	flush_tlb_page(vma, addr & huge_page_mask(hstate_vma(vma)));
}

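/*
 * A huge PTE is "none" if it is zero or still points at invalid_pte_table.
 * The global bit is masked off, as it can remain set in an otherwise
 * empty entry.
 */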
static inline int huge_pte_none(pte_t pte)
{
	unsigned long val = pte_val(pte) & ~_PAGE_GLOBAL;

	return !val || (val == (unsigned long)invalid_pte_table);
}

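/* Write protection works exactly as for standard-sized pages. */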
static inline pte_t huge_pte_wrprotect(pte_t pte)
{
	return pte_wrprotect(pte);
}

static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep)
{
	ptep_set_wrprotect(mm, addr, ptep);
}

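/*
 * Update the access/dirty bits of a huge PTE, flushing stale translations
 * only when the entry actually changed.
 */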
static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
					     unsigned long addr,
					     pte_t *ptep, pte_t pte,
					     int dirty)
{
	int changed = !pte_same(*ptep, pte);

	if (changed) {
		set_pte_at(vma->vm_mm, addr, ptep, pte);
		/*
		 * The range may also be covered by standard-sized TLB
		 * entries; flush them all.
		 */
		flush_tlb_range(vma, addr, addr + HPAGE_SIZE);
	}
	return changed;
}

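/* Reading a huge PTE requires no special handling. */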
static inline pte_t huge_ptep_get(pte_t *ptep)
{
	return *ptep;
}

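/* No arch-specific page flags need to be cleared when a huge page is freed. */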
static inline void arch_clear_hugepage_flags(struct page *page)
{
}

#endif /* __ASM_HUGETLB_H */