blob: 2fac5be4de26a6209da225f1e4cbc08c1f2afbec [file] [log] [blame]
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */
14
#ifndef _ASM_TILE_HUGETLB_H
#define _ASM_TILE_HUGETLB_H

#include <asm/page.h>
#include <asm-generic/hugetlb.h>
Chris Metcalf867e3592010-05-28 23:09:12 -040020
21
/*
 * Report whether [addr, addr+len) lies in a region reserved exclusively
 * for huge pages.  This architecture reserves no such region, so any
 * address range may hold a mix of huge and normal mappings; always 0.
 *
 * Fix: opening brace moved to its own line, consistent with every other
 * function definition in this file and with kernel coding style.
 */
static inline int is_hugepage_only_range(struct mm_struct *mm,
					 unsigned long addr,
					 unsigned long len)
{
	return 0;
}
27
/*
 * If the arch doesn't supply something else, assume that hugepage
 * size aligned regions are ok without further preparation.
 */
32static inline int prepare_hugepage_range(struct file *file,
33 unsigned long addr, unsigned long len)
34{
35 struct hstate *h = hstate_file(file);
36 if (len & ~huge_page_mask(h))
37 return -EINVAL;
38 if (addr & ~huge_page_mask(h))
39 return -EINVAL;
40 return 0;
41}
42
/*
 * Free page-table pages covering a huge-page region.  Huge-page page
 * tables need no special handling here, so this simply forwards to the
 * generic free_pgd_range() with identical arguments.
 */
static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
					  unsigned long addr, unsigned long end,
					  unsigned long floor,
					  unsigned long ceiling)
{
	free_pgd_range(tlb, addr, end, floor, ceiling);
}
50
/*
 * Install a huge PTE at *ptep.  Delegates directly to set_pte() rather
 * than set_pte_at(); presumably this arch's set_pte() needs no per-mm
 * or per-address bookkeeping — NOTE(review): confirm against the arch
 * pgtable implementation.
 */
static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, pte_t pte)
{
	set_pte(ptep, pte);
}
56
/*
 * Atomically fetch and clear a huge PTE; returns the old PTE value.
 * Huge PTEs here are ordinary PTEs, so the generic per-PTE helper
 * suffices.
 */
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep)
{
	return ptep_get_and_clear(mm, addr, ptep);
}
62
/*
 * Clear a huge PTE and flush the corresponding TLB entry, via the
 * generic single-PTE clear-and-flush helper.
 */
static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep)
{
	ptep_clear_flush(vma, addr, ptep);
}
68
/* A huge PTE is "none" (no mapping) exactly when the plain PTE is. */
static inline int huge_pte_none(pte_t pte)
{
	return pte_none(pte);
}
73
/* Return a write-protected copy of a huge PTE (plain PTE semantics). */
static inline pte_t huge_pte_wrprotect(pte_t pte)
{
	return pte_wrprotect(pte);
}
78
/* Write-protect a huge PTE in place, via the generic per-PTE helper. */
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep)
{
	ptep_set_wrprotect(mm, addr, ptep);
}
84
/*
 * Update access/dirty flags on a huge PTE.  Forwards to the generic
 * ptep_set_access_flags(); the nonzero return indicates the PTE changed
 * (same contract as the generic helper).
 */
static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
					     unsigned long addr, pte_t *ptep,
					     pte_t pte, int dirty)
{
	return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
}
91
/* Read a huge PTE; a plain dereference is sufficient on this arch. */
static inline pte_t huge_ptep_get(pte_t *ptep)
{
	return *ptep;
}
96
/*
 * Hook to clear arch-specific page flags when a huge page is freed.
 * This architecture keeps no such state, so it is intentionally empty.
 */
static inline void arch_clear_hugepage_flags(struct page *page)
{
}
100
Chris Metcalf621b1952012-04-01 14:04:21 -0400101#ifdef CONFIG_HUGETLB_SUPER_PAGES
102static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
103 struct page *page, int writable)
104{
105 size_t pagesize = huge_page_size(hstate_vma(vma));
106 if (pagesize != PUD_SIZE && pagesize != PMD_SIZE)
107 entry = pte_mksuper(entry);
108 return entry;
109}
110#define arch_make_huge_pte arch_make_huge_pte
111
/* Sizes to scale up page size for PTEs with HV_PTE_SUPER bit. */
enum {
	HUGE_SHIFT_PGDIR = 0,	/* scaling applied at the PGDIR level */
	HUGE_SHIFT_PMD = 1,	/* scaling applied at the PMD level */
	HUGE_SHIFT_PAGE = 2,	/* scaling applied at the base-page level */
	HUGE_SHIFT_ENTRIES	/* number of entries in huge_shift[] */
};
/* Per-level extra shift amounts; defined elsewhere in the arch code. */
extern int huge_shift[HUGE_SHIFT_ENTRIES];
120#endif
121
#endif /* _ASM_TILE_HUGETLB_H */