/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 *
 * This file contains the functions and defines necessary to modify and use
 * the TILE page table tree.
 */

#ifndef _ASM_TILE_PGTABLE_H
#define _ASM_TILE_PGTABLE_H

#include <hv/hypervisor.h>

#ifndef __ASSEMBLY__

#include <linux/bitops.h>
#include <linux/threads.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/pfn.h>
#include <asm/processor.h>
#include <asm/fixmap.h>
#include <asm/page.h>

struct mm_struct;
struct vm_area_struct;

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas, etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern pgd_t swapper_pg_dir[];
extern pgprot_t swapper_pgprot;
extern struct kmem_cache *pgd_cache;
extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

/*
 * The very last slots in the pgd are for addresses unusable by Linux
 * (pgd_addr_invalid() returns true), so we use them for the list
 * structure.  The x86 code on which this is modeled uses the
 * page->private/index fields (older 2.6 kernels) or the lru list
 * (newer 2.6 kernels), but since our pgds are so much smaller than
 * a page, it seems a waste to spend a whole page on each pgd.
 */
#define PGD_LIST_OFFSET \
	((PTRS_PER_PGD * sizeof(pgd_t)) - sizeof(struct list_head))
#define pgd_to_list(pgd) \
	((struct list_head *)((char *)(pgd) + PGD_LIST_OFFSET))
#define list_to_pgd(list) \
	((pgd_t *)((char *)(list) - PGD_LIST_OFFSET))
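
/*
 * Illustrative sketch (not part of the original header): walking the
 * global pgd list through the embedded list_head; callers would hold
 * pgd_lock.  The helper itself is hypothetical.
 */
static inline void example_for_each_pgd(void (*fn)(pgd_t *))
{
	struct list_head *pos;

	list_for_each(pos, &pgd_list)
		fn(list_to_pgd(pos));
}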

extern void pgtable_cache_init(void);
extern void paging_init(void);
extern void set_page_homes(void);

#define FIRST_USER_ADDRESS	0

#define _PAGE_PRESENT		HV_PTE_PRESENT
#define _PAGE_HUGE_PAGE		HV_PTE_PAGE
#define _PAGE_SUPER_PAGE	HV_PTE_SUPER
#define _PAGE_READABLE		HV_PTE_READABLE
#define _PAGE_WRITABLE		HV_PTE_WRITABLE
#define _PAGE_EXECUTABLE	HV_PTE_EXECUTABLE
#define _PAGE_ACCESSED		HV_PTE_ACCESSED
#define _PAGE_DIRTY		HV_PTE_DIRTY
#define _PAGE_GLOBAL		HV_PTE_GLOBAL
#define _PAGE_USER		HV_PTE_USER

/*
 * All the "standard" bits. Cache-control bits are managed elsewhere.
 * This is used to test for valid level-2 page table pointers by checking
 * all the bits, and to mask away the cache control bits for mprotect.
 */
#define _PAGE_ALL (\
	_PAGE_PRESENT | \
	_PAGE_HUGE_PAGE | \
	_PAGE_SUPER_PAGE | \
	_PAGE_READABLE | \
	_PAGE_WRITABLE | \
	_PAGE_EXECUTABLE | \
	_PAGE_ACCESSED | \
	_PAGE_DIRTY | \
	_PAGE_GLOBAL | \
	_PAGE_USER \
)

#define PAGE_NONE \
	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
#define PAGE_SHARED \
	__pgprot(_PAGE_PRESENT | _PAGE_READABLE | _PAGE_WRITABLE | \
		 _PAGE_USER | _PAGE_ACCESSED)

#define PAGE_SHARED_EXEC \
	__pgprot(_PAGE_PRESENT | _PAGE_READABLE | _PAGE_WRITABLE | \
		 _PAGE_EXECUTABLE | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY_NOEXEC \
	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_READABLE)
#define PAGE_COPY_EXEC \
	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | \
		 _PAGE_READABLE | _PAGE_EXECUTABLE)
#define PAGE_COPY \
	PAGE_COPY_NOEXEC
#define PAGE_READONLY \
	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_READABLE)
#define PAGE_READONLY_EXEC \
	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | \
		 _PAGE_READABLE | _PAGE_EXECUTABLE)

#define _PAGE_KERNEL_RO \
	(_PAGE_PRESENT | _PAGE_GLOBAL | _PAGE_READABLE | _PAGE_ACCESSED)
#define _PAGE_KERNEL \
	(_PAGE_KERNEL_RO | _PAGE_WRITABLE | _PAGE_DIRTY)
#define _PAGE_KERNEL_EXEC	(_PAGE_KERNEL_RO | _PAGE_EXECUTABLE)

#define PAGE_KERNEL		__pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_RO		__pgprot(_PAGE_KERNEL_RO)
#define PAGE_KERNEL_EXEC	__pgprot(_PAGE_KERNEL_EXEC)

#define page_to_kpgprot(p) PAGE_KERNEL

/*
 * We could tighten these up, but for now writable or executable
 * implies readable.
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY	/* this is write-only, which we won't support */
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY_EXEC
#define __P101	PAGE_READONLY_EXEC
#define __P110	PAGE_COPY_EXEC
#define __P111	PAGE_COPY_EXEC

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY_EXEC
#define __S101	PAGE_READONLY_EXEC
#define __S110	PAGE_SHARED_EXEC
#define __S111	PAGE_SHARED_EXEC

/*
 * All the normal _PAGE_ALL bits are ignored for PMDs, except
 * _PAGE_PRESENT and _PAGE_HUGE_PAGE, which must be one and zero,
 * respectively.  We set the ignored bits to zero.
 */
#define _PAGE_TABLE	_PAGE_PRESENT

/* Inherit the caching flags from the old protection bits. */
#define pgprot_modify(oldprot, newprot) \
	(pgprot_t) { ((oldprot).val & ~_PAGE_ALL) | (newprot).val }

/* Just setting the PFN to zero suffices. */
#define pte_pgprot(x) hv_pte_set_pa((x), 0)

/*
 * For PTEs and PDEs, we must clear the Present bit first when
 * clearing a page table entry, so clear the bottom half first and
 * enforce ordering with a barrier.
 */
static inline void __pte_clear(pte_t *ptep)
{
#ifdef __tilegx__
	ptep->val = 0;
#else
	u32 *tmp = (u32 *)ptep;
	tmp[0] = 0;
	barrier();
	tmp[1] = 0;
#endif
}
#define pte_clear(mm, addr, ptep) __pte_clear(ptep)

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
#define pte_present hv_pte_get_present
#define pte_mknotpresent hv_pte_clear_present
#define pte_user hv_pte_get_user
#define pte_read hv_pte_get_readable
#define pte_dirty hv_pte_get_dirty
#define pte_young hv_pte_get_accessed
#define pte_write hv_pte_get_writable
#define pte_exec hv_pte_get_executable
#define pte_huge hv_pte_get_page
#define pte_super hv_pte_get_super
#define pte_rdprotect hv_pte_clear_readable
#define pte_exprotect hv_pte_clear_executable
#define pte_mkclean hv_pte_clear_dirty
#define pte_mkold hv_pte_clear_accessed
#define pte_wrprotect hv_pte_clear_writable
#define pte_mksmall hv_pte_clear_page
#define pte_mkread hv_pte_set_readable
#define pte_mkexec hv_pte_set_executable
#define pte_mkdirty hv_pte_set_dirty
#define pte_mkyoung hv_pte_set_accessed
#define pte_mkwrite hv_pte_set_writable
#define pte_mkhuge hv_pte_set_page
#define pte_mksuper hv_pte_set_super

#define pte_special(pte) 0
#define pte_mkspecial(pte) (pte)
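
/*
 * Illustrative sketch (not in the original header): the hv_pte
 * accessors above are functional (each returns the modified PTE),
 * so they compose by nesting, e.g. to downgrade a PTE to a clean,
 * read-only mapping:
 */
static inline pte_t example_pte_downgrade(pte_t pte)
{
	return pte_wrprotect(pte_mkclean(pte));
}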

/*
 * Use some spare bits in the PTE for user-caching tags.
 */
#define pte_set_forcecache hv_pte_set_client0
#define pte_get_forcecache hv_pte_get_client0
#define pte_clear_forcecache hv_pte_clear_client0
#define pte_set_anyhome hv_pte_set_client1
#define pte_get_anyhome hv_pte_get_client1
#define pte_clear_anyhome hv_pte_clear_client1

/*
 * A migrating PTE has _PAGE_PRESENT clear but all the other bits preserved.
 */
#define pte_migrating hv_pte_get_migrating
#define pte_mkmigrate(x) hv_pte_set_migrating(hv_pte_clear_present(x))
#define pte_donemigrate(x) hv_pte_set_present(hv_pte_clear_migrating(x))
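
/*
 * Illustrative sequence (an assumption about usage, not from this
 * header): migration code first installs pte_mkmigrate(pte) so that
 * CPUs faulting on the mapping wait for the move to finish, then
 * installs pte_donemigrate(pte) to make the mapping present again.
 */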

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte 0x%016llx.\n", __FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd 0x%016llx.\n", __FILE__, __LINE__, pgd_val(e))

/* Return PA and protection info for a given kernel VA. */
int va_to_cpa_and_pte(void *va, phys_addr_t *cpa, pte_t *pte);

/*
 * __set_pte() ensures we write the 64-bit PTE with 32-bit words in
 * the right order on 32-bit platforms and also allows us to write
 * hooks to check valid PTEs, etc., if we want.
 */
void __set_pte(pte_t *ptep, pte_t pte);

/*
 * set_pte() sets the given PTE and also sanity-checks the
 * requested PTE against the page homecaching.  Unspecified parts
 * of the PTE are filled in when it is written to memory, i.e. all
 * caching attributes if "!forcecache", or the home cpu if "anyhome".
 */
extern void set_pte(pte_t *ptep, pte_t pte);
#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
#define set_pte_atomic(pteptr, pteval) set_pte(pteptr, pteval)

#define pte_page(x) pfn_to_page(pte_pfn(x))

static inline int pte_none(pte_t pte)
{
	return !pte.val;
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return PFN_DOWN(hv_pte_get_pa(pte));
}

/* Set or get the remote cache cpu in a pgprot with remote caching. */
extern pgprot_t set_remote_cache_cpu(pgprot_t prot, int cpu);
extern int get_remote_cache_cpu(pgprot_t prot);

static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
{
	return hv_pte_set_pa(prot, PFN_PHYS(pfn));
}

/* Support for priority mappings. */
extern void start_mm_caching(struct mm_struct *mm);
extern void check_mm_caching(struct mm_struct *prev, struct mm_struct *next);

/*
 * Support non-linear file mappings (see sys_remap_file_pages).
 * This is defined by CLIENT1 set but CLIENT0 and _PAGE_PRESENT clear, and
 * the file offset in the 32 high bits.
 */
#define _PAGE_FILE	HV_PTE_CLIENT1
#define PTE_FILE_MAX_BITS	32
#define pte_file(pte)	(hv_pte_get_client1(pte) && !hv_pte_get_client0(pte))
#define pte_to_pgoff(pte)	((pte).val >> 32)
#define pgoff_to_pte(off)	((pte_t) { (((long long)(off)) << 32) | _PAGE_FILE })
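
/*
 * Worked example (illustrative): pgoff_to_pte(0x1234) produces a PTE
 * whose val is (0x1234ULL << 32) | HV_PTE_CLIENT1, so pte_file() is
 * true for it and pte_to_pgoff() recovers 0x1234.
 */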

/*
 * Encode and de-code a swap entry (see <linux/swapops.h>).
 * We put the swap file type+offset in the 32 high bits;
 * I believe we can just leave the low bits clear.
 */
#define __swp_type(swp)		((swp).val & 0x1f)
#define __swp_offset(swp)	((swp).val >> 5)
#define __swp_entry(type, off)	((swp_entry_t) { (type) | ((off) << 5) })
#define __pte_to_swp_entry(pte)	((swp_entry_t) { (pte).val >> 32 })
#define __swp_entry_to_pte(swp)	((pte_t) { (((long long) ((swp).val)) << 32) })
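
/*
 * Worked example (illustrative): __swp_entry(3, 0x100) packs to
 * 3 | (0x100 << 5) == 0x2003; __swp_type() recovers 3 and
 * __swp_offset() recovers 0x100.  __swp_entry_to_pte() then shifts
 * the packed value into the high 32 bits of the PTE.
 */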

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
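
/*
 * Illustrative sketch (not part of the original header): building a
 * kernel-writable PTE for a given struct page with mk_pte().  In real
 * code this would live in a .c file with <linux/mm.h> in scope so
 * that page_to_pfn() expands against a complete struct page.
 */
struct page;
static inline pte_t example_mk_kernel_pte(struct page *page)
{
	return mk_pte(page, PAGE_KERNEL);
}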

/*
 * If we are doing an mprotect(), just accept the new vma->vm_page_prot
 * value and combine it with the PFN from the old PTE to get a new PTE.
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return pfn_pte(pte_pfn(pte), newprot);
}

/*
 * The pgd page can be thought of as an array: pgd_t[PTRS_PER_PGD].
 *
 * This macro returns the index of the entry in the pgd page which would
 * control the given virtual address.
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
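
/*
 * Worked example (illustrative, with made-up constants): if
 * PGDIR_SHIFT were 24 and PTRS_PER_PGD were 256, then
 * pgd_index(0x03000000) == (0x03000000 >> 24) & 255 == 3.
 * The real constants come from the width-specific headers
 * included at the bottom of this file.
 */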

/*
 * pgd_offset() returns a (pgd_t *);
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's.
 */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))

/*
 * A shortcut which implies the use of the kernel's pgd, instead
 * of a process's.
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#define pte_offset_map(dir, address) pte_offset_kernel(dir, address)
#define pte_unmap(pte) do { } while (0)

/* Clear a non-executable kernel PTE and flush it from the TLB. */
#define kpte_clear_flush(ptep, vaddr)		\
do {						\
	pte_clear(&init_mm, (vaddr), (ptep));	\
	local_flush_tlb_page(FLUSH_NONEXEC, (vaddr), PAGE_SIZE); \
} while (0)

/*
 * The kernel page tables contain what we need, and we flush when we
 * change specific page table entries.
 */
#define update_mmu_cache(vma, address, pte) do { } while (0)

#ifdef CONFIG_FLATMEM
#define kern_addr_valid(addr)	(1)
#endif /* CONFIG_FLATMEM */

extern void vmalloc_sync_all(void);

#endif /* !__ASSEMBLY__ */

#ifdef __tilegx__
#include <asm/pgtable_64.h>
#else
#include <asm/pgtable_32.h>
#endif

#ifndef __ASSEMBLY__

static inline int pmd_none(pmd_t pmd)
{
	/*
	 * Only check the low word on 32-bit platforms, since the high
	 * word might be transiently out of sync with it.
	 */
	return (unsigned long)pmd_val(pmd) == 0;
}

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) & _PAGE_PRESENT;
}

static inline int pmd_bad(pmd_t pmd)
{
	return ((pmd_val(pmd) & _PAGE_ALL) != _PAGE_TABLE);
}

static inline unsigned long pages_to_mb(unsigned long npg)
{
	return npg >> (20 - PAGE_SHIFT);
}
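
/*
 * Worked example (illustrative): with 4 KB pages (PAGE_SHIFT == 12)
 * this reduces to npg >> 8, so pages_to_mb(51200) == 200.
 */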

/*
 * The pmd can be thought of as an array: pmd_t[PTRS_PER_PMD].
 *
 * This function returns the index of the entry in the pmd which would
 * control the given virtual address.
 */
static inline unsigned long pmd_index(unsigned long address)
{
	return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	return ptep_test_and_clear_young(vma, address, pmdp_ptep(pmdp));
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	ptep_set_wrprotect(mm, address, pmdp_ptep(pmdp));
}

#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
				       unsigned long address,
				       pmd_t *pmdp)
{
	return pte_pmd(ptep_get_and_clear(mm, address, pmdp_ptep(pmdp)));
}

static inline void __set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
	set_pte(pmdp_ptep(pmdp), pmd_pte(pmdval));
}

#define set_pmd_at(mm, addr, pmdp, pmdval) __set_pmd(pmdp, pmdval)

/* Create a pmd from a PTFN. */
static inline pmd_t ptfn_pmd(unsigned long ptfn, pgprot_t prot)
{
	return pte_pmd(hv_pte_set_ptfn(prot, ptfn));
}

/* Return the page-table frame number (ptfn) that a pmd_t points at. */
#define pmd_ptfn(pmd) hv_pte_get_ptfn(pmd_pte(pmd))

/*
 * A given kernel pmd_t maps to a specific virtual address (either a
 * kernel huge page or a kernel pte_t table).  Since kernel pte_t
 * tables can be aligned at sub-page granularity, this function can
 * return non-page-aligned pointers, despite its name.
 */
static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	phys_addr_t pa =
		(phys_addr_t)pmd_ptfn(pmd) << HV_LOG2_PAGE_TABLE_ALIGN;
	return (unsigned long)__va(pa);
}

/*
 * A pmd_t points to the base of a huge page or to a pte_t array.
 * If a pte_t array, since we can have multiple per page, we don't
 * have a one-to-one mapping of pmd_t's to pages.  However, this is
 * OK for pte_lockptr(), since we just end up with potentially one
 * lock being used for several pte_t arrays.
 */
#define pmd_page(pmd) pfn_to_page(PFN_DOWN(HV_PTFN_TO_CPA(pmd_ptfn(pmd))))

static inline void pmd_clear(pmd_t *pmdp)
{
	__pte_clear(pmdp_ptep(pmdp));
}

#define pmd_mknotpresent(pmd)	pte_pmd(pte_mknotpresent(pmd_pte(pmd)))
#define pmd_young(pmd)		pte_young(pmd_pte(pmd))
#define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
#define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_mkwrite(pmd)	pte_pmd(pte_mkwrite(pmd_pte(pmd)))
#define pmd_write(pmd)		pte_write(pmd_pte(pmd))
#define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_huge_page(pmd)	pte_huge(pmd_pte(pmd))
#define pmd_mkhuge(pmd)		pte_pmd(pte_mkhuge(pmd_pte(pmd)))
#define __HAVE_ARCH_PMD_WRITE

#define pfn_pmd(pfn, pgprot)	pte_pmd(pfn_pte((pfn), (pgprot)))
#define pmd_pfn(pmd)		pte_pfn(pmd_pte(pmd))
#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	return pfn_pmd(pmd_pfn(pmd), newprot);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define has_transparent_hugepage() 1
#define pmd_trans_huge pmd_huge_page

static inline pmd_t pmd_mksplitting(pmd_t pmd)
{
	return pte_pmd(hv_pte_set_client2(pmd_pte(pmd)));
}

static inline int pmd_trans_splitting(pmd_t pmd)
{
	return hv_pte_get_client2(pmd_pte(pmd));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * The pte page can be thought of as an array: pte_t[PTRS_PER_PTE].
 *
 * This function returns the index of the entry in the pte page which
 * would control the given virtual address.
 */
static inline unsigned long pte_index(unsigned long address)
{
	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
	return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}
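
/*
 * Illustrative sketch (not part of the original header): composing
 * pmd_present(), pmd_huge_page() and pte_offset_kernel() to fetch the
 * kernel PTE that maps an address, given the pmd entry covering it.
 * Returns a zero PTE when there is no pte_t table to walk.
 */
static inline pte_t example_lookup_kernel_pte(pmd_t *pmd,
					      unsigned long address)
{
	if (!pmd_present(*pmd) || pmd_huge_page(*pmd))
		return hv_pte(0);	/* hv_pte() builds a PTE from a raw value */
	return *pte_offset_kernel(pmd, address);
}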

#include <asm-generic/pgtable.h>

/* Support /proc/NN/pgtable API. */
struct seq_file;
int arch_proc_pgtable_show(struct seq_file *m, struct mm_struct *mm,
			   unsigned long vaddr, unsigned long pagesize,
			   pte_t *ptep, void **datap);

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_PGTABLE_H */