/*
 * include/asm-xtensa/pgalloc.h
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Copyright (C) 2001-2007 Tensilica Inc.
 */

#ifndef _XTENSA_PGALLOC_H
#define _XTENSA_PGALLOC_H

#ifdef __KERNEL__

#include <linux/highmem.h>

/*
 * Allocating and freeing a pmd is trivial: the 1-entry pmd is
 * inside the pgd, so has no extra memory associated with it.
 */

#define pmd_populate_kernel(mm, pmdp, ptep) \
	(pmd_val(*(pmdp)) = ((unsigned long)ptep))
#define pmd_populate(mm, pmdp, page) \
	(pmd_val(*(pmdp)) = ((unsigned long)page_to_virt(page)))
#define pmd_pgtable(pmd) pmd_page(pmd)

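/*
 * Illustrative sketch (not part of this header): these populate helpers
 * are called from generic mm code such as __pte_alloc(); the variable
 * names below are assumptions for illustration only.
 *
 *	pgtable_t new = pte_alloc_one(mm, address);
 *	if (new)
 *		pmd_populate(mm, pmd, new);  // pmd/pgd entry now points at the pte table
 */
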
static inline pgd_t*
pgd_alloc(struct mm_struct *mm)
{
	return (pgd_t*) __get_free_pages(GFP_KERNEL | __GFP_ZERO, PGD_ORDER);
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	free_page((unsigned long)pgd);
}

/* Use a slab cache for the pte pages (see also sparc64 implementation) */

extern struct kmem_cache *pgtable_cache;

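/*
 * Illustrative sketch (an assumption, not code from this header): the
 * cache is expected to be created once at boot from the arch's mm init
 * code, along these lines; the flags and sizes shown are illustrative:
 *
 *	void __init pgtable_cache_init(void)
 *	{
 *		pgtable_cache = kmem_cache_create("pgtable_cache",
 *				PAGE_SIZE, PAGE_SIZE,
 *				SLAB_HWCACHE_ALIGN, NULL);
 *	}
 */
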
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
					  unsigned long address)
{
	return kmem_cache_alloc(pgtable_cache, GFP_KERNEL | __GFP_REPEAT);
}

static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
				      unsigned long addr)
{
	pte_t *pte;
	struct page *page;

	/* Allocate the pte page, then run the pgtable_t constructor on it. */
	pte = pte_alloc_one_kernel(mm, addr);
	if (!pte)
		return NULL;
	page = virt_to_page(pte);
	pgtable_page_ctor(page);
	return page;
}

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	kmem_cache_free(pgtable_cache, pte);
}

static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
{
	/* Undo the pgtable_page_ctor() done in pte_alloc_one(). */
	pgtable_page_dtor(pte);
	kmem_cache_free(pgtable_cache, page_address(pte));
}
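
/*
 * Illustrative sketch (an assumption, mirroring the shape of generic mm
 * teardown code): pmd_pgtable() recovers the pgtable_t token of a pte
 * table from a pmd entry so it can be handed back to pte_free():
 *
 *	pgtable_t token = pmd_pgtable(*pmd);
 *	pmd_clear(pmd);
 *	pte_free(mm, token);
 */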

#endif /* __KERNEL__ */
#endif /* _XTENSA_PGALLOC_H */