blob: 40cf9bceda2cb92625112a951b174f5b56dd5471 [file] [log] [blame]
Chris Zankel9a8fd552005-06-23 22:01:26 -07001/*
Chris Zankel66569202007-08-22 10:14:51 -07002 * include/asm-xtensa/pgalloc.h
Chris Zankel9a8fd552005-06-23 22:01:26 -07003 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
Chris Zankel66569202007-08-22 10:14:51 -07008 * Copyright (C) 2001-2007 Tensilica Inc.
Chris Zankel9a8fd552005-06-23 22:01:26 -07009 */
10
11#ifndef _XTENSA_PGALLOC_H
12#define _XTENSA_PGALLOC_H
13
14#ifdef __KERNEL__
15
Chris Zankel9a8fd552005-06-23 22:01:26 -070016#include <linux/highmem.h>
Chris Zankel4573e392010-05-02 01:05:13 -070017#include <linux/slab.h>
Chris Zankel9a8fd552005-06-23 22:01:26 -070018
19/*
20 * Allocating and freeing a pmd is trivial: the 1-entry pmd is
21 * inside the pgd, so has no extra memory associated with it.
22 */
23
/*
 * A pmd entry holds the kernel virtual address of its pte page, so
 * "populating" a pmd is a plain assignment of that address.  The mm
 * argument is unused; it exists to match the generic pgalloc interface.
 */
#define pmd_populate_kernel(mm, pmdp, ptep) \
	(pmd_val(*(pmdp)) = ((unsigned long)ptep))
#define pmd_populate(mm, pmdp, page) \
	(pmd_val(*(pmdp)) = ((unsigned long)page_to_virt(page)))
/* Recover the struct page backing a pmd's pte table. */
#define pmd_pgtable(pmd) pmd_page(pmd)
Chris Zankel9a8fd552005-06-23 22:01:26 -070029
30static inline pgd_t*
31pgd_alloc(struct mm_struct *mm)
32{
Chris Zankel66569202007-08-22 10:14:51 -070033 return (pgd_t*) __get_free_pages(GFP_KERNEL | __GFP_ZERO, PGD_ORDER);
Chris Zankel9a8fd552005-06-23 22:01:26 -070034}
35
Benjamin Herrenschmidt5e541972008-02-04 22:29:14 -080036static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
Chris Zankel66569202007-08-22 10:14:51 -070037{
38 free_page((unsigned long)pgd);
39}
Chris Zankel9a8fd552005-06-23 22:01:26 -070040
/* Use a slab cache for the pte pages (see also sparc64 implementation) */

/*
 * Backing cache for all pte-page allocations below.
 * NOTE(review): defined elsewhere in the arch mm code — confirm location.
 */
extern struct kmem_cache *pgtable_cache;
44
45static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
46 unsigned long address)
47{
48 return kmem_cache_alloc(pgtable_cache, GFP_KERNEL|__GFP_REPEAT);
49}
50
Chris Zankele584d852008-02-13 16:25:09 -080051static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
Martin Schwidefsky2f569af2008-02-08 04:22:04 -080052 unsigned long addr)
Chris Zankel66569202007-08-22 10:14:51 -070053{
Martin Schwidefsky2f569af2008-02-08 04:22:04 -080054 struct page *page;
55
56 page = virt_to_page(pte_alloc_one_kernel(mm, addr));
57 pgtable_page_ctor(page);
58 return page;
Chris Zankel66569202007-08-22 10:14:51 -070059}
60
Benjamin Herrenschmidt5e541972008-02-04 22:29:14 -080061static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
Chris Zankel66569202007-08-22 10:14:51 -070062{
63 kmem_cache_free(pgtable_cache, pte);
64}
65
Martin Schwidefsky2f569af2008-02-08 04:22:04 -080066static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
Chris Zankel66569202007-08-22 10:14:51 -070067{
Martin Schwidefsky2f569af2008-02-08 04:22:04 -080068 pgtable_page_dtor(pte);
69 kmem_cache_free(pgtable_cache, page_address(pte));
Chris Zankel66569202007-08-22 10:14:51 -070070}
Martin Schwidefsky2f569af2008-02-08 04:22:04 -080071#define pmd_pgtable(pmd) pmd_page(pmd)
Chris Zankel9a8fd552005-06-23 22:01:26 -070072
73#endif /* __KERNEL__ */
74#endif /* _XTENSA_PGALLOC_H */