/*
 * linux/arch/unicore32/mm/pgd.c
 *
 * Code specific to PKUnity SoC and UniCore ISA
 *
 * Copyright (C) 2001-2010 GUAN Xue-tao
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/highmem.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/tlbflush.h>

#include "mm.h"

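/*
 * Index of the first pgd slot that covers kernel space.  Entries below
 * it map the user address range and are zeroed for a new mm; entries
 * from it upwards are the kernel and I/O mappings copied from the init
 * page tables.
 */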
#define FIRST_KERNEL_PGD_NR	(FIRST_USER_PGD_NR + USER_PTRS_PER_PGD)

/*
 * need to get a 4k page for level 1
 */
pgd_t *get_pgd_slow(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;

	new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 0);
	if (!new_pgd)
		goto no_pgd;

	memset(new_pgd, 0, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));

	/*
	 * Copy over the kernel and IO PGD entries
	 */
	init_pgd = pgd_offset_k(0);
	memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
	       (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));

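	/*
	 * Clean the new pgd out of the D-cache so that the page-table
	 * walk, which fetches the tables from memory, sees the entries
	 * copied above.
	 */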
	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

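	/*
	 * Only when the exception vectors sit at the bottom of the
	 * address space (!vectors_high()) does the new mm need its own
	 * first-page mapping set up here.
	 */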
	if (!vectors_high()) {
		/*
		 * On UniCore, the first page must always be allocated
		 * since it contains the machine vectors.
		 */
		new_pmd = pmd_alloc(mm, (pud_t *)new_pgd, 0);
		if (!new_pmd)
			goto no_pmd;

		new_pte = pte_alloc_map(mm, NULL, new_pmd, 0);
		if (!new_pte)
			goto no_pte;

		init_pmd = pmd_offset((pud_t *)init_pgd, 0);
		init_pte = pte_offset_map(init_pmd, 0);
		set_pte(new_pte, *init_pte);
		pte_unmap(init_pte);
		pte_unmap(new_pte);
	}

	return new_pgd;

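/*
 * Error unwind: undo the allocations in the reverse order they were
 * made, then report failure to the caller.
 */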
no_pte:
	pmd_free(mm, new_pmd);
	mm_dec_nr_pmds(mm);
no_pmd:
	free_pages((unsigned long)new_pgd, 0);
no_pgd:
	return NULL;
}

void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd)
{
	pmd_t *pmd;
	pgtable_t pte;

	if (!pgd)
		return;

	/* pgd is always present and good */
	pmd = pmd_off(pgd, 0);
	if (pmd_none(*pmd))
		goto free;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		goto free;
	}

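	/*
	 * Free the first-page pte and pmd that get_pgd_slow() set up for
	 * the vectors mapping, and drop the mm's page-table counters to
	 * match.
	 */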
	pte = pmd_pgtable(*pmd);
	pmd_clear(pmd);
	pte_free(mm, pte);
	atomic_long_dec(&mm->nr_ptes);
	pmd_free(mm, pmd);
	mm_dec_nr_pmds(mm);
free:
	free_pages((unsigned long) pgd, 0);
}
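
/*
 * A minimal sketch of how these helpers are typically consumed, assuming
 * the usual ARM-style arrangement that this file mirrors: the arch's
 * <asm/pgalloc.h> maps the generic page-table hooks onto them, roughly as
 *
 *	#define pgd_alloc(mm)		get_pgd_slow(mm)
 *	#define pgd_free(mm, pgd)	free_pgd_slow(mm, pgd)
 *
 * The actual definitions live in the architecture headers, not in this
 * file; the lines above are illustrative only.
 */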