/* SPDX-License-Identifier: GPL-2.0 */
#ifndef M68K_MCF_PGALLOC_H
#define M68K_MCF_PGALLOC_H

#include <asm/tlb.h>
#include <asm/tlbflush.h>

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	free_page((unsigned long) pte);
}

extern const char bad_pmd_string[];

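/*
 * Allocate and zero a single page (from the DMA zone) to back a kernel
 * page table.
 */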
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
	unsigned long address)
{
	unsigned long page = __get_free_page(GFP_DMA);

	if (!page)
		return NULL;

	memset((void *)page, 0, PAGE_SIZE);
	return (pte_t *) (page);
}

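/*
 * The pmd level is folded here: the pgd slot itself serves as the pmd,
 * so no separate pmd page is ever allocated (see pmd_free below).
 */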
static inline pmd_t *pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
{
	return (pmd_t *) pgd;
}

#define pmd_alloc_one_fast(mm, address) ({ BUG(); ((pmd_t *)1); })
#define pmd_alloc_one(mm, address) ({ BUG(); ((pmd_t *)2); })

#define pte_alloc_one_fast(mm, addr) pte_alloc_one(mm, addr)

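/* A pmd entry simply holds the kernel virtual address of its pte table. */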
#define pmd_populate(mm, pmd, page) (pmd_val(*pmd) = \
	(unsigned long)(page_address(page)))

#define pmd_populate_kernel(mm, pmd, pte) (pmd_val(*pmd) = (unsigned long)(pte))

#define pmd_pgtable(pmd) pmd_page(pmd)

static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page,
	unsigned long address)
{
	/* Undo the pgtable_page_ctor() done in pte_alloc_one(). */
	pgtable_page_dtor(page);
	__free_page(page);
}

#define __pmd_free_tlb(tlb, pmd, address) do { } while (0)

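/*
 * Allocate a user pte table: grab a DMA-zone page, run the page table
 * constructor, then clear the page, flush it to RAM and mark it
 * non-cacheable before returning it as a struct page.
 */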
static inline struct page *pte_alloc_one(struct mm_struct *mm,
	unsigned long address)
{
	struct page *page = alloc_pages(GFP_DMA, 0);
	pte_t *pte;

	if (!page)
		return NULL;
	if (!pgtable_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}

	pte = kmap(page);
	if (pte) {
		clear_page(pte);
		__flush_page_to_ram(pte);
		flush_tlb_kernel_page(pte);
		nocache_page(pte);
	}
	kunmap(page);

	return page;
}

static inline void pte_free(struct mm_struct *mm, struct page *page)
{
	/* Undo the pgtable_page_ctor() done in pte_alloc_one(). */
	pgtable_page_dtor(page);
	__free_page(page);
}

/*
 * In our implementation, each pgd entry contains 1 pmd that is never allocated
 * or freed. pgd_present is always 1, so this should never be called. -NL
 */
#define pmd_free(mm, pmd) BUG()

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	free_page((unsigned long) pgd);
}

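/*
 * A new pgd starts out as a copy of swapper_pg_dir with the user-space
 * entries cleared, so all kernel mappings are shared.
 */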
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *new_pgd;

	new_pgd = (pgd_t *)__get_free_page(GFP_DMA | __GFP_NOWARN);
	if (!new_pgd)
		return NULL;
	memcpy(new_pgd, swapper_pg_dir, PAGE_SIZE);
	memset(new_pgd, 0, PAGE_OFFSET >> PGDIR_SHIFT);
	return new_pgd;
}

#define pgd_populate(mm, pmd, pte) BUG()

#endif /* M68K_MCF_PGALLOC_H */