/* SPDX-License-Identifier: GPL-2.0 */
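/*
 * Page table allocation definitions for ColdFire (MCF) based m68k systems.
 */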
#ifndef M68K_MCF_PGALLOC_H
#define M68K_MCF_PGALLOC_H

#include <asm/tlb.h>
#include <asm/tlbflush.h>

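/* Free a kernel pte table page allocated by pte_alloc_one_kernel(). */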
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	free_page((unsigned long) pte);
}

extern const char bad_pmd_string[];

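/*
 * Allocate a single zeroed page to use as a kernel pte table. Note that
 * page table pages come from the DMA zone (GFP_DMA) here.
 */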
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
	unsigned long address)
{
	unsigned long page = __get_free_page(GFP_DMA);

	if (!page)
		return NULL;

	memset((void *)page, 0, PAGE_SIZE);
	return (pte_t *)page;
}

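/*
 * The pmd level is folded into the pgd on ColdFire: a "pmd" is simply the
 * pgd entry itself, so nothing is allocated here (see the comment above
 * pmd_free() below).
 */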
static inline pmd_t *pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
{
	return (pmd_t *) pgd;
}

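/* With the pmd folded into the pgd, real pmds can never be allocated. */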
#define pmd_alloc_one_fast(mm, address) ({ BUG(); ((pmd_t *)1); })
#define pmd_alloc_one(mm, address)      ({ BUG(); ((pmd_t *)2); })

#define pte_alloc_one_fast(mm, addr) pte_alloc_one(mm, addr)

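/*
 * Populating a (folded) pmd entry stores the kernel virtual address of
 * the pte table in the pgd slot.
 */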
#define pmd_populate(mm, pmd, page) (pmd_val(*pmd) = \
	(unsigned long)(page_address(page)))

#define pmd_populate_kernel(mm, pmd, pte) (pmd_val(*pmd) = (unsigned long)(pte))

#define pmd_pgtable(pmd) pmd_page(pmd)

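/*
 * Free a user pte table page from the mmu_gather (TLB shootdown) path,
 * undoing the pgtable constructor applied in pte_alloc_one().
 */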
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page,
				  unsigned long address)
{
	pgtable_page_dtor(page);
	__free_page(page);
}

#define __pmd_free_tlb(tlb, pmd, address) do { } while (0)

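/*
 * Allocate a page for a user pte table: register it as a page table
 * (needed for the split page table locks), zero it, flush the zeroed
 * contents out to RAM, drop any stale TLB entry, and mark the page
 * cache-inhibited, presumably so table walks always see current entries.
 */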
static inline struct page *pte_alloc_one(struct mm_struct *mm,
	unsigned long address)
{
	struct page *page = alloc_pages(GFP_DMA, 0);
	pte_t *pte;

	if (!page)
		return NULL;
	if (!pgtable_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}

	pte = kmap(page);
	if (pte) {
		clear_page(pte);
		__flush_page_to_ram(pte);
		flush_tlb_kernel_page(pte);
		nocache_page(pte);
	}
	kunmap(page);

	return page;
}

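/* Free a user pte table page outside the TLB gather path. */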
static inline void pte_free(struct mm_struct *mm, struct page *page)
{
	pgtable_page_dtor(page);
	__free_page(page);
}

/*
 * In our implementation, each pgd entry contains 1 pmd that is never allocated
 * or freed. pgd_present is always 1, so this should never be called. -NL
 */
#define pmd_free(mm, pmd) BUG()

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	free_page((unsigned long) pgd);
}

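/*
 * Allocate a new pgd: start from a copy of swapper_pg_dir so that all
 * kernel mappings are present, then clear the slots covering user space.
 */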
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *new_pgd;

	new_pgd = (pgd_t *)__get_free_page(GFP_DMA | __GFP_NOWARN);
	if (!new_pgd)
		return NULL;
	memcpy(new_pgd, swapper_pg_dir, PAGE_SIZE);
	memset(new_pgd, 0, PAGE_OFFSET >> PGDIR_SHIFT);
	return new_pgd;
}

#define pgd_populate(mm, pmd, pte) BUG()

#endif /* M68K_MCF_PGALLOC_H */