/*
 *  include/asm-s390/pgalloc.h
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgalloc.h"
 *    Copyright (C) 1994  Linus Torvalds
 */

#ifndef _S390_PGALLOC_H
#define _S390_PGALLOC_H

#include <linux/threads.h>
#include <linux/gfp.h>
#include <linux/mm.h>

#define check_pgt_cache()	do {} while (0)

unsigned long *crst_table_alloc(struct mm_struct *, int);
void crst_table_free(struct mm_struct *, unsigned long *);
void crst_table_free_rcu(struct mm_struct *, unsigned long *);

unsigned long *page_table_alloc(struct mm_struct *);
void page_table_free(struct mm_struct *, unsigned long *);
void page_table_free_rcu(struct mm_struct *, unsigned long *);
void disable_noexec(struct mm_struct *, struct task_struct *);

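/*
 * Fill a page table with an initial entry value: the first entry is
 * stored directly, then the mvc instructions propagate it through the
 * table in 256-byte blocks, so the size n must be a multiple of 256.
 */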
static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
{
	typedef struct { char _[n]; } addrtype;

	*s = val;
	n = (n / 256) - 1;
	asm volatile(
#ifdef CONFIG_64BIT
		"	mvc	8(248,%0),0(%0)\n"
#else
		"	mvc	4(252,%0),0(%0)\n"
#endif
		"0:	mvc	256(256,%0),0(%0)\n"
		"	la	%0,256(%0)\n"
		"	brct	%1,0b\n"
		: "+a" (s), "+d" (n), "=m" (*(addrtype *) s)
		: "m" (*(addrtype *) s));
}

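/*
 * Initialize all 2048 entries of a region or segment (crst) table with
 * the given empty entry value; if a shadow table is attached it is
 * initialized the same way.
 */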
static inline void crst_table_init(unsigned long *crst, unsigned long entry)
{
	clear_table(crst, entry, sizeof(unsigned long)*2048);
	crst = get_shadow_table(crst);
	if (crst)
		clear_table(crst, entry, sizeof(unsigned long)*2048);
}

#ifndef __s390x__

static inline unsigned long pgd_entry_type(struct mm_struct *mm)
{
	return _SEGMENT_ENTRY_EMPTY;
}

#define pud_alloc_one(mm,address)		({ BUG(); ((pud_t *)2); })
#define pud_free(mm, x)				do { } while (0)

#define pmd_alloc_one(mm,address)		({ BUG(); ((pmd_t *)2); })
#define pmd_free(mm, x)				do { } while (0)

#define pgd_populate(mm, pgd, pud)		BUG()
#define pgd_populate_kernel(mm, pgd, pud)	BUG()

#define pud_populate(mm, pud, pmd)		BUG()
#define pud_populate_kernel(mm, pud, pmd)	BUG()

#else /* __s390x__ */

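/*
 * The top-level table type depends on the address space size:
 * a segment table up to 2GB, a region-third table up to 4TB,
 * a region-second table beyond that.
 */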
static inline unsigned long pgd_entry_type(struct mm_struct *mm)
{
	if (mm->context.asce_limit <= (1UL << 31))
		return _SEGMENT_ENTRY_EMPTY;
	if (mm->context.asce_limit <= (1UL << 42))
		return _REGION3_ENTRY_EMPTY;
	return _REGION2_ENTRY_EMPTY;
}

int crst_table_upgrade(struct mm_struct *, unsigned long limit);
void crst_table_downgrade(struct mm_struct *, unsigned long limit);

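/*
 * The pud and pmd levels are backed by full 2048-entry crst tables,
 * pre-initialized with the matching empty entry value.
 */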
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
{
	unsigned long *table = crst_table_alloc(mm, mm->context.noexec);
	if (table)
		crst_table_init(table, _REGION3_ENTRY_EMPTY);
	return (pud_t *) table;
}
#define pud_free(mm, pud) crst_table_free(mm, (unsigned long *) pud)

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
{
	unsigned long *table = crst_table_alloc(mm, mm->context.noexec);
	if (table)
		crst_table_init(table, _SEGMENT_ENTRY_EMPTY);
	return (pmd_t *) table;
}
#define pmd_free(mm, pmd) crst_table_free(mm, (unsigned long *) pmd)

static inline void pgd_populate_kernel(struct mm_struct *mm,
				       pgd_t *pgd, pud_t *pud)
{
	pgd_val(*pgd) = _REGION2_ENTRY | __pa(pud);
}

static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
{
	pgd_populate_kernel(mm, pgd, pud);
	if (mm->context.noexec) {
		pgd = get_shadow_table(pgd);
		pud = get_shadow_table(pud);
		pgd_populate_kernel(mm, pgd, pud);
	}
}

static inline void pud_populate_kernel(struct mm_struct *mm,
				       pud_t *pud, pmd_t *pmd)
{
	pud_val(*pud) = _REGION3_ENTRY | __pa(pmd);
}

static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	pud_populate_kernel(mm, pud, pmd);
	if (mm->context.noexec) {
		pud = get_shadow_table(pud);
		pmd = get_shadow_table(pmd);
		pud_populate_kernel(mm, pud, pmd);
	}
}

#endif /* __s390x__ */

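/*
 * Allocate the top-level crst table for a new mm after setting up the
 * lock and list heads used by the page table allocator.
 */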
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	spin_lock_init(&mm->context.list_lock);
	INIT_LIST_HEAD(&mm->context.crst_list);
	INIT_LIST_HEAD(&mm->context.pgtable_list);
	return (pgd_t *)
		crst_table_alloc(mm, user_mode == SECONDARY_SPACE_MODE);
}
#define pgd_free(mm, pgd) crst_table_free(mm, (unsigned long *) pgd)

static inline void pmd_populate_kernel(struct mm_struct *mm,
				       pmd_t *pmd, pte_t *pte)
{
	pmd_val(*pmd) = _SEGMENT_ENTRY + __pa(pte);
}

static inline void pmd_populate(struct mm_struct *mm,
				pmd_t *pmd, pgtable_t pte)
{
	pmd_populate_kernel(mm, pmd, pte);
	if (mm->context.noexec) {
		pmd = get_shadow_table(pmd);
		pmd_populate_kernel(mm, pmd, pte + PTRS_PER_PTE);
	}
}

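/* Mask off the low bits of a segment table entry to get back the page table. */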
#define pmd_pgtable(pmd) \
	(pgtable_t)(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE)

/*
 * page table entry allocation/free routines.
 */
#define pte_alloc_one_kernel(mm, vmaddr) ((pte_t *) page_table_alloc(mm))
#define pte_alloc_one(mm, vmaddr)	((pte_t *) page_table_alloc(mm))

#define pte_free_kernel(mm, pte) page_table_free(mm, (unsigned long *) pte)
#define pte_free(mm, pte)	page_table_free(mm, (unsigned long *) pte)

extern void rcu_table_freelist_finish(void);

#endif /* _S390_PGALLOC_H */