/*
 * include/asm-s390/mmu_context.h
 *
 * S390 version
 *
 * Derived from "include/asm-i386/mmu_context.h"
 */

#ifndef __S390_MMU_CONTEXT_H
#define __S390_MMU_CONTEXT_H

#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include <asm-generic/mm_hooks.h>

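/*
 * init_new_context() - called when a new mm_struct is created (fork or
 * exec). It initializes the architecture-specific mm context: the
 * attach/flush bookkeeping used by the TLB flush code, the address
 * space control element (ASCE) bits that describe the page table
 * format, and the PGSTE flags that select between normal and extended
 * page tables.
 */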
static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	atomic_set(&mm->context.attach_count, 0);
	mm->context.flush_mm = 0;
	mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
#ifdef CONFIG_64BIT
	mm->context.asce_bits |= _ASCE_TYPE_REGION3;
#endif
	if (current->mm && current->mm->context.alloc_pgste) {
		/*
		 * alloc_pgste indicates that any NEW context will be created
		 * with extended page tables. The old context is unchanged. The
		 * page table allocation and the page table operations will
		 * look at has_pgste to distinguish normal and extended page
		 * tables. The only way to create extended page tables is to
		 * set alloc_pgste and then create a new context (e.g. dup_mm).
		 * The page table allocation is called after init_new_context
		 * and if has_pgste is set, it will create extended page
		 * tables.
		 */
		mm->context.noexec = 0;
		mm->context.has_pgste = 1;
		mm->context.alloc_pgste = 1;
	} else {
		mm->context.noexec = (user_mode == SECONDARY_SPACE_MODE);
		mm->context.has_pgste = 0;
		mm->context.alloc_pgste = 0;
	}
	mm->context.asce_limit = STACK_TOP_MAX;
	crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
	return 0;
}
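/*
 * The alloc_pgste path is used when a whole mm must be switched over to
 * extended page tables before a KVM guest can run. A condensed sketch
 * of that flow, modeled on s390_enable_sie() in arch/s390/mm/pgtable.c,
 * is kept below for illustration only (not built; locking and most
 * error handling are omitted):
 */
#if 0
static int enable_pgste_sketch(void)	/* illustrative only */
{
	struct task_struct *tsk = current;
	struct mm_struct *mm;

	if (tsk->mm->context.has_pgste)
		return 0;	/* already using extended page tables */
	/*
	 * Set alloc_pgste on the OLD context, then duplicate the mm:
	 * init_new_context() sees alloc_pgste and sets has_pgste in the
	 * NEW context, so its page table allocation creates PGSTEs.
	 */
	tsk->mm->context.alloc_pgste = 1;
	mm = dup_mm(tsk);
	tsk->mm->context.alloc_pgste = 0;
	if (!mm)
		return -ENOMEM;
	/* ... switch tsk over to the new mm and drop the old one ... */
	return 0;
}
#endif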

#define destroy_context(mm) do { } while (0)

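/*
 * Control registers are loaded with lctl on 31-bit (ESA/390) kernels
 * and with lctlg on 64-bit (z/Architecture) kernels, where the control
 * registers are 64 bits wide.
 */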
#ifndef __s390x__
#define LCTL_OPCODE "lctl"
#else
#define LCTL_OPCODE "lctlg"
#endif

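/*
 * update_mm() - install the page tables of "mm" as the user address
 * space: the user ASCE is stored in the lowcore and loaded either into
 * control register 1 (primary space ASCE) or, for kernels that run
 * user space in home space mode, into control register 13 (home space
 * ASCE). If noexec is active, cr1 receives the ASCE of the shadow
 * table, so instruction fetches and data accesses go through
 * different page tables.
 */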
static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk)
{
	pgd_t *pgd = mm->pgd;

	S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd);
	if (user_mode != HOME_SPACE_MODE) {
		/* Load primary space page table origin. */
		pgd = mm->context.noexec ? get_shadow_table(pgd) : pgd;
		S390_lowcore.user_exec_asce = mm->context.asce_bits | __pa(pgd);
		asm volatile(LCTL_OPCODE" 1,1,%0\n"
			     : : "m" (S390_lowcore.user_exec_asce) );
	} else
		/* Load home space page table origin. */
		asm volatile(LCTL_OPCODE" 13,13,%0"
			     : : "m" (S390_lowcore.user_asce) );
	set_fs(current->thread.mm_segment);
}

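/*
 * switch_mm() - switch the CPU over to the address space of "next".
 * The attach_count of an mm tracks how many CPUs have it attached; the
 * TLB flush code uses it to decide whether a flush can be executed
 * right away or must be deferred by setting flush_mm, in which case
 * the pending flush is carried out here, when the mm is attached
 * again.
 */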
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
	update_mm(next, tsk);
	atomic_dec(&prev->context.attach_count);
	WARN_ON(atomic_read(&prev->context.attach_count) < 0);
	atomic_inc(&next->context.attach_count);
	/* Check for TLBs not flushed yet */
	if (next->context.flush_mm)
		__tlb_flush_mm(next);
}

#define enter_lazy_tlb(mm,tsk)	do { } while (0)
#define deactivate_mm(tsk,mm)	do { } while (0)

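/*
 * activate_mm() - activate a new address space on the current task.
 * It is called from the exec path, where no real task switch takes
 * place, so it simply delegates to switch_mm().
 */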
static inline void activate_mm(struct mm_struct *prev,
			       struct mm_struct *next)
{
	switch_mm(prev, next, current);
}

#endif /* __S390_MMU_CONTEXT_H */