/* arch/sparc64/mm/tsb.c
 *
 * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
 */

#include <linux/kernel.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>

#define TSB_ENTRY_ALIGNMENT	16

struct tsb {
	unsigned long tag;
	unsigned long pte;
} __attribute__((aligned(TSB_ENTRY_ALIGNMENT)));

/* We use an 8K TSB for the whole kernel.  This lets us handle
 * about 4MB of modules and vmalloc mappings without incurring
 * many hash conflicts.
 */
#define KERNEL_TSB_SIZE_BYTES	8192
#define KERNEL_TSB_NENTRIES \
	(KERNEL_TSB_SIZE_BYTES / sizeof(struct tsb))
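
/* A sketch of the arithmetic behind the sizing comment above: each
 * struct tsb is 16 bytes, so an 8192-byte TSB holds 512 entries.
 * With 8K pages (PAGE_SHIFT == 13 on sparc64) and one entry per
 * page, 512 entries cover 512 * 8K = 4MB of virtual address space
 * before two mapped pages must share a slot.
 */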

extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];

static inline unsigned long tsb_hash(unsigned long vaddr)
{
	vaddr >>= PAGE_SHIFT;
	return vaddr & (KERNEL_TSB_NENTRIES - 1);
}
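
/* Worked example (illustrative, assuming 8K pages): tsb_hash() is a
 * direct-mapped lookup on the virtual page number.  vaddr 0x402000
 * yields page number 0x201, which masks down to slot 1; any two
 * addresses exactly 512 pages (4MB) apart hash to the same slot.
 */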

static inline int tag_compare(struct tsb *entry, unsigned long vaddr, unsigned long context)
{
	/* A context of ~0UL is a wildcard that matches any entry. */
	if (context == ~0UL)
		return 1;

	return (entry->tag == ((vaddr >> 22) | (context << 48)));
}
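
/* The tag packs the upper bits of the virtual address together with
 * the context number: vaddr bits 63:22 land in the low bits of the
 * tag and the context is placed at bit 48.  (This layout is inferred
 * from the shift constants above; the assembly TSB miss handlers
 * must build tags the same way for the compare to work.)
 */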

/* TSB flushes need only occur on the processor initiating the address
 * space modification, not on each CPU the address space has run on.
 * Only the TLB flush needs that treatment: the TSB lives in memory
 * shared by all CPUs, so a single store invalidates an entry
 * everywhere, while TLB entries are per-CPU.
 */
void flush_tsb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long v;

	for (v = start; v < end; v += PAGE_SIZE) {
		struct tsb *ent = &swapper_tsb[tsb_hash(v)];

		/* Kernel mappings use context 0. */
		if (tag_compare(ent, v, 0)) {
			ent->tag = 0UL;
			/* Order the tag store before subsequent
			 * loads and stores.
			 */
			membar_storeload_storestore();
		}
	}
}

void flush_tsb_user(struct mmu_gather *mp)
{
	struct mm_struct *mm = mp->mm;
	struct tsb *tsb = (struct tsb *) mm->context.sparc64_tsb;
	unsigned long ctx = ~0UL;
	int i;

	if (CTX_VALID(mm->context))
		ctx = CTX_HWBITS(mm->context);

	for (i = 0; i < mp->tlb_nr; i++) {
		unsigned long v = mp->vaddrs[i];
		struct tsb *ent;

		/* The low bit of a batched address is used as a flag
		 * by the TLB batching code; it is not part of the
		 * page-aligned address, so mask it off.
		 */
		v &= ~0x1UL;

		ent = &tsb[tsb_hash(v)];
		if (tag_compare(ent, v, ctx)) {
			ent->tag = 0UL;
			membar_storeload_storestore();
		}
	}
}
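
/* Because the TSB is direct-mapped, each flushed address probes
 * exactly one slot.  The tag check ensures we invalidate the entry
 * only if it still belongs to this (vaddr, context) pair; an entry
 * installed by some other address hashing to the same slot is left
 * alone.
 */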

int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	unsigned long page = get_zeroed_page(GFP_KERNEL);

	mm->context.sparc64_ctx_val = 0UL;
	if (unlikely(!page))
		return -ENOMEM;

	mm->context.sparc64_tsb = (unsigned long *) page;

	return 0;
}
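
/* Note: get_zeroed_page() hands back one PAGE_SIZE (8K) page, so a
 * fresh user TSB has the same 512-entry geometry as the kernel TSB
 * (which is what lets tsb_hash() above serve both).  A zeroed tag is
 * the same value the flush routines store to invalidate an entry.
 */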

void destroy_context(struct mm_struct *mm)
{
	free_page((unsigned long) mm->context.sparc64_tsb);

	spin_lock(&ctx_alloc_lock);

	if (CTX_VALID(mm->context)) {
		unsigned long nr = CTX_NRBITS(mm->context);

		/* Return this context number to the allocator's bitmap. */
		mmu_context_bmap[nr >> 6] &= ~(1UL << (nr & 63));
	}

	spin_unlock(&ctx_alloc_lock);
}
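
/* Example of the bitmap arithmetic: for context number 130,
 * nr >> 6 == 2 selects the third 64-bit word of mmu_context_bmap,
 * and 1UL << (130 & 63) == 1UL << 2 selects the bit within it.
 */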