/*
 * PPC Huge TLB Page Support for Book3E MMU
 *
 * Copyright (C) 2009 David Gibson, IBM Corporation.
 * Copyright (C) 2011 Becky Bruce, Freescale Semiconductor
 *
 */
#include <linux/mm.h>
#include <linux/hugetlb.h>
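
/*
 * Return the hardware TSIZE encoding for the given Linux page-size
 * index, as recorded in mmu_psize_defs[].
 */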
static inline int mmu_get_tsize(int psize)
{
	return mmu_psize_defs[psize].enc;
}
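
/*
 * Search the TLB for an entry matching the given effective address and
 * PID.  MAS6 is loaded with the PID and tlbsx performs the lookup; on
 * parts with MMU_FTR_USE_TLBRSRV the record form (tlbsx.) sets CR0 so a
 * hit can be tested directly, otherwise MAS1 (SPR 0x271) is read back
 * and its valid bit shifted down.  Returns 1 on a hit, 0 otherwise.
 */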
static inline int book3e_tlb_exists(unsigned long ea, unsigned long pid)
{
	int found = 0;

	mtspr(SPRN_MAS6, pid << 16);
	if (mmu_has_feature(MMU_FTR_USE_TLBRSRV)) {
		asm volatile(
			"li	%0,0\n"
			"tlbsx.	0,%1\n"
			"bne	1f\n"
			"li	%0,1\n"
			"1:\n"
			: "=&r"(found) : "r"(ea));
	} else {
		asm volatile(
			"tlbsx	0,%1\n"
			"mfspr	%0,0x271\n"
			"srwi	%0,%0,31\n"
			: "=&r"(found) : "r"(ea));
	}

	return found;
}
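
/*
 * Preload a TLB entry for a huge page at the given user effective
 * address.  The page size is taken either from the slice map or from
 * the VMA, and unless a matching TLB entry is already present the
 * entry described by the PTE is written directly with tlbwe.  On FSL
 * parts the entry goes into TLB1 (the CAM), using a per-cpu
 * round-robin slot index.
 */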
void book3e_hugetlb_preload(struct mm_struct *mm, unsigned long ea, pte_t pte)
{
	unsigned long mas1, mas2;
	u64 mas7_3;
	unsigned long psize, tsize, shift;
	unsigned long flags;

#ifdef CONFIG_PPC_FSL_BOOK3E
	int index, lz, ncams;
	struct vm_area_struct *vma;
#endif

	if (unlikely(is_kernel_addr(ea)))
		return;

#ifdef CONFIG_PPC_MM_SLICES
	psize = get_slice_psize(mm, ea);
	tsize = mmu_get_tsize(psize);
	shift = mmu_psize_defs[psize].shift;
#else
	vma = find_vma(mm, ea);
	psize = vma_mmu_pagesize(vma);	/* returns actual size in bytes */
	asm (PPC_CNTLZL "%0,%1" : "=r" (lz) : "r" (psize));
	shift = 31 - lz;
	tsize = 21 - lz;
#endif

	/*
	 * We can't be interrupted while we're setting up the MAS
	 * registers or after we've confirmed that no TLB entry exists.
	 */
	local_irq_save(flags);

	if (unlikely(book3e_tlb_exists(ea, mm->context.id))) {
		local_irq_restore(flags);
		return;
	}

#ifdef CONFIG_PPC_FSL_BOOK3E
	ncams = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;

	/* We have to use the CAM(TLB1) on FSL parts for hugepages */
	index = __get_cpu_var(next_tlbcam_idx);
	mtspr(SPRN_MAS0, MAS0_ESEL(index) | MAS0_TLBSEL(1));

	/* Just round-robin the entries and wrap when we hit the end */
	if (unlikely(index == ncams - 1))
		__get_cpu_var(next_tlbcam_idx) = tlbcam_index;
	else
		__get_cpu_var(next_tlbcam_idx)++;
#endif
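
	/*
	 * Build the new entry: MAS1 carries the valid bit, PID and page
	 * size, MAS2 the effective page number and the WIMGE bits from
	 * the PTE, and MAS7_3 the physical address and the BAP permission
	 * bits.  Write permission is dropped while the PTE is clean,
	 * presumably so the first store still faults for dirty tracking.
	 */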
	mas1 = MAS1_VALID | MAS1_TID(mm->context.id) | MAS1_TSIZE(tsize);
	mas2 = ea & ~((1UL << shift) - 1);
	mas2 |= (pte_val(pte) >> PTE_WIMGE_SHIFT) & MAS2_WIMGE_MASK;
	mas7_3 = (u64)pte_pfn(pte) << PAGE_SHIFT;
	mas7_3 |= (pte_val(pte) >> PTE_BAP_SHIFT) & MAS3_BAP_MASK;
	if (!pte_dirty(pte))
		mas7_3 &= ~(MAS3_SW|MAS3_UW);

	mtspr(SPRN_MAS1, mas1);
	mtspr(SPRN_MAS2, mas2);

	if (mmu_has_feature(MMU_FTR_USE_PAIRED_MAS)) {
		mtspr(SPRN_MAS7_MAS3, mas7_3);
	} else {
		mtspr(SPRN_MAS7, upper_32_bits(mas7_3));
		mtspr(SPRN_MAS3, lower_32_bits(mas7_3));
	}

	asm volatile ("tlbwe");

	local_irq_restore(flags);
}
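
/*
 * Flush the TLB entry for one huge page.  huge_page_shift() is log2 of
 * the page size in bytes, so shift - 10 is log2 of the size in KB,
 * matching the tsize encoding used above in book3e_hugetlb_preload()
 * (tsize = 21 - lz, i.e. shift - 10).
 */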
void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	struct hstate *hstate = hstate_file(vma->vm_file);
	unsigned long tsize = huge_page_shift(hstate) - 10;

	__flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr, tsize, 0);
}