/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/io.h>
#include <linux/export.h>
#include <asm/cacheflush.h>
#include <asm/pgtable.h>

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static int __read_mostly ioremap_p4d_capable;
static int __read_mostly ioremap_pud_capable;
static int __read_mostly ioremap_pmd_capable;
static int __read_mostly ioremap_huge_disabled;

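/* Handle the "nohugeiomap" early parameter: disable huge I/O mappings. */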
static int __init set_nohugeiomap(char *str)
{
	ioremap_huge_disabled = 1;
	return 0;
}
early_param("nohugeiomap", set_nohugeiomap);

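/*
 * Called early at boot: record which huge page sizes (PUD/PMD) the
 * architecture supports for ioremap(), unless "nohugeiomap" was given.
 */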
void __init ioremap_huge_init(void)
{
	if (!ioremap_huge_disabled) {
		if (arch_ioremap_pud_supported())
			ioremap_pud_capable = 1;
		if (arch_ioremap_pmd_supported())
			ioremap_pmd_capable = 1;
	}
}

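/*
 * Per-level predicates used by the walkers below; each returns non-zero
 * when a huge mapping may be attempted at that level.
 */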
static inline int ioremap_p4d_enabled(void)
{
	return ioremap_p4d_capable;
}

static inline int ioremap_pud_enabled(void)
{
	return ioremap_pud_capable;
}

static inline int ioremap_pmd_enabled(void)
{
	return ioremap_pmd_capable;
}

#else	/* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static inline int ioremap_p4d_enabled(void) { return 0; }
static inline int ioremap_pud_enabled(void) { return 0; }
static inline int ioremap_pmd_enabled(void) { return 0; }
#endif	/* CONFIG_HAVE_ARCH_HUGE_VMAP */

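/*
 * Lowest level of the walk: fill in one PTE per base page, mapping
 * successive page frames starting at phys_addr.
 */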
static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
	pte_t *pte;
	u64 pfn;

	pfn = phys_addr >> PAGE_SHIFT;
	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		BUG_ON(!pte_none(*pte));
		set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

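/*
 * PMD-level walker. phys_addr is rebased by -addr on entry so that
 * phys_addr + addr always yields the physical address backing the
 * current virtual address.
 */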
static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
	pmd_t *pmd;
	unsigned long next;

	phys_addr -= addr;
	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);

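		/*
		 * Map a whole PMD-sized, PMD-aligned chunk with one huge
		 * entry when the architecture allows it; otherwise fall
		 * back to individual PTEs.
		 */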
		if (ioremap_pmd_enabled() &&
		    ((next - addr) == PMD_SIZE) &&
		    IS_ALIGNED(phys_addr + addr, PMD_SIZE)) {
			if (pmd_set_huge(pmd, phys_addr + addr, prot))
				continue;
		}

		if (ioremap_pte_range(pmd, addr, next, phys_addr + addr, prot))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

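/* PUD-level walker; mirrors ioremap_pmd_range() one level higher. */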
static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
	pud_t *pud;
	unsigned long next;

	phys_addr -= addr;
	pud = pud_alloc(&init_mm, p4d, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);

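		/* Prefer one huge PUD entry over a full PMD walk if possible. */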
		if (ioremap_pud_enabled() &&
		    ((next - addr) == PUD_SIZE) &&
		    IS_ALIGNED(phys_addr + addr, PUD_SIZE)) {
			if (pud_set_huge(pud, phys_addr + addr, prot))
				continue;
		}

		if (ioremap_pmd_range(pud, addr, next, phys_addr + addr, prot))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

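/* P4D-level walker; the P4D level is folded into the PGD on most arches. */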
static inline int ioremap_p4d_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
	p4d_t *p4d;
	unsigned long next;

	phys_addr -= addr;
	p4d = p4d_alloc(&init_mm, pgd, addr);
	if (!p4d)
		return -ENOMEM;
	do {
		next = p4d_addr_end(addr, end);

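		/* Prefer one huge P4D entry over a full PUD walk if possible. */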
		if (ioremap_p4d_enabled() &&
		    ((next - addr) == P4D_SIZE) &&
		    IS_ALIGNED(phys_addr + addr, P4D_SIZE)) {
			if (p4d_set_huge(p4d, phys_addr + addr, prot))
				continue;
		}

		if (ioremap_pud_range(p4d, addr, next, phys_addr + addr, prot))
			return -ENOMEM;
	} while (p4d++, addr = next, addr != end);
	return 0;
}

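/*
 * ioremap_page_range - map physical memory into the kernel's page tables
 * @addr:	start of the virtual range, page aligned
 * @end:	end of the virtual range (exclusive)
 * @phys_addr:	physical address the range should map to
 * @prot:	page protection for the mapping
 *
 * Walks the page tables from the PGD down, installing huge mappings at
 * each level where enabled and falling back to smaller entries otherwise.
 * Returns 0 on success or -ENOMEM if a table allocation fails.
 */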
int ioremap_page_range(unsigned long addr,
		       unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
	pgd_t *pgd;
	unsigned long start;
	unsigned long next;
	int err;

	BUG_ON(addr >= end);

	start = addr;
	phys_addr -= addr;
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = ioremap_p4d_range(pgd, addr, next, phys_addr + addr, prot);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);

	flush_cache_vmap(start, end);

	return err;
}