// SPDX-License-Identifier: GPL-2.0
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/io.h>
#include <linux/export.h>
#include <asm/cacheflush.h>
#include <asm/pgtable.h>

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static int __read_mostly ioremap_p4d_capable;
static int __read_mostly ioremap_pud_capable;
static int __read_mostly ioremap_pmd_capable;
static int __read_mostly ioremap_huge_disabled;

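/*
 * "nohugeiomap" on the kernel command line disables huge I/O mappings:
 * every ioremap() then populates base-page-size PTEs only.
 */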
static int __init set_nohugeiomap(char *str)
{
	ioremap_huge_disabled = 1;
	return 0;
}
early_param("nohugeiomap", set_nohugeiomap);

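/*
 * Called once during early boot to cache which page-table levels the
 * architecture can use for huge I/O mappings.  Nothing here ever sets
 * ioremap_p4d_capable, so P4D-sized mappings remain disabled.
 */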
void __init ioremap_huge_init(void)
{
	if (!ioremap_huge_disabled) {
		if (arch_ioremap_pud_supported())
			ioremap_pud_capable = 1;
		if (arch_ioremap_pmd_supported())
			ioremap_pmd_capable = 1;
	}
}

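/* Runtime checks used by the range-mapping functions below. */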
static inline int ioremap_p4d_enabled(void)
{
	return ioremap_p4d_capable;
}

static inline int ioremap_pud_enabled(void)
{
	return ioremap_pud_capable;
}

static inline int ioremap_pmd_enabled(void)
{
	return ioremap_pmd_capable;
}

#else	/* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static inline int ioremap_p4d_enabled(void) { return 0; }
static inline int ioremap_pud_enabled(void) { return 0; }
static inline int ioremap_pmd_enabled(void) { return 0; }
#endif	/* CONFIG_HAVE_ARCH_HUGE_VMAP */

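/*
 * Lowest level of the walk: populate one PTE page with base-page-size
 * mappings.  The entries must be unused (pte_none()); ioremap() never
 * silently overwrites a live mapping.
 */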
static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
	pte_t *pte;
	u64 pfn;

	pfn = phys_addr >> PAGE_SHIFT;
	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		BUG_ON(!pte_none(*pte));
		set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

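/*
 * Map [addr, end) at PMD granularity where possible.  A huge entry is
 * used only when the chunk spans a full PMD_SIZE and both the virtual
 * and physical addresses are PMD-aligned; pmd_free_pte_page() must
 * first get rid of any stale PTE page left over from an earlier
 * mapping of the range.  Everything else falls back to PTE level.
 */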
static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
	pmd_t *pmd;
	unsigned long next;

	/* Keep only the offset, so that phys_addr + addr is always the
	 * physical address backing the virtual address being mapped. */
	phys_addr -= addr;
	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);

		if (ioremap_pmd_enabled() &&
		    ((next - addr) == PMD_SIZE) &&
		    IS_ALIGNED(phys_addr + addr, PMD_SIZE) &&
		    pmd_free_pte_page(pmd, addr)) {
			if (pmd_set_huge(pmd, phys_addr + addr, prot))
				continue;
		}

		if (ioremap_pte_range(pmd, addr, next, phys_addr + addr, prot))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

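/*
 * Same as the PMD walker one level up: try a PUD-sized huge entry when
 * size and alignment allow and pud_free_pmd_page() can clear out any
 * stale lower-level table, otherwise descend to ioremap_pmd_range().
 */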
static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
	pud_t *pud;
	unsigned long next;

	phys_addr -= addr;
	pud = pud_alloc(&init_mm, p4d, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);

		if (ioremap_pud_enabled() &&
		    ((next - addr) == PUD_SIZE) &&
		    IS_ALIGNED(phys_addr + addr, PUD_SIZE) &&
		    pud_free_pmd_page(pud, addr)) {
			if (pud_set_huge(pud, phys_addr + addr, prot))
				continue;
		}

		if (ioremap_pmd_range(pud, addr, next, phys_addr + addr, prot))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

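/*
 * P4D-level walker.  The huge path is effectively dead code for now,
 * since ioremap_p4d_enabled() can never return true (see
 * ioremap_huge_init() above), but it keeps the walk symmetric across
 * all page-table levels.
 */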
static inline int ioremap_p4d_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
	p4d_t *p4d;
	unsigned long next;

	phys_addr -= addr;
	p4d = p4d_alloc(&init_mm, pgd, addr);
	if (!p4d)
		return -ENOMEM;
	do {
		next = p4d_addr_end(addr, end);

		if (ioremap_p4d_enabled() &&
		    ((next - addr) == P4D_SIZE) &&
		    IS_ALIGNED(phys_addr + addr, P4D_SIZE)) {
			if (p4d_set_huge(p4d, phys_addr + addr, prot))
				continue;
		}

		if (ioremap_pud_range(p4d, addr, next, phys_addr + addr, prot))
			return -ENOMEM;
	} while (p4d++, addr = next, addr != end);
	return 0;
}

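/*
 * ioremap_page_range - populate kernel page tables for an I/O mapping.
 * @addr, @end: page-aligned virtual range to map.
 * @phys_addr: physical address the range should point at.
 * @prot: protection and caching attributes for the new entries.
 *
 * Entry point for the architectures' ioremap() implementations.  May
 * sleep while allocating page-table pages, hence the might_sleep().
 */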
int ioremap_page_range(unsigned long addr,
		       unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
	pgd_t *pgd;
	unsigned long start;
	unsigned long next;
	int err;

	might_sleep();
	BUG_ON(addr >= end);

	start = addr;
	phys_addr -= addr;
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = ioremap_p4d_range(pgd, addr, next, phys_addr + addr, prot);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);

	flush_cache_vmap(start, end);

	return err;
}
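
/*
 * Typical use (a sketch, not code from this file): an architecture's
 * ioremap() reserves a virtual range and then calls in here, roughly:
 *
 *	struct vm_struct *area;
 *
 *	area = get_vm_area_caller(size, VM_IOREMAP, caller);
 *	if (!area)
 *		return NULL;
 *	if (ioremap_page_range((unsigned long)area->addr,
 *			       (unsigned long)area->addr + size,
 *			       phys_addr, prot)) {
 *		free_vm_area(area);
 *		return NULL;
 *	}
 *	return (void __iomem *)area->addr;
 */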