/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/homecache.h>

#define K(x) ((x) << (PAGE_SHIFT-10))
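
/*
 * Illustrative only (added commentary): K() converts a page count to
 * kilobytes by shifting by (PAGE_SHIFT - 10).  With hypothetical 4 KB
 * pages (PAGE_SHIFT == 12), K(3) == 3 << 2 == 12 kB; with 64 KB pages
 * (PAGE_SHIFT == 16), K(3) == 3 << 6 == 192 kB.
 */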

/*
 * The normal show_free_areas() is too verbose on Tile, with dozens
 * of processors and often four NUMA zones each with high and lowmem.
 */
void show_mem(unsigned int filter)
{
	struct zone *zone;

	pr_err("Active:%lu inactive:%lu dirty:%lu writeback:%lu unstable:%lu"
	       " free:%lu\n slab:%lu mapped:%lu pagetables:%lu bounce:%lu"
	       " pagecache:%lu swap:%lu\n",
	       (global_page_state(NR_ACTIVE_ANON) +
		global_page_state(NR_ACTIVE_FILE)),
	       (global_page_state(NR_INACTIVE_ANON) +
		global_page_state(NR_INACTIVE_FILE)),
	       global_page_state(NR_FILE_DIRTY),
	       global_page_state(NR_WRITEBACK),
	       global_page_state(NR_UNSTABLE_NFS),
	       global_page_state(NR_FREE_PAGES),
	       (global_page_state(NR_SLAB_RECLAIMABLE) +
		global_page_state(NR_SLAB_UNRECLAIMABLE)),
	       global_page_state(NR_FILE_MAPPED),
	       global_page_state(NR_PAGETABLE),
	       global_page_state(NR_BOUNCE),
	       global_page_state(NR_FILE_PAGES),
	       nr_swap_pages);

	for_each_zone(zone) {
		unsigned long flags, order, total = 0, largest_order = -1;

		if (!populated_zone(zone))
			continue;

		spin_lock_irqsave(&zone->lock, flags);
		for (order = 0; order < MAX_ORDER; order++) {
			int nr = zone->free_area[order].nr_free;
			total += nr << order;
			if (nr)
				largest_order = order;
		}
		spin_unlock_irqrestore(&zone->lock, flags);
		pr_err("Node %d %7s: %lukB (largest %luKb)\n",
		       zone_to_nid(zone), zone->name,
		       K(total), largest_order ? K(1UL) << largest_order : 0);
	}
}

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		BUG();
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		BUG();
		return;
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		BUG();
		return;
	}
	pte = pte_offset_kernel(pmd, vaddr);
	/* <pfn,flags> stored as-is, to permit clearing entries */
	set_pte(pte, pfn_pte(pfn, flags));

	/*
	 * It's enough to flush this one mapping.
	 * This appears conservative since it is only called
	 * from __set_fixmap.
	 */
	local_flush_tlb_page(NULL, vaddr, PAGE_SIZE);
}

void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
}

#if defined(CONFIG_HIGHPTE)
pte_t *_pte_offset_map(pmd_t *dir, unsigned long address)
{
	pte_t *pte = kmap_atomic(pmd_page(*dir)) +
		(pmd_ptfn(*dir) << HV_LOG2_PAGE_TABLE_ALIGN) & ~PAGE_MASK;
	return &pte[pte_index(address)];
}
#endif

/**
 * shatter_huge_page() - ensure a given address is mapped by a small page.
 *
 * This function converts a huge PTE mapping kernel LOWMEM into a bunch
 * of small PTEs with the same caching.  No cache flush required, but we
 * must do a global TLB flush.
 *
 * Any caller that wishes to modify a kernel mapping that might
 * have been made with a huge page should call this function,
 * since doing so properly avoids race conditions with installing the
 * newly-shattered page and then flushing all the TLB entries.
 *
 * @addr: Address at which to shatter any existing huge page.
 */
void shatter_huge_page(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	unsigned long flags = 0;  /* happy compiler */
#ifdef __PAGETABLE_PMD_FOLDED
	struct list_head *pos;
#endif

	/* Get a pointer to the pmd entry that we need to change. */
	addr &= HPAGE_MASK;
	BUG_ON(pgd_addr_invalid(addr));
	BUG_ON(addr < PAGE_OFFSET);  /* only for kernel LOWMEM */
	pgd = swapper_pg_dir + pgd_index(addr);
	pud = pud_offset(pgd, addr);
	BUG_ON(!pud_present(*pud));
	pmd = pmd_offset(pud, addr);
	BUG_ON(!pmd_present(*pmd));
	if (!pmd_huge_page(*pmd))
		return;

	/*
	 * Grab the pgd_lock, since we may need it to walk the pgd_list,
	 * and since we need some kind of lock here to avoid races.
	 */
	spin_lock_irqsave(&pgd_lock, flags);
	if (!pmd_huge_page(*pmd)) {
		/* Lost the race to convert the huge page. */
		spin_unlock_irqrestore(&pgd_lock, flags);
		return;
	}

	/* Shatter the huge page into the preallocated L2 page table. */
	pmd_populate_kernel(&init_mm, pmd,
			    get_prealloc_pte(pte_pfn(*(pte_t *)pmd)));

#ifdef __PAGETABLE_PMD_FOLDED
	/* Walk every pgd on the system and update the pmd there. */
	list_for_each(pos, &pgd_list) {
		pmd_t *copy_pmd;
		pgd = list_to_pgd(pos) + pgd_index(addr);
		pud = pud_offset(pgd, addr);
		copy_pmd = pmd_offset(pud, addr);
		__set_pmd(copy_pmd, *pmd);
	}
#endif

	/* Tell every cpu to notice the change. */
	flush_remote(0, 0, NULL, addr, HPAGE_SIZE, HPAGE_SIZE,
		     cpu_possible_mask, NULL, 0);

	/* Hold the lock until the TLB flush is finished to avoid races. */
	spin_unlock_irqrestore(&pgd_lock, flags);
}
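
/*
 * Illustrative sketch only (not part of the original file): a hypothetical
 * caller that wants to rewrite one small PTE inside a region that may have
 * been mapped with a huge kernel PTE would typically shatter first, then
 * edit and flush the small mapping it cares about, e.g.:
 *
 *	shatter_huge_page(addr);
 *	ptep = virt_to_pte(NULL, addr);		// walk swapper_pg_dir
 *	set_pte(ptep, pfn_pte(pte_pfn(*ptep), new_prot));
 *	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
 *
 * "new_prot" and the exact follow-up flush are hypothetical; the point is
 * only that the shatter happens before the small-page PTE is rewritten.
 */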

/*
 * List of all pgd's needed so it can invalidate entries in both cached
 * and uncached pgd's. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * The locking scheme was chosen on the basis of manfred's
 * recommendations and having no core impact whatsoever.
 * -- wli
 */
DEFINE_SPINLOCK(pgd_lock);
LIST_HEAD(pgd_list);

static inline void pgd_list_add(pgd_t *pgd)
{
	list_add(pgd_to_list(pgd), &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	list_del(pgd_to_list(pgd));
}

#define KERNEL_PGD_INDEX_START pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_INDEX_START)

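/*
 * Illustrative only (added commentary): these two macros split each pgd
 * into a per-process user part and a kernel part shared with
 * swapper_pg_dir.  For a hypothetical configuration with
 * PTRS_PER_PGD == 1024 and PAGE_OFFSET == 0xC0000000 (a 3G/1G split),
 * pgd_index(PAGE_OFFSET) == 768, so pgd_ctor() below zeroes entries
 * 0..767 and copies entries 768..1023 from swapper_pg_dir.
 */
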
static void pgd_ctor(pgd_t *pgd)
{
	unsigned long flags;

	memset(pgd, 0, KERNEL_PGD_INDEX_START*sizeof(pgd_t));
	spin_lock_irqsave(&pgd_lock, flags);

#ifndef __tilegx__
	/*
	 * Check that the user interrupt vector has no L2.
	 * It never should for the swapper, and new page tables
	 * should always start with an empty user interrupt vector.
	 */
	BUG_ON(((u64 *)swapper_pg_dir)[pgd_index(MEM_USER_INTRPT)] != 0);
#endif

	memcpy(pgd + KERNEL_PGD_INDEX_START,
	       swapper_pg_dir + KERNEL_PGD_INDEX_START,
	       KERNEL_PGD_PTRS * sizeof(pgd_t));

	pgd_list_add(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}

static void pgd_dtor(pgd_t *pgd)
{
	unsigned long flags; /* can be called from interrupt context */

	spin_lock_irqsave(&pgd_lock, flags);
	pgd_list_del(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}

pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = kmem_cache_alloc(pgd_cache, GFP_KERNEL);
	if (pgd)
		pgd_ctor(pgd);
	return pgd;
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	pgd_dtor(pgd);
	kmem_cache_free(pgd_cache, pgd);
}


#define L2_USER_PGTABLE_PAGES (1 << L2_USER_PGTABLE_ORDER)

struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	gfp_t flags = GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO;
	struct page *p;
#if L2_USER_PGTABLE_ORDER > 0
	int i;
#endif

#ifdef CONFIG_HIGHPTE
	flags |= __GFP_HIGHMEM;
#endif

	p = alloc_pages(flags, L2_USER_PGTABLE_ORDER);
	if (p == NULL)
		return NULL;

#if L2_USER_PGTABLE_ORDER > 0
	/*
	 * Make every page have a page_count() of one, not just the first.
	 * We don't use __GFP_COMP since it doesn't look like it works
	 * correctly with tlb_remove_page().
	 */
	for (i = 1; i < L2_USER_PGTABLE_PAGES; ++i) {
		init_page_count(p+i);
		inc_zone_page_state(p+i, NR_PAGETABLE);
	}
#endif

	pgtable_page_ctor(p);
	return p;
}
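
/*
 * Illustrative note (added commentary): when L2_USER_PGTABLE_ORDER > 0 a
 * "page table" here is really L2_USER_PGTABLE_PAGES contiguous pages.
 * Giving each trailing page its own reference count above is what lets
 * pte_free() and __pte_free_tlb() below return the pages one at a time
 * via __free_page()/tlb_remove_page() rather than as one higher-order block.
 */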

/*
 * Free page immediately (used in __pte_alloc if we raced with another
 * process).  We have to correct whatever pte_alloc_one() did before
 * returning the pages to the allocator.
 */
void pte_free(struct mm_struct *mm, struct page *p)
{
	int i;

	pgtable_page_dtor(p);
	__free_page(p);

	for (i = 1; i < L2_USER_PGTABLE_PAGES; ++i) {
		__free_page(p+i);
		dec_zone_page_state(p+i, NR_PAGETABLE);
	}
}

void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
		    unsigned long address)
{
	int i;

	pgtable_page_dtor(pte);
	tlb_remove_page(tlb, pte);

	for (i = 1; i < L2_USER_PGTABLE_PAGES; ++i) {
		tlb_remove_page(tlb, pte + i);
		dec_zone_page_state(pte + i, NR_PAGETABLE);
	}
}

#ifndef __tilegx__

/*
 * FIXME: needs to be atomic vs hypervisor writes.  For now we make the
 * window of vulnerability a bit smaller by doing an unlocked 8-bit update.
 */
int ptep_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pte_t *ptep)
{
#if HV_PTE_INDEX_ACCESSED < 8 || HV_PTE_INDEX_ACCESSED >= 16
# error Code assumes HV_PTE "accessed" bit in second byte
#endif
	u8 *tmp = (u8 *)ptep;
	u8 second_byte = tmp[1];
	if (!(second_byte & (1 << (HV_PTE_INDEX_ACCESSED - 8))))
		return 0;
	tmp[1] = second_byte & ~(1 << (HV_PTE_INDEX_ACCESSED - 8));
	return 1;
}
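
/*
 * Illustrative only (added commentary): the narrow store above depends on
 * the "accessed" bit living in bits 8..15 of the PTE.  With a hypothetical
 * HV_PTE_INDEX_ACCESSED == 10, the code reads byte 1, tests bit (10 - 8),
 * and stores byte 1 back with that bit cleared:
 *
 *	u8 second_byte = tmp[1];		// e.g. 0x07
 *	tmp[1] = second_byte & ~(1 << 2);	// stores 0x03
 *
 * Only that one byte is written, so a racing hypervisor update to the
 * other bytes of the PTE is never clobbered (an update to this same byte
 * still can be, hence the FIXME above).
 */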

/*
 * This implementation is atomic vs hypervisor writes, since the hypervisor
 * always writes the low word (where "accessed" and "dirty" are) and this
 * routine only writes the high word.
 */
void ptep_set_wrprotect(struct mm_struct *mm,
			unsigned long addr, pte_t *ptep)
{
#if HV_PTE_INDEX_WRITABLE < 32
# error Code assumes HV_PTE "writable" bit in high word
#endif
	u32 *tmp = (u32 *)ptep;
	tmp[1] = tmp[1] & ~(1 << (HV_PTE_INDEX_WRITABLE - 32));
}

#endif

pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_addr_invalid(addr))
		return NULL;

	pgd = mm ? pgd_offset(mm, addr) : swapper_pg_dir + pgd_index(addr);
	pud = pud_offset(pgd, addr);
	if (!pud_present(*pud))
		return NULL;
	pmd = pmd_offset(pud, addr);
	if (pmd_huge_page(*pmd))
		return (pte_t *)pmd;
	if (!pmd_present(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, addr);
}
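
/*
 * Illustrative sketch only (not part of the original file): a hypothetical
 * caller probing the kernel mapping of a lowmem address might do
 *
 *	pte_t *ptep = virt_to_pte(NULL, (unsigned long)va);
 *	if (ptep && pte_present(*ptep))
 *		pr_info("va %p -> pfn %#lx\n", va, pte_pfn(*ptep));
 *
 * bearing in mind that a huge-page mapping returns the pmd entry cast to
 * pte_t *, so the result is not always a pointer into an L2 page table.
 */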

pgprot_t set_remote_cache_cpu(pgprot_t prot, int cpu)
{
	unsigned int width = smp_width;
	int x = cpu % width;
	int y = cpu / width;
	BUG_ON(y >= smp_height);
	BUG_ON(hv_pte_get_mode(prot) != HV_PTE_MODE_CACHE_TILE_L3);
	BUG_ON(cpu < 0 || cpu >= NR_CPUS);
	BUG_ON(!cpu_is_valid_lotar(cpu));
	return hv_pte_set_lotar(prot, HV_XY_TO_LOTAR(x, y));
}

int get_remote_cache_cpu(pgprot_t prot)
{
	HV_LOTAR lotar = hv_pte_get_lotar(prot);
	int x = HV_LOTAR_X(lotar);
	int y = HV_LOTAR_Y(lotar);
	BUG_ON(hv_pte_get_mode(prot) != HV_PTE_MODE_CACHE_TILE_L3);
	return x + y * smp_width;
}
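
/*
 * Illustrative only (added commentary): set_remote_cache_cpu() and
 * get_remote_cache_cpu() are inverses under the linear numbering
 * cpu == x + y * smp_width.  On a hypothetical 8x8 mesh (smp_width == 8),
 * cpu 19 encodes as (x, y) == (19 % 8, 19 / 8) == (3, 2) and decodes back
 * as 3 + 2 * 8 == 19.
 */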

/*
 * Convert a kernel VA to a PA and homing information.
 */
int va_to_cpa_and_pte(void *va, unsigned long long *cpa, pte_t *pte)
{
	struct page *page = virt_to_page(va);
	pte_t null_pte = { 0 };

	*cpa = __pa(va);

	/* Note that this is not writing a page table, just returning a pte. */
	*pte = pte_set_home(null_pte, page_home(page));

	return 0; /* return non-zero if not hfh? */
}
EXPORT_SYMBOL(va_to_cpa_and_pte);

void __set_pte(pte_t *ptep, pte_t pte)
{
#ifdef __tilegx__
	*ptep = pte;
#else
# if HV_PTE_INDEX_PRESENT >= 32 || HV_PTE_INDEX_MIGRATING >= 32
#  error Must write the present and migrating bits last
# endif
	if (pte_present(pte)) {
		((u32 *)ptep)[1] = (u32)(pte_val(pte) >> 32);
		barrier();
		((u32 *)ptep)[0] = (u32)(pte_val(pte));
	} else {
		((u32 *)ptep)[0] = (u32)(pte_val(pte));
		barrier();
		((u32 *)ptep)[1] = (u32)(pte_val(pte) >> 32);
	}
#endif /* __tilegx__ */
}
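
/*
 * Illustrative note (added commentary): the ordering above matters because
 * a 64-bit PTE is written as two 32-bit stores on tilepro.  When installing
 * a present PTE, writing the low word (which holds the "present" and
 * "migrating" bits) first could let another reader briefly see "present"
 * paired with a stale high word; writing the high word first means
 * "present" only becomes visible once the rest of the PTE is in place.
 * Clearing a PTE writes the low word first for the same reason.
 */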

void set_pte(pte_t *ptep, pte_t pte)
{
	if (pte_present(pte) &&
	    (!CHIP_HAS_MMIO() || hv_pte_get_mode(pte) != HV_PTE_MODE_MMIO)) {
		/* The PTE actually references physical memory. */
		unsigned long pfn = pte_pfn(pte);
		if (pfn_valid(pfn)) {
			/* Update the home of the PTE from the struct page. */
			pte = pte_set_home(pte, page_home(pfn_to_page(pfn)));
		} else if (hv_pte_get_mode(pte) == 0) {
			/* remap_pfn_range(), etc, must supply PTE mode. */
			panic("set_pte(): out-of-range PFN and mode 0\n");
		}
	}

	__set_pte(ptep, pte);
}

/* Can this mm load a PTE with cached_priority set? */
static inline int mm_is_priority_cached(struct mm_struct *mm)
{
	return mm->context.priority_cached;
}

/*
 * Add a priority mapping to an mm_context and
 * notify the hypervisor if this is the first one.
 */
void start_mm_caching(struct mm_struct *mm)
{
	if (!mm_is_priority_cached(mm)) {
		mm->context.priority_cached = -1U;
		hv_set_caching(-1U);
	}
}

/*
 * Validate and return the priority_cached flag.  We know if it's zero
 * that we don't need to scan, since we immediately set it non-zero
 * when we first consider a MAP_CACHE_PRIORITY mapping.
 *
 * We only _try_ to acquire the mmap_sem semaphore; if we can't acquire it,
 * since we're in an interrupt context (servicing switch_mm) we don't
 * worry about it and don't unset the "priority_cached" field.
 * Presumably we'll come back later and have more luck and clear
 * the value then; for now we'll just keep the cache marked for priority.
 */
static unsigned int update_priority_cached(struct mm_struct *mm)
{
	if (mm->context.priority_cached && down_write_trylock(&mm->mmap_sem)) {
		struct vm_area_struct *vm;
		for (vm = mm->mmap; vm; vm = vm->vm_next) {
			if (hv_pte_get_cached_priority(vm->vm_page_prot))
				break;
		}
		if (vm == NULL)
			mm->context.priority_cached = 0;
		up_write(&mm->mmap_sem);
	}
	return mm->context.priority_cached;
}

/* Set caching correctly for an mm that we are switching to. */
void check_mm_caching(struct mm_struct *prev, struct mm_struct *next)
{
	if (!mm_is_priority_cached(next)) {
		/*
		 * If the new mm doesn't use priority caching, just see if we
		 * need the hv_set_caching(), or can assume it's already zero.
		 */
		if (mm_is_priority_cached(prev))
			hv_set_caching(0);
	} else {
		hv_set_caching(update_priority_cached(next));
	}
}

#if CHIP_HAS_MMIO()

/* Map an arbitrary MMIO address, homed according to pgprot, into VA space. */
void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
			   pgprot_t home)
{
	void *addr;
	struct vm_struct *area;
	unsigned long offset, last_addr;
	pgprot_t pgprot;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/* Create a read/write, MMIO VA mapping homed at the requested shim. */
	pgprot = PAGE_KERNEL;
	pgprot = hv_pte_set_mode(pgprot, HV_PTE_MODE_MMIO);
	pgprot = hv_pte_set_lotar(pgprot, hv_pte_get_lotar(home));

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP /* | other flags? */);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	addr = area->addr;
	if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
			       phys_addr, pgprot)) {
		remove_vm_area((void *)(PAGE_MASK & (unsigned long) addr));
		return NULL;
	}
	return (__force void __iomem *) (offset + (char *)addr);
}
EXPORT_SYMBOL(ioremap_prot);
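
/*
 * Illustrative sketch only (hypothetical driver code, not from this file):
 *
 *	pgprot_t home = hv_pte_set_lotar(PAGE_KERNEL, shim_lotar);
 *	void __iomem *regs = ioremap_prot(SHIM_PHYS_BASE, SHIM_REG_SIZE, home);
 *	if (regs) {
 *		u32 val = readl(regs + REG_OFFSET);
 *		...
 *		iounmap(regs);
 *	}
 *
 * SHIM_PHYS_BASE, SHIM_REG_SIZE, REG_OFFSET and shim_lotar are made-up
 * names; all ioremap_prot() consumes from "home" is its lotar, which
 * selects the shim the MMIO mapping is homed on.
 */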

/* Map a PCI MMIO bus address into VA space. */
void __iomem *ioremap(resource_size_t phys_addr, unsigned long size)
{
	panic("ioremap for PCI MMIO is not supported");
}
EXPORT_SYMBOL(ioremap);

/* Unmap an MMIO VA mapping. */
void iounmap(volatile void __iomem *addr_in)
{
	volatile void __iomem *addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr_in);
#if 1
	vunmap((void * __force)addr);
#else
	/* x86 uses this complicated flow instead of vunmap().  Is
	 * there any particular reason we should do the same? */
	struct vm_struct *p, *o;

	/* Use the vm area unlocked, assuming the caller
	   ensures there isn't another iounmap for the same address
	   in parallel. Reuse of the virtual address is prevented by
	   leaving it in the global lists until we're done with it.
	   cpa takes care of the direct mappings. */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		pr_err("iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	/* Finally remove it */
	o = remove_vm_area((void *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
#endif
}
EXPORT_SYMBOL(iounmap);

#endif /* CHIP_HAS_MMIO() */