/*
 * (C) Copyright 1995 1996 Linus Torvalds
 * (C) Copyright 2012 Regents of the University of California
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/io.h>

#include <asm/pgtable.h>

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(phys_addr_t addr, size_t size,
	pgprot_t prot, void *caller)
{
	phys_addr_t last_addr;
	unsigned long offset, vaddr;
	struct vm_struct *area;

	/* Disallow wrap-around or zero size */
	last_addr = addr + size - 1;
	if (!size || last_addr < addr)
		return NULL;

	/* Page-align mappings */
	offset = addr & (~PAGE_MASK);
	addr &= PAGE_MASK;
	size = PAGE_ALIGN(size + offset);

	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	vaddr = (unsigned long)area->addr;

	if (ioremap_page_range(vaddr, vaddr + size, addr, prot)) {
		free_vm_area(area);
		return NULL;
	}

	return (void __iomem *)(vaddr + offset);
}
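
/*
 * A worked example of the alignment above (a sketch assuming 4 KiB pages and
 * a made-up physical address): for addr = 0x10010a04 and size = 0x10,
 *
 *	offset = addr & ~PAGE_MASK         = 0xa04
 *	addr   = addr & PAGE_MASK          = 0x10010000
 *	size   = PAGE_ALIGN(size + offset) = 0x1000
 *
 * so a full page is mapped, and the caller gets back (vaddr + 0xa04) with
 * its original sub-page offset preserved.
 */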

/**
 * ioremap - map bus memory into CPU space
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap performs a platform-specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap(phys_addr_t offset, unsigned long size)
{
	return __ioremap_caller(offset, size, PAGE_KERNEL,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap);
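
/*
 * A minimal usage sketch of the ioremap()/iounmap() pair together with the
 * mmio helpers mentioned above. The device base address, length, and
 * register offsets are hypothetical placeholders, not real hardware.
 */
static int __maybe_unused ioremap_usage_example(void)
{
	/* Hypothetical device: a 0x1000-byte register block at 0x10010000 */
	void __iomem *regs = ioremap(0x10010000UL, 0x1000);
	u32 status;

	if (!regs)
		return -ENOMEM;

	status = readl(regs + 0x04);		/* read a 32-bit register */
	writel(status | 0x1, regs + 0x04);	/* set a bit, write it back */

	iounmap(regs);		/* every ioremap() needs a matching iounmap() */
	return 0;
}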

/**
 * iounmap - Free an I/O remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	vunmap((void *)((unsigned long)addr & PAGE_MASK));
}
EXPORT_SYMBOL(iounmap);
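
/*
 * Continuing the worked example above: the (vaddr + 0xa04) pointer returned
 * by ioremap() can be passed back to iounmap() unchanged, because the
 * PAGE_MASK here strips the sub-page offset before the area reaches vunmap().
 */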