/*
 * arch/sh/mm/mmap.c
 *
 * Copyright (C) 2008 - 2009  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <asm/page.h>
#include <asm/processor.h>

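/*
 * Minimum alignment for shared mappings.  PAGE_SIZE - 1 is right for
 * CPUs whose caches cannot alias; on parts where the data cache can
 * alias (assumption: several SH-4 variants), the cache setup code is
 * expected to widen this mask to the alias span so that every user
 * mapping of a page lands on the same cache colour.
 */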
unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
EXPORT_SYMBOL(shm_align_mask);

#ifdef CONFIG_MMU
/*
 * To avoid cache aliases, we map the shared page with the same colour.
 */
static inline unsigned long COLOUR_ALIGN(unsigned long addr,
					 unsigned long pgoff)
{
	unsigned long base = (addr + shm_align_mask) & ~shm_align_mask;
	unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;

	return base + off;
}
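
/*
 * Worked example (illustrative values only, not taken from this file):
 * with 4KiB pages and a 16KiB alias span (shm_align_mask == 0x3fff),
 * COLOUR_ALIGN(0x10001000, 3) gives base = 0x10004000 and off = 0x3000,
 * i.e. 0x10007000: an address at or above the hint whose low colour
 * bits match those of the file offset (pgoff << PAGE_SHIFT).
 */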

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int do_colour_align;
	struct vm_unmapped_area_info info;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints: a fixed, shared mapping must
		 * already have the same colour as its file offset.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	do_colour_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_colour_align = 1;

	if (addr) {
		if (do_colour_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = TASK_SIZE;
	info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	return vm_unmapped_area(&info);
}
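
/*
 * Note that vm_unmapped_area() enforces the colouring generically:
 * with align_mask set to the colour bits, it only returns addresses
 * whose masked bits equal align_offset's, so the gap search reproduces
 * COLOUR_ALIGN()'s placement without an explicit retry loop.
 */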

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			       const unsigned long len, const unsigned long pgoff,
			       const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	int do_colour_align;
	struct vm_unmapped_area_info info;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	do_colour_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_colour_align = 1;

	/* requesting a specific address */
	if (addr) {
		if (do_colour_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = mm->mmap_base;
	info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here.  This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.  An error return such as -ENOMEM is never
	 * page-aligned, which is what the check below catches.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}
#endif /* CONFIG_MMU */

/*
 * You really shouldn't be using read() or write() on /dev/mem.  This
 * might go away in the future.
 */
int valid_phys_addr_range(phys_addr_t addr, size_t count)
{
	if (addr < __MEMORY_START)
		return 0;
	if (addr + count > __pa(high_memory))
		return 0;

	return 1;
}

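/*
 * Unlike the read()/write() path above, mmap() of /dev/mem is not
 * range-checked here: any PFN is accepted.
 */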
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return 1;
}