/*
 * arch/sh/mm/mmap.c
 *
 * Copyright (C) 2008 - 2009 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <asm/page.h>
#include <asm/processor.h>

unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
EXPORT_SYMBOL(shm_align_mask);
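/*
 * Example (values are assumptions, not taken from this file): on parts
 * with an aliasing data cache, boot-time cache setup is expected to
 * widen shm_align_mask to cover one cache way. With a hypothetical
 * 16KB way this gives shm_align_mask == 0x3fff, so two mappings of the
 * same data only index the same cache lines when their addresses agree
 * in the low 14 bits.
 */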

#ifdef CONFIG_MMU
/*
 * To avoid cache aliases, we map the shared page with the same colour.
 */
static inline unsigned long COLOUR_ALIGN(unsigned long addr,
					 unsigned long pgoff)
{
	unsigned long base = (addr + shm_align_mask) & ~shm_align_mask;
	unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;

	return base + off;
}
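/*
 * Worked example (illustrative values: PAGE_SHIFT == 12 and
 * shm_align_mask == 0x3fff for a 16KB cache way):
 *
 *	COLOUR_ALIGN(0x10001000, 3)
 *	  base = (0x10001000 + 0x3fff) & ~0x3fff = 0x10004000
 *	  off  = (3 << 12) & 0x3fff              = 0x00003000
 *	  base + off                             = 0x10007000
 *
 * The result is an address at or above addr whose colour bits equal
 * those of the file offset: (result & shm_align_mask) ==
 * ((pgoff << PAGE_SHIFT) & shm_align_mask).
 */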

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int do_colour_align;
	struct vm_unmapped_area_info info;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	do_colour_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_colour_align = 1;

	if (addr) {
		if (do_colour_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = TASK_SIZE;
	info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	return vm_unmapped_area(&info);
}
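/*
 * Usage sketch (hypothetical userspace code, not part of this file):
 * two MAP_SHARED mappings of the same file offset are handed
 * colour-compatible addresses, so stores through one mapping are seen
 * through the other even on an aliasing cache.
 *
 *	int fd = open("/tmp/shared", O_RDWR);
 *	char *a = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *	char *b = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *
 *	a[0] = 1;	// visible via b[0]; the allocator above ensures
 *			// ((unsigned long)a & shm_align_mask) ==
 *			// ((unsigned long)b & shm_align_mask)
 */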

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			       const unsigned long len, const unsigned long pgoff,
			       const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	int do_colour_align;
	struct vm_unmapped_area_info info;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	do_colour_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_colour_align = 1;

	/* requesting a specific address */
	if (addr) {
		if (do_colour_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = mm->mmap_base;
	info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}
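/*
 * Note on the fallback above: vm_unmapped_area() returns either a
 * page-aligned address or a negative errno cast to unsigned long, so
 * "addr & ~PAGE_MASK" is non-zero exactly when the top-down search
 * failed. For example, on a 32-bit build -ENOMEM is 0xfffffff4, whose
 * low-order page bits are non-zero.
 */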
#endif /* CONFIG_MMU */

/*
 * You really shouldn't be using read() or write() on /dev/mem. This
 * might go away in the future.
 */
int valid_phys_addr_range(phys_addr_t addr, size_t count)
{
	if (addr < __MEMORY_START)
		return 0;
	if (addr + count > __pa(high_memory))
		return 0;

	return 1;
}
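/*
 * Example (addresses are assumptions for a typical board, not taken
 * from this file): with __MEMORY_START at 0x08000000 and 64MB of RAM,
 * a /dev/mem access at 0x04000000 is rejected (below the start of
 * RAM), as is any range extending past __pa(high_memory); a range
 * wholly inside [0x08000000, 0x0c000000) is accepted.
 */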

int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return 1;
}