blob: decccffb03cac60abe6e5628e2d0f3a47cc7d099 [file] [log] [blame]
/*
 * Based on arch/arm/mm/mmap.c
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
18
19#include <linux/elf.h>
20#include <linux/fs.h>
Ard Biesheuvel1151f832017-05-19 16:42:00 +010021#include <linux/memblock.h>
Catalin Marinas1d18c472012-03-05 11:49:27 +000022#include <linux/mm.h>
23#include <linux/mman.h>
24#include <linux/export.h>
25#include <linux/shm.h>
Ingo Molnar3f07c012017-02-08 18:51:30 +010026#include <linux/sched/signal.h>
Ingo Molnar01042602017-02-08 18:51:31 +010027#include <linux/sched/mm.h>
Catalin Marinas1d18c472012-03-05 11:49:27 +000028#include <linux/io.h>
29#include <linux/personality.h>
30#include <linux/random.h>
31
32#include <asm/cputype.h>
33
34/*
35 * Leave enough space between the mmap area and the stack to honour ulimit in
36 * the face of randomisation.
37 */
Rik van Rielcf922512017-07-12 14:36:36 -070038#define MIN_GAP (SZ_128M)
Catalin Marinas1d18c472012-03-05 11:49:27 +000039#define MAX_GAP (STACK_TOP/6*5)
40
41static int mmap_is_legacy(void)
42{
43 if (current->personality & ADDR_COMPAT_LAYOUT)
44 return 1;
45
46 if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
47 return 1;
48
49 return sysctl_legacy_va_layout;
50}
51
Kees Cook2b68f6c2015-04-14 15:48:00 -070052unsigned long arch_mmap_rnd(void)
Catalin Marinas1d18c472012-03-05 11:49:27 +000053{
Kees Cookdd04cff2015-04-14 15:47:48 -070054 unsigned long rnd;
Catalin Marinas1d18c472012-03-05 11:49:27 +000055
Daniel Cashman8f0d3aa2016-01-14 15:20:01 -080056#ifdef CONFIG_COMPAT
57 if (test_thread_flag(TIF_32BIT))
Daniel Cashman5ef11c32016-02-26 15:19:37 -080058 rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
Daniel Cashman8f0d3aa2016-01-14 15:20:01 -080059 else
60#endif
Daniel Cashman5ef11c32016-02-26 15:19:37 -080061 rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);
Yann Droneaudd6c763a2014-11-17 23:02:19 +000062 return rnd << PAGE_SHIFT;
Catalin Marinas1d18c472012-03-05 11:49:27 +000063}
64
Kees Cookdd04cff2015-04-14 15:47:48 -070065static unsigned long mmap_base(unsigned long rnd)
Catalin Marinas1d18c472012-03-05 11:49:27 +000066{
67 unsigned long gap = rlimit(RLIMIT_STACK);
Rik van Rielcf922512017-07-12 14:36:36 -070068 unsigned long pad = (STACK_RND_MASK << PAGE_SHIFT) + stack_guard_gap;
69
70 /* Values close to RLIM_INFINITY can overflow. */
71 if (gap + pad > gap)
72 gap += pad;
Catalin Marinas1d18c472012-03-05 11:49:27 +000073
74 if (gap < MIN_GAP)
75 gap = MIN_GAP;
76 else if (gap > MAX_GAP)
77 gap = MAX_GAP;
78
Kees Cookdd04cff2015-04-14 15:47:48 -070079 return PAGE_ALIGN(STACK_TOP - gap - rnd);
Catalin Marinas1d18c472012-03-05 11:49:27 +000080}
81
82/*
83 * This function, called very early during the creation of a new process VM
84 * image, sets up which VM layout function to use:
85 */
86void arch_pick_mmap_layout(struct mm_struct *mm)
87{
Kees Cookdd04cff2015-04-14 15:47:48 -070088 unsigned long random_factor = 0UL;
89
90 if (current->flags & PF_RANDOMIZE)
Kees Cook2b68f6c2015-04-14 15:48:00 -070091 random_factor = arch_mmap_rnd();
Kees Cookdd04cff2015-04-14 15:47:48 -070092
Catalin Marinas1d18c472012-03-05 11:49:27 +000093 /*
94 * Fall back to the standard layout if the personality bit is set, or
95 * if the expected stack growth is unlimited:
96 */
97 if (mmap_is_legacy()) {
Kees Cookdd04cff2015-04-14 15:47:48 -070098 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
Catalin Marinas1d18c472012-03-05 11:49:27 +000099 mm->get_unmapped_area = arch_get_unmapped_area;
Catalin Marinas1d18c472012-03-05 11:49:27 +0000100 } else {
Kees Cookdd04cff2015-04-14 15:47:48 -0700101 mm->mmap_base = mmap_base(random_factor);
Catalin Marinas1d18c472012-03-05 11:49:27 +0000102 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
Catalin Marinas1d18c472012-03-05 11:49:27 +0000103 }
104}
Catalin Marinas1d18c472012-03-05 11:49:27 +0000105
106/*
107 * You really shouldn't be using read() or write() on /dev/mem. This might go
108 * away in the future.
109 */
Min-Hua Chen097cbd82014-10-02 15:56:59 +0100110int valid_phys_addr_range(phys_addr_t addr, size_t size)
Catalin Marinas1d18c472012-03-05 11:49:27 +0000111{
Ard Biesheuvel1151f832017-05-19 16:42:00 +0100112 /*
113 * Check whether addr is covered by a memory region without the
114 * MEMBLOCK_NOMAP attribute, and whether that region covers the
115 * entire range. In theory, this could lead to false negatives
116 * if the range is covered by distinct but adjacent memory regions
117 * that only differ in other attributes. However, few of such
118 * attributes have been defined, and it is debatable whether it
119 * follows that /dev/mem read() calls should be able traverse
120 * such boundaries.
121 */
122 return memblock_is_region_memory(addr, size) &&
123 memblock_is_map_memory(addr);
Catalin Marinas1d18c472012-03-05 11:49:27 +0000124}
125
126/*
127 * Do not allow /dev/mem mappings beyond the supported physical range.
128 */
129int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
130{
131 return !(((pfn << PAGE_SHIFT) + size) & ~PHYS_MASK);
132}
133
134#ifdef CONFIG_STRICT_DEVMEM
135
136#include <linux/ioport.h>
137
138/*
139 * devmem_is_allowed() checks to see if /dev/mem access to a certain address
140 * is valid. The argument is a physical page number. We mimic x86 here by
141 * disallowing access to system RAM as well as device-exclusive MMIO regions.
142 * This effectively disable read()/write() on /dev/mem.
143 */
144int devmem_is_allowed(unsigned long pfn)
145{
146 if (iomem_is_exclusive(pfn << PAGE_SHIFT))
147 return 0;
148 if (!page_is_ram(pfn))
149 return 1;
150 return 0;
151}
152
153#endif