/*
 * S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *
 * Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1995 Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/memory.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/initrd.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/lowcore.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/ctl_reg.h>
#include <asm/sclp.h>
#include <asm/set_memory.h>

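/* swapper_pg_dir is the top-level kernel page table; init_mm.pgd points at it. */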
pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(.bss..swapper_pg_dir);

unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL(empty_zero_page);
EXPORT_SYMBOL(zero_page_mask);

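/*
 * Allocate and reserve a power-of-two block of zeroed pages. Having
 * several zero pages, selected via zero_page_mask, avoids cache
 * synonym conflicts when the zero page is mapped at virtual addresses
 * that differ in cache color.
 */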
static void __init setup_zero_pages(void)
{
	unsigned int order;
	struct page *page;
	int i;

	/* Latest machines require a mapping granularity of 512KB */
	order = 7;

	/* Limit number of empty zero pages for small memory sizes */
	while (order > 2 && (totalram_pages >> 10) < (1UL << order))
		order--;

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Out of memory in setup_zero_pages");

	page = virt_to_page((void *) empty_zero_page);
	split_page(page, order);
	for (i = 1 << order; i > 0; i--) {
		mark_page_reserved(page);
		page++;
	}

	zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}

/*
 * paging_init() sets up the kernel page tables and enables
 * dynamic address translation.
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long pgd_type, asce_bits;
	psw_t psw;

	init_mm.pgd = swapper_pg_dir;
	if (VMALLOC_END > (1UL << 42)) {
		asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
		pgd_type = _REGION2_ENTRY_EMPTY;
	} else {
		asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
		pgd_type = _REGION3_ENTRY_EMPTY;
	}
	init_mm.context.asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
	S390_lowcore.kernel_asce = init_mm.context.asce;
	clear_table((unsigned long *) init_mm.pgd, pgd_type,
		    sizeof(unsigned long)*2048);
	vmem_map_init();

	/*
	 * Enable virtual mapping in kernel mode; load the kernel ASCE
	 * into the primary (CR1), secondary (CR7) and home (CR13)
	 * address space control registers.
	 */
	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	__ctl_load(S390_lowcore.kernel_asce, 7, 7);
	__ctl_load(S390_lowcore.kernel_asce, 13, 13);
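	/*
	 * Build a PSW mask with DAT enabled and the address space
	 * control set to the home space, then load it.
	 */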
	psw.mask = __extract_psw();
	psw_bits(psw).dat = 1;
	psw_bits(psw).as = PSW_BITS_AS_HOME;
	__load_psw_mask(psw.mask);

	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init_nodes(max_zone_pfns);
}

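/*
 * Write-protect the data that must be writable during early boot but
 * is meant to be read-only afterwards (the __ro_after_init section).
 */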
void mark_rodata_ro(void)
{
	unsigned long size = __end_ro_after_init - __start_ro_after_init;

	set_memory_ro((unsigned long)__start_ro_after_init, size >> PAGE_SHIFT);
	pr_info("Write protected read-only-after-init data: %luk\n", size >> 10);
}

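/*
 * mem_init() releases all low memory from the boot allocator to the
 * buddy allocator and sets up the reserved zero pages.
 */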
void __init mem_init(void)
{
	cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
	cpumask_set_cpu(0, mm_cpumask(&init_mm));

	set_max_mapnr(max_low_pfn);
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

	/* Set up guest page hinting */
	cmma_init();

	/* this will put all low memory onto the freelists */
	free_all_bootmem();
	setup_zero_pages();	/* Set up zeroed pages. */

	mem_init_print_info(NULL);
}

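/*
 * Before the init sections are handed back to the page allocator,
 * make the init text writable and non-executable again.
 */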
void free_initmem(void)
{
	__set_memory((unsigned long) _sinittext,
		     (_einittext - _sinittext) >> PAGE_SHIFT,
		     SET_MEMORY_RW | SET_MEMORY_NX);
	free_initmem_default(POISON_FREE_INITMEM);
}

#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
			   "initrd");
}
#endif

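/*
 * sclp.rzm holds the memory increment size reported by the SCLP
 * firmware interface; memory blocks must be at least that large.
 */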
unsigned long memory_block_size_bytes(void)
{
	/*
	 * Make sure the memory block size is always greater than or
	 * equal to the memory increment size.
	 */
	return max_t(unsigned long, MIN_MEMORY_BLOCK_SIZE, sclp.rzm);
}

#ifdef CONFIG_MEMORY_HOTPLUG
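/*
 * arch_add_memory() first creates the identity mapping for a newly
 * added memory range, then distributes the new pages across the
 * existing zones; whatever does not fit within an existing zone's
 * limits ends up in ZONE_MOVABLE.
 */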
int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
{
	unsigned long zone_start_pfn, zone_end_pfn, nr_pages;
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long size_pages = PFN_DOWN(size);
	pg_data_t *pgdat = NODE_DATA(nid);
	struct zone *zone;
	int rc, i;

	rc = vmem_add_mapping(start, size);
	if (rc)
		return rc;

	for (i = 0; i < MAX_NR_ZONES; i++) {
		zone = pgdat->node_zones + i;
		if (zone_idx(zone) != ZONE_MOVABLE) {
			/* Add range within existing zone limits, if possible */
			zone_start_pfn = zone->zone_start_pfn;
			zone_end_pfn = zone->zone_start_pfn +
				       zone->spanned_pages;
		} else {
			/* Add remaining range to ZONE_MOVABLE */
			zone_start_pfn = start_pfn;
			zone_end_pfn = start_pfn + size_pages;
		}
		if (start_pfn < zone_start_pfn || start_pfn >= zone_end_pfn)
			continue;
		nr_pages = (start_pfn + size_pages > zone_end_pfn) ?
			   zone_end_pfn - start_pfn : size_pages;
		rc = __add_pages(nid, zone, start_pfn, nr_pages, !for_device);
		if (rc)
			break;
		start_pfn += nr_pages;
		size_pages -= nr_pages;
		if (!size_pages)
			break;
	}
	if (rc)
		vmem_remove_mapping(start, size);
	return rc;
}

#ifdef CONFIG_MEMORY_HOTREMOVE
int arch_remove_memory(u64 start, u64 size)
{
	/*
	 * There is no hardware or firmware interface which could trigger a
	 * hot memory remove on s390. So there is nothing that needs to be
	 * implemented.
	 */
	return -EBUSY;
}
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */