/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/memory.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/initrd.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/lowcore.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/ctl_reg.h>
#include <asm/sclp.h>

pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE)));

unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL(empty_zero_page);
EXPORT_SYMBOL(zero_page_mask);

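/*
 * Allocate a block of 2^order contiguous zero pages. ZERO_PAGE() selects
 * one of them based on the virtual address and zero_page_mask, so that
 * read-only mappings of the zero page are spread over the cache instead
 * of all hitting the same lines. Newer machines with larger caches get a
 * bigger block.
 */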
static void __init setup_zero_pages(void)
{
	struct cpuid cpu_id;
	unsigned int order;
	struct page *page;
	int i;

	get_cpu_id(&cpu_id);
	switch (cpu_id.machine) {
	case 0x9672:	/* g5 */
	case 0x2064:	/* z900 */
	case 0x2066:	/* z900 */
	case 0x2084:	/* z990 */
	case 0x2086:	/* z990 */
	case 0x2094:	/* z9-109 */
	case 0x2096:	/* z9-109 */
		order = 0;
		break;
	case 0x2097:	/* z10 */
	case 0x2098:	/* z10 */
	case 0x2817:	/* z196 */
	case 0x2818:	/* z196 */
		order = 2;
		break;
	case 0x2827:	/* zEC12 */
	case 0x2828:	/* zEC12 */
		order = 5;
		break;
	case 0x2964:	/* z13 */
	default:
		order = 7;
		break;
	}
	/* Limit number of empty zero pages for small memory sizes */
	while (order > 2 && (totalram_pages >> 10) < (1UL << order))
		order--;

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Out of memory in setup_zero_pages");

	page = virt_to_page((void *) empty_zero_page);
	split_page(page, order);
	for (i = 1 << order; i > 0; i--) {
		mark_page_reserved(page);
		page++;
	}

	zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}

/*
 * paging_init() sets up the kernel page tables and enables
 * dynamic address translation.
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long pgd_type, asce_bits;

	init_mm.pgd = swapper_pg_dir;
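	/*
	 * A region-third table as top level covers 4 TB (2^42 bytes) of
	 * address space; if the vmalloc area ends above that, start from
	 * a region-second table instead, i.e. use one more translation
	 * level.
	 */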
	if (VMALLOC_END > (1UL << 42)) {
		asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
		pgd_type = _REGION2_ENTRY_EMPTY;
	} else {
		asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
		pgd_type = _REGION3_ENTRY_EMPTY;
	}
	S390_lowcore.kernel_asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
	clear_table((unsigned long *) init_mm.pgd, pgd_type,
		    sizeof(unsigned long)*2048);
	vmem_map_init();

	/*
	 * Enable virtual mapping in kernel mode: load the kernel ASCE into
	 * the primary, secondary and home space control registers (1, 7
	 * and 13) and switch on DAT in the PSW mask.
	 */
	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	__ctl_load(S390_lowcore.kernel_asce, 7, 7);
	__ctl_load(S390_lowcore.kernel_asce, 13, 13);
	arch_local_irq_restore(4UL << (BITS_PER_LONG - 8));

	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init_nodes(max_zone_pfns);
}

void __init mem_init(void)
{
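	/*
	 * Mark the boot CPU as attached to init_mm; the attach mask and
	 * count let the TLB flushing code decide when a local-only flush
	 * is sufficient on machines with the TLB local-clearing facility.
	 */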
	if (MACHINE_HAS_TLB_LC)
		cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
	cpumask_set_cpu(0, mm_cpumask(&init_mm));
	atomic_set(&init_mm.context.attach_count, 1);

	max_mapnr = max_low_pfn;
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

	/* Setup guest page hinting */
	cmma_init();

	/* this will put all low memory onto the freelists */
	free_all_bootmem();
	setup_zero_pages();	/* Setup zeroed pages. */

	mem_init_print_info(NULL);
	printk("Write protected kernel read-only data: %#lx - %#lx\n",
	       (unsigned long)&_stext,
	       PFN_ALIGN((unsigned long)&_eshared) - 1);
}

void free_initmem(void)
{
	free_initmem_default(POISON_FREE_INITMEM);
}

#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
			   "initrd");
}
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
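/*
 * Add a hot-plugged memory range: create the kernel mapping first, then
 * register the pages with the memory hotplug core. Pieces of the range
 * that fall inside an existing zone are added to that zone, the remainder
 * goes to ZONE_MOVABLE.
 */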
int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
{
	unsigned long zone_start_pfn, zone_end_pfn, nr_pages;
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long size_pages = PFN_DOWN(size);
	struct zone *zone;
	int rc;

	rc = vmem_add_mapping(start, size);
	if (rc)
		return rc;
	for_each_zone(zone) {
		if (zone_idx(zone) != ZONE_MOVABLE) {
			/* Add range within existing zone limits */
			zone_start_pfn = zone->zone_start_pfn;
			zone_end_pfn = zone->zone_start_pfn +
				       zone->spanned_pages;
		} else {
			/* Add remaining range to ZONE_MOVABLE */
			zone_start_pfn = start_pfn;
			zone_end_pfn = start_pfn + size_pages;
		}
		if (start_pfn < zone_start_pfn || start_pfn >= zone_end_pfn)
			continue;
		nr_pages = (start_pfn + size_pages > zone_end_pfn) ?
			   zone_end_pfn - start_pfn : size_pages;
		rc = __add_pages(nid, zone, start_pfn, nr_pages);
		if (rc)
			break;
		start_pfn += nr_pages;
		size_pages -= nr_pages;
		if (!size_pages)
			break;
	}
	if (rc)
		vmem_remove_mapping(start, size);
	return rc;
}

unsigned long memory_block_size_bytes(void)
{
	/*
	 * Make sure the memory block size is always greater than or
	 * equal to the memory increment size.
	 */
	return max_t(unsigned long, MIN_MEMORY_BLOCK_SIZE, sclp.rzm);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
int arch_remove_memory(u64 start, u64 size)
{
	/*
	 * There is no hardware or firmware interface which could trigger a
	 * hot memory remove on s390. So there is nothing that needs to be
	 * implemented.
	 */
	return -EBUSY;
}
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */