/*
 * kexec.c - kexec system call
 * Copyright (C) 2002-2004 Eric Biederman  <ebiederm@xmission.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */

#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/kexec.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/syscalls.h>
#include <linux/reboot.h>
#include <linux/ioport.h>
#include <linux/hardirq.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/utsname.h>
#include <linux/numa.h>
#include <linux/suspend.h>
#include <linux/device.h>
#include <linux/freezer.h>
#include <linux/pm.h>
#include <linux/cpu.h>
#include <linux/console.h>
#include <linux/vmalloc.h>
#include <linux/swap.h>
#include <linux/syscore_ops.h>

#include <asm/page.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/sections.h>

/* Per cpu memory for storing cpu states in case of system crash. */
note_buf_t __percpu *crash_notes;

/* vmcoreinfo stuff */
static unsigned char vmcoreinfo_data[VMCOREINFO_BYTES];
u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
size_t vmcoreinfo_size;
size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data);

/* Flag to indicate we are going to kexec a new kernel */
bool kexec_in_progress = false;

/* Location of the reserved area for the crash kernel */
struct resource crashk_res = {
	.name  = "Crash kernel",
	.start = 0,
	.end   = 0,
	.flags = IORESOURCE_BUSY | IORESOURCE_MEM
};
struct resource crashk_low_res = {
	.name  = "Crash kernel",
	.start = 0,
	.end   = 0,
	.flags = IORESOURCE_BUSY | IORESOURCE_MEM
};

int kexec_should_crash(struct task_struct *p)
{
	if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops)
		return 1;
	return 0;
}

/*
 * When kexec transitions to the new kernel there is a one-to-one
 * mapping between physical and virtual addresses.  On processors
 * where you can disable the MMU this is trivial, and easy.  For
 * others it is still a simple predictable page table to setup.
 *
 * In that environment kexec copies the new kernel to its final
 * resting place.  This means I can only support memory whose
 * physical address can fit in an unsigned long.  In particular
 * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled.
 * If the assembly stub has more restrictive requirements
 * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DEST_MEMORY_LIMIT can be
 * defined more restrictively in <asm/kexec.h>.
 *
 * The code for the transition from the current kernel to the
 * new kernel is placed in the control_code_buffer, whose size
 * is given by KEXEC_CONTROL_PAGE_SIZE.  In the best case only a single
 * page of memory is necessary, but some architectures require more.
 * Because this memory must be identity mapped in the transition from
 * virtual to physical addresses it must live in the range
 * 0 - TASK_SIZE, as only the user space mappings are arbitrarily
 * modifiable.
 *
 * The assembly stub in the control code buffer is passed a linked list
 * of descriptor pages detailing the source pages of the new kernel,
 * and the destination addresses of those source pages.  As this data
 * structure is not used in the context of the current OS, it must
 * be self-contained.
 *
 * The code has been made to work with highmem pages and will use a
 * destination page in its final resting place (if it happens
 * to allocate it).  The end product of this is that most of the
 * physical address space, and most of RAM can be used.
 *
 * Future directions include:
 *  - allocating a page table with the control code buffer identity
 *    mapped, to simplify machine_kexec and make kexec_on_panic more
 *    reliable.
 */

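/*
 * Illustrative sketch (added by the editor, not part of the original
 * source): the descriptor list built by kimage_add_entry() below is a
 * flat array of kimage_entry_t values, each a physical address tagged
 * in its low bits.  A loaded two-page image might look like:
 *
 *	entry[0] = dest_paddr | IND_DESTINATION   (start copying here)
 *	entry[1] = src_paddr0 | IND_SOURCE        (copy this page...)
 *	entry[2] = src_paddr1 | IND_SOURCE        (...then this one)
 *	entry[3] = next_paddr | IND_INDIRECTION   (list continues in
 *	                                           another page)
 *	...
 *	entry[n] = IND_DONE                       (end of list)
 *
 * The exact layout for a given image depends on the architecture and
 * the segments passed in; the addresses above are hypothetical.
 */
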
/*
 * KIMAGE_NO_DEST is an impossible destination address..., for
 * allocating pages whose destination address we do not care about.
 */
#define KIMAGE_NO_DEST (-1UL)

static int kimage_is_destination_range(struct kimage *image,
				       unsigned long start, unsigned long end);
static struct page *kimage_alloc_page(struct kimage *image,
				       gfp_t gfp_mask,
				       unsigned long dest);

static int do_kimage_alloc(struct kimage **rimage, unsigned long entry,
			   unsigned long nr_segments,
			   struct kexec_segment __user *segments)
{
	size_t segment_bytes;
	struct kimage *image;
	unsigned long i;
	int result;

	/* Allocate a controlling structure */
	result = -ENOMEM;
	image = kzalloc(sizeof(*image), GFP_KERNEL);
	if (!image)
		goto out;

	image->head = 0;
	image->entry = &image->head;
	image->last_entry = &image->head;
	image->control_page = ~0; /* By default this does not apply */
	image->start = entry;
	image->type = KEXEC_TYPE_DEFAULT;

	/* Initialize the list of control pages */
	INIT_LIST_HEAD(&image->control_pages);

	/* Initialize the list of destination pages */
	INIT_LIST_HEAD(&image->dest_pages);

	/* Initialize the list of unusable pages */
	INIT_LIST_HEAD(&image->unuseable_pages);

	/* Read in the segments */
	image->nr_segments = nr_segments;
	segment_bytes = nr_segments * sizeof(*segments);
	result = copy_from_user(image->segment, segments, segment_bytes);
	if (result) {
		result = -EFAULT;
		goto out;
	}

	/*
	 * Verify we have good destination addresses.  The caller is
	 * responsible for making certain we don't attempt to load
	 * the new image into invalid or reserved areas of RAM.  This
	 * just verifies it is an address we can use.
	 *
	 * Since the kernel does everything in page size chunks ensure
	 * the destination addresses are page aligned.  Too many
	 * special cases crop up when we don't do this.  The most
	 * insidious is getting overlapping destination addresses
	 * simply because addresses are changed to page size
	 * granularity.
	 */
	result = -EADDRNOTAVAIL;
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend   = mstart + image->segment[i].memsz;
		if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
			goto out;
		if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
			goto out;
	}

	/* Verify our destination addresses do not overlap.
	 * If we allowed overlapping destination addresses
	 * through, very weird things can happen with no
	 * easy explanation as one segment stops on another.
	 */
	result = -EINVAL;
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;
		unsigned long j;

		mstart = image->segment[i].mem;
		mend   = mstart + image->segment[i].memsz;
		for (j = 0; j < i; j++) {
			unsigned long pstart, pend;
			pstart = image->segment[j].mem;
			pend   = pstart + image->segment[j].memsz;
			/* Do the segments overlap ? */
			if ((mend > pstart) && (mstart < pend))
				goto out;
		}
	}

	/* Ensure our buffer sizes are strictly less than
	 * our memory sizes.  This should always be the case,
	 * and it is easier to check up front than to be surprised
	 * later on.
	 */
	result = -EINVAL;
	for (i = 0; i < nr_segments; i++) {
		if (image->segment[i].bufsz > image->segment[i].memsz)
			goto out;
	}

	result = 0;
out:
	if (result == 0)
		*rimage = image;
	else
		kfree(image);

	return result;

}

static void kimage_free_page_list(struct list_head *list);

static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
				unsigned long nr_segments,
				struct kexec_segment __user *segments)
{
	int result;
	struct kimage *image;

	/* Allocate and initialize a controlling structure */
	image = NULL;
	result = do_kimage_alloc(&image, entry, nr_segments, segments);
	if (result)
		goto out;

	/*
	 * Find a location for the control code buffer, and add it
	 * to the vector of segments so that its pages will also be
	 * counted as destination pages.
	 */
	result = -ENOMEM;
	image->control_code_page = kimage_alloc_control_pages(image,
					   get_order(KEXEC_CONTROL_PAGE_SIZE));
	if (!image->control_code_page) {
		printk(KERN_ERR "Could not allocate control_code_buffer\n");
		goto out_free;
	}

	image->swap_page = kimage_alloc_control_pages(image, 0);
	if (!image->swap_page) {
		printk(KERN_ERR "Could not allocate swap buffer\n");
		goto out_free;
	}

	*rimage = image;
	return 0;

out_free:
	kimage_free_page_list(&image->control_pages);
	kfree(image);
out:
	return result;
}

static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry,
				unsigned long nr_segments,
				struct kexec_segment __user *segments)
{
	int result;
	struct kimage *image;
	unsigned long i;

	image = NULL;
	/* Verify we have a valid entry point */
	if ((entry < crashk_res.start) || (entry > crashk_res.end)) {
		result = -EADDRNOTAVAIL;
		goto out;
	}

	/* Allocate and initialize a controlling structure */
	result = do_kimage_alloc(&image, entry, nr_segments, segments);
	if (result)
		goto out;

	/* Enable the special crash kernel control page
	 * allocation policy.
	 */
	image->control_page = crashk_res.start;
	image->type = KEXEC_TYPE_CRASH;

	/*
	 * Verify we have good destination addresses.  Normally
	 * the caller is responsible for making certain we don't
	 * attempt to load the new image into invalid or reserved
	 * areas of RAM.  But crash kernels are preloaded into a
	 * reserved area of RAM.  We must ensure the addresses
	 * are in the reserved area, otherwise preloading the
	 * kernel could corrupt things.
	 */
	result = -EADDRNOTAVAIL;
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend = mstart + image->segment[i].memsz - 1;
		/* Ensure we are within the crash kernel limits */
		if ((mstart < crashk_res.start) || (mend > crashk_res.end))
			goto out_free;
	}

	/*
	 * Find a location for the control code buffer, and add
	 * it to the vector of segments so that its pages will also be
	 * counted as destination pages.
	 */
	result = -ENOMEM;
	image->control_code_page = kimage_alloc_control_pages(image,
					   get_order(KEXEC_CONTROL_PAGE_SIZE));
	if (!image->control_code_page) {
		printk(KERN_ERR "Could not allocate control_code_buffer\n");
		goto out_free;
	}

	*rimage = image;
	return 0;

out_free:
	kfree(image);
out:
	return result;
}

static int kimage_is_destination_range(struct kimage *image,
					unsigned long start,
					unsigned long end)
{
	unsigned long i;

	for (i = 0; i < image->nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend = mstart + image->segment[i].memsz;
		if ((end > mstart) && (start < mend))
			return 1;
	}

	return 0;
}

static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
{
	struct page *pages;

	pages = alloc_pages(gfp_mask, order);
	if (pages) {
		unsigned int count, i;
		pages->mapping = NULL;
		set_page_private(pages, order);
		count = 1 << order;
		for (i = 0; i < count; i++)
			SetPageReserved(pages + i);
	}

	return pages;
}

static void kimage_free_pages(struct page *page)
{
	unsigned int order, count, i;

	order = page_private(page);
	count = 1 << order;
	for (i = 0; i < count; i++)
		ClearPageReserved(page + i);
	__free_pages(page, order);
}

static void kimage_free_page_list(struct list_head *list)
{
	struct list_head *pos, *next;

	list_for_each_safe(pos, next, list) {
		struct page *page;

		page = list_entry(pos, struct page, lru);
		list_del(&page->lru);
		kimage_free_pages(page);
	}
}

static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
							unsigned int order)
{
409 /* Control pages are special, they are the intermediaries
410 * that are needed while we copy the rest of the pages
411 * to their final resting place. As such they must
412 * not conflict with either the destination addresses
413 * or memory the kernel is already using.
414 *
415 * The only case where we really need more than one of
416 * these are for architectures where we cannot disable
417 * the MMU and must instead generate an identity mapped
418 * page table for all of the memory.
419 *
420 * At worst this runs in O(N) of the image size.
421 */
	struct list_head extra_pages;
	struct page *pages;
	unsigned int count;

	count = 1 << order;
	INIT_LIST_HEAD(&extra_pages);

	/* Loop while I can allocate a page and the page allocated
	 * is a destination page.
	 */
	do {
		unsigned long pfn, epfn, addr, eaddr;

		pages = kimage_alloc_pages(GFP_KERNEL, order);
		if (!pages)
			break;
		pfn   = page_to_pfn(pages);
		epfn  = pfn + count;
		addr  = pfn << PAGE_SHIFT;
		eaddr = epfn << PAGE_SHIFT;
		if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
			      kimage_is_destination_range(image, addr, eaddr)) {
			list_add(&pages->lru, &extra_pages);
			pages = NULL;
		}
	} while (!pages);

	if (pages) {
		/* Remember the allocated page... */
		list_add(&pages->lru, &image->control_pages);

		/* Because the page is already in its destination
		 * location we will never allocate another page at
		 * that address.  Therefore kimage_alloc_pages
		 * will not return it (again) and we don't need
		 * to give it an entry in image->segment[].
		 */
	}
	/* Deal with the destination pages I have inadvertently allocated.
	 *
	 * Ideally I would convert multi-page allocations into single
	 * page allocations, and add everything to image->dest_pages.
	 *
	 * For now it is simpler to just free the pages.
	 */
	kimage_free_page_list(&extra_pages);

	return pages;
}

static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
						      unsigned int order)
{
475 /* Control pages are special, they are the intermediaries
476 * that are needed while we copy the rest of the pages
477 * to their final resting place. As such they must
478 * not conflict with either the destination addresses
479 * or memory the kernel is already using.
480 *
481 * Control pages are also the only pags we must allocate
482 * when loading a crash kernel. All of the other pages
483 * are specified by the segments and we just memcpy
484 * into them directly.
485 *
486 * The only case where we really need more than one of
487 * these are for architectures where we cannot disable
488 * the MMU and must instead generate an identity mapped
489 * page table for all of the memory.
490 *
491 * Given the low demand this implements a very simple
492 * allocator that finds the first hole of the appropriate
493 * size in the reserved memory region, and allocates all
494 * of the memory up to and including the hole.
495 */
	unsigned long hole_start, hole_end, size;
	struct page *pages;

	pages = NULL;
	size = (1 << order) << PAGE_SHIFT;
	hole_start = (image->control_page + (size - 1)) & ~(size - 1);
	hole_end   = hole_start + size - 1;
	while (hole_end <= crashk_res.end) {
		unsigned long i;

		if (hole_end > KEXEC_CRASH_CONTROL_MEMORY_LIMIT)
			break;
		/* See if I overlap any of the segments */
		for (i = 0; i < image->nr_segments; i++) {
			unsigned long mstart, mend;

			mstart = image->segment[i].mem;
			mend   = mstart + image->segment[i].memsz - 1;
			if ((hole_end >= mstart) && (hole_start <= mend)) {
				/* Advance the hole to the end of the segment */
				hole_start = (mend + (size - 1)) & ~(size - 1);
				hole_end   = hole_start + size - 1;
				break;
			}
		}
		/* If I don't overlap any segments I have found my hole! */
		if (i == image->nr_segments) {
			pages = pfn_to_page(hole_start >> PAGE_SHIFT);
			break;
		}
	}
	if (pages)
		image->control_page = hole_end;

	return pages;
}


struct page *kimage_alloc_control_pages(struct kimage *image,
					 unsigned int order)
{
	struct page *pages = NULL;

	switch (image->type) {
	case KEXEC_TYPE_DEFAULT:
		pages = kimage_alloc_normal_control_pages(image, order);
		break;
	case KEXEC_TYPE_CRASH:
		pages = kimage_alloc_crash_control_pages(image, order);
		break;
	}

	return pages;
}

static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
{
	if (*image->entry != 0)
		image->entry++;

	if (image->entry == image->last_entry) {
		kimage_entry_t *ind_page;
		struct page *page;

		page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
		if (!page)
			return -ENOMEM;

		ind_page = page_address(page);
		*image->entry = virt_to_phys(ind_page) | IND_INDIRECTION;
		image->entry = ind_page;
		image->last_entry = ind_page +
				      ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
	}
	*image->entry = entry;
	image->entry++;
	*image->entry = 0;

	return 0;
}

static int kimage_set_destination(struct kimage *image,
				   unsigned long destination)
{
	int result;

	destination &= PAGE_MASK;
	result = kimage_add_entry(image, destination | IND_DESTINATION);
	if (result == 0)
		image->destination = destination;

	return result;
}


static int kimage_add_page(struct kimage *image, unsigned long page)
{
	int result;

	page &= PAGE_MASK;
	result = kimage_add_entry(image, page | IND_SOURCE);
	if (result == 0)
		image->destination += PAGE_SIZE;

	return result;
}


static void kimage_free_extra_pages(struct kimage *image)
{
	/* Walk through and free any extra destination pages I may have */
	kimage_free_page_list(&image->dest_pages);

	/* Walk through and free any unusable pages I have cached */
	kimage_free_page_list(&image->unuseable_pages);

}
static void kimage_terminate(struct kimage *image)
{
	if (*image->entry != 0)
		image->entry++;

	*image->entry = IND_DONE;
}

#define for_each_kimage_entry(image, ptr, entry) \
	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
		ptr = (entry & IND_INDIRECTION) ? \
			phys_to_virt(entry & PAGE_MASK) : ptr + 1)

static void kimage_free_entry(kimage_entry_t entry)
{
	struct page *page;

	page = pfn_to_page(entry >> PAGE_SHIFT);
	kimage_free_pages(page);
}

static void kimage_free(struct kimage *image)
{
	kimage_entry_t *ptr, entry;
	kimage_entry_t ind = 0;

	if (!image)
		return;

	kimage_free_extra_pages(image);
	for_each_kimage_entry(image, ptr, entry) {
		if (entry & IND_INDIRECTION) {
			/* Free the previous indirection page */
			if (ind & IND_INDIRECTION)
				kimage_free_entry(ind);
			/* Save this indirection page until we are
			 * done with it.
			 */
			ind = entry;
		}
		else if (entry & IND_SOURCE)
			kimage_free_entry(entry);
	}
	/* Free the final indirection page */
	if (ind & IND_INDIRECTION)
		kimage_free_entry(ind);

	/* Handle any machine specific cleanup */
	machine_kexec_cleanup(image);

	/* Free the kexec control pages... */
	kimage_free_page_list(&image->control_pages);
	kfree(image);
}

static kimage_entry_t *kimage_dst_used(struct kimage *image,
					unsigned long page)
{
	kimage_entry_t *ptr, entry;
	unsigned long destination = 0;

	for_each_kimage_entry(image, ptr, entry) {
		if (entry & IND_DESTINATION)
			destination = entry & PAGE_MASK;
		else if (entry & IND_SOURCE) {
			if (page == destination)
				return ptr;
			destination += PAGE_SIZE;
		}
	}

	return NULL;
}

static struct page *kimage_alloc_page(struct kimage *image,
					gfp_t gfp_mask,
					unsigned long destination)
{
	/*
	 * Here we implement safeguards to ensure that a source page
	 * is not copied to its destination page before the data on
	 * the destination page is no longer useful.
	 *
	 * To do this we maintain the invariant that a source page is
	 * either its own destination page, or it is not a
	 * destination page at all.
	 *
	 * That is slightly stronger than required, but the proof
	 * that no problems will occur is trivial, and the
	 * implementation is simply to verify.
	 *
	 * When allocating all pages normally this algorithm will run
	 * in O(N) time, but in the worst case it will run in O(N^2)
	 * time.  If the runtime is a problem the data structures can
	 * be fixed.
	 */
	struct page *page;
	unsigned long addr;

	/*
	 * Walk through the list of destination pages, and see if I
	 * have a match.
	 */
	list_for_each_entry(page, &image->dest_pages, lru) {
		addr = page_to_pfn(page) << PAGE_SHIFT;
		if (addr == destination) {
			list_del(&page->lru);
			return page;
		}
	}
	page = NULL;
	while (1) {
		kimage_entry_t *old;

		/* Allocate a page, if we run out of memory give up */
		page = kimage_alloc_pages(gfp_mask, 0);
		if (!page)
			return NULL;
		/* If the page cannot be used file it away */
		if (page_to_pfn(page) >
				(KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
			list_add(&page->lru, &image->unuseable_pages);
			continue;
		}
		addr = page_to_pfn(page) << PAGE_SHIFT;

		/* If it is the destination page we want use it */
		if (addr == destination)
			break;

		/* If the page is not a destination page use it */
		if (!kimage_is_destination_range(image, addr,
						  addr + PAGE_SIZE))
			break;

		/*
		 * I know that the page is someone's destination page.
		 * See if there is already a source page for this
		 * destination page.  And if so swap the source pages.
		 */
		old = kimage_dst_used(image, addr);
		if (old) {
			/* If so move it */
			unsigned long old_addr;
			struct page *old_page;

			old_addr = *old & PAGE_MASK;
			old_page = pfn_to_page(old_addr >> PAGE_SHIFT);
			copy_highpage(page, old_page);
			*old = addr | (*old & ~PAGE_MASK);

			/* The old page I have found cannot be a
			 * destination page, so return it if its
			 * gfp_flags honor the ones passed in.
			 */
			if (!(gfp_mask & __GFP_HIGHMEM) &&
			    PageHighMem(old_page)) {
				kimage_free_pages(old_page);
				continue;
			}
			addr = old_addr;
			page = old_page;
			break;
		}
		else {
			/* Place the page on the destination list; I
			 * will use it later.
			 */
			list_add(&page->lru, &image->dest_pages);
		}
	}

	return page;
}

static int kimage_load_normal_segment(struct kimage *image,
					 struct kexec_segment *segment)
{
	unsigned long maddr;
	size_t ubytes, mbytes;
	int result;
	unsigned char __user *buf;

	result = 0;
	buf = segment->buf;
	ubytes = segment->bufsz;
	mbytes = segment->memsz;
	maddr = segment->mem;

	result = kimage_set_destination(image, maddr);
	if (result < 0)
		goto out;

	while (mbytes) {
		struct page *page;
		char *ptr;
		size_t uchunk, mchunk;

		page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
		if (!page) {
			result  = -ENOMEM;
			goto out;
		}
		result = kimage_add_page(image, page_to_pfn(page)
								<< PAGE_SHIFT);
		if (result < 0)
			goto out;

		ptr = kmap(page);
		/* Start with a clear page */
		clear_page(ptr);
		ptr += maddr & ~PAGE_MASK;
		mchunk = min_t(size_t, mbytes,
				PAGE_SIZE - (maddr & ~PAGE_MASK));
		uchunk = min(ubytes, mchunk);

		result = copy_from_user(ptr, buf, uchunk);
		kunmap(page);
		if (result) {
			result = -EFAULT;
			goto out;
		}
		ubytes -= uchunk;
		maddr  += mchunk;
		buf    += mchunk;
		mbytes -= mchunk;
	}
out:
	return result;
}

static int kimage_load_crash_segment(struct kimage *image,
					struct kexec_segment *segment)
{
	/* For crash dump kernels we simply copy the data from
	 * user space to its destination.
	 * We do things a page at a time for the sake of kmap.
	 */
	unsigned long maddr;
	size_t ubytes, mbytes;
	int result;
	unsigned char __user *buf;

	result = 0;
	buf = segment->buf;
	ubytes = segment->bufsz;
	mbytes = segment->memsz;
	maddr = segment->mem;
	while (mbytes) {
		struct page *page;
		char *ptr;
		size_t uchunk, mchunk;

		page = pfn_to_page(maddr >> PAGE_SHIFT);
		if (!page) {
			result  = -ENOMEM;
			goto out;
		}
		ptr = kmap(page);
		ptr += maddr & ~PAGE_MASK;
		mchunk = min_t(size_t, mbytes,
				PAGE_SIZE - (maddr & ~PAGE_MASK));
		uchunk = min(ubytes, mchunk);
		if (mchunk > uchunk) {
			/* Zero the trailing part of the page */
			memset(ptr + uchunk, 0, mchunk - uchunk);
		}
		result = copy_from_user(ptr, buf, uchunk);
		kexec_flush_icache_page(page);
		kunmap(page);
		if (result) {
			result = -EFAULT;
			goto out;
		}
		ubytes -= uchunk;
		maddr  += mchunk;
		buf    += mchunk;
		mbytes -= mchunk;
	}
out:
	return result;
}

static int kimage_load_segment(struct kimage *image,
				struct kexec_segment *segment)
{
	int result = -ENOMEM;

	switch (image->type) {
	case KEXEC_TYPE_DEFAULT:
		result = kimage_load_normal_segment(image, segment);
		break;
	case KEXEC_TYPE_CRASH:
		result = kimage_load_crash_segment(image, segment);
		break;
	}

	return result;
}

/*
 * Exec Kernel system call: for obvious reasons only root may call it.
 *
 * This call breaks up into three pieces.
 * - A generic part which loads the new kernel from the current
 *   address space, and very carefully places the data in the
 *   allocated pages.
 *
 * - A generic part that interacts with the kernel and tells all of
 *   the devices to shut down.  Preventing on-going DMAs, and placing
 *   the devices in a consistent state so a later kernel can
 *   reinitialize them.
 *
 * - A machine specific part that includes the syscall number
 *   and then copies the image to its final destination.  And
 *   jumps into the image at entry.
 *
 * kexec does not sync, or unmount filesystems, so if you need
 * that to happen you need to do that yourself.
 */
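/*
 * Illustrative userspace sketch (added by the editor, not part of the
 * original source): loading a single-segment image through the raw
 * syscall.  "image_buf", "image_size", "entry" and the destination
 * address 0x100000 are hypothetical; KEXEC_ARCH_DEFAULT and struct
 * kexec_segment come from <linux/kexec.h>.  bufsz is the number of
 * bytes copied from user space, memsz the (page-aligned) size claimed
 * at the destination; bufsz must not exceed memsz.
 *
 *	struct kexec_segment seg = {
 *		.buf   = image_buf,
 *		.bufsz = image_size,
 *		.mem   = (void *)0x100000,
 *		.memsz = (image_size + 4095) & ~4095UL,
 *	};
 *	if (syscall(__NR_kexec_load, entry, 1, &seg, KEXEC_ARCH_DEFAULT))
 *		perror("kexec_load");
 */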
struct kimage *kexec_image;
struct kimage *kexec_crash_image;
int kexec_load_disabled;

static DEFINE_MUTEX(kexec_mutex);

SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
		struct kexec_segment __user *, segments, unsigned long, flags)
{
	struct kimage **dest_image, *image;
	int result;

	/* We only trust the superuser with rebooting the system. */
	if (!capable(CAP_SYS_BOOT) || kexec_load_disabled)
		return -EPERM;

	/*
	 * Verify we have a legal set of flags
	 * This leaves us room for future extensions.
	 */
	if ((flags & KEXEC_FLAGS) != (flags & ~KEXEC_ARCH_MASK))
		return -EINVAL;

	/* Verify we are on the appropriate architecture */
	if (((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH) &&
		((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))
		return -EINVAL;

	/* Put an artificial cap on the number
	 * of segments passed to kexec_load.
	 */
	if (nr_segments > KEXEC_SEGMENT_MAX)
		return -EINVAL;

	image = NULL;
	result = 0;

	/* Because we write directly to the reserved memory
	 * region when loading crash kernels we need a mutex here to
	 * prevent multiple crash kernels from attempting to load
	 * simultaneously, and to prevent a crash kernel from loading
	 * over the top of an in-use crash kernel.
	 *
	 * KISS: always take the mutex.
	 */
	if (!mutex_trylock(&kexec_mutex))
		return -EBUSY;

	dest_image = &kexec_image;
	if (flags & KEXEC_ON_CRASH)
		dest_image = &kexec_crash_image;
	if (nr_segments > 0) {
		unsigned long i;

		/* Loading another kernel to reboot into */
		if ((flags & KEXEC_ON_CRASH) == 0)
			result = kimage_normal_alloc(&image, entry,
							nr_segments, segments);
		/* Loading another kernel to switch to if this one crashes */
		else if (flags & KEXEC_ON_CRASH) {
			/* Free any current crash dump kernel before
			 * we corrupt it.
			 */
			kimage_free(xchg(&kexec_crash_image, NULL));
			result = kimage_crash_alloc(&image, entry,
						     nr_segments, segments);
			crash_map_reserved_pages();
		}
		if (result)
			goto out;

		if (flags & KEXEC_PRESERVE_CONTEXT)
			image->preserve_context = 1;
		result = machine_kexec_prepare(image);
		if (result)
			goto out;

		for (i = 0; i < nr_segments; i++) {
			result = kimage_load_segment(image, &image->segment[i]);
			if (result)
				goto out;
		}
		kimage_terminate(image);
		if (flags & KEXEC_ON_CRASH)
			crash_unmap_reserved_pages();
	}
	/* Install the new kernel and uninstall the old */
	image = xchg(dest_image, image);

out:
	mutex_unlock(&kexec_mutex);
	kimage_free(image);

	return result;
}

/*
 * Add and remove page tables for crashkernel memory
 *
 * Provide an empty default implementation here -- architecture
 * code may override this
 */
void __weak crash_map_reserved_pages(void)
{}

void __weak crash_unmap_reserved_pages(void)
{}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
		       compat_ulong_t, nr_segments,
		       struct compat_kexec_segment __user *, segments,
		       compat_ulong_t, flags)
{
	struct compat_kexec_segment in;
	struct kexec_segment out, __user *ksegments;
	unsigned long i, result;

	/* Don't allow clients that don't understand the native
	 * architecture to do anything.
	 */
	if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT)
		return -EINVAL;

	if (nr_segments > KEXEC_SEGMENT_MAX)
		return -EINVAL;

	ksegments = compat_alloc_user_space(nr_segments * sizeof(out));
	for (i = 0; i < nr_segments; i++) {
		result = copy_from_user(&in, &segments[i], sizeof(in));
		if (result)
			return -EFAULT;

		out.buf   = compat_ptr(in.buf);
		out.bufsz = in.bufsz;
		out.mem   = in.mem;
		out.memsz = in.memsz;

		result = copy_to_user(&ksegments[i], &out, sizeof(out));
		if (result)
			return -EFAULT;
	}

	return sys_kexec_load(entry, nr_segments, ksegments, flags);
}
#endif

void crash_kexec(struct pt_regs *regs)
{
	/* Take the kexec_mutex here to prevent sys_kexec_load
	 * running on one cpu from replacing the crash kernel
	 * we are using after a panic on a different cpu.
	 *
	 * If the crash kernel was not located in a fixed area
	 * of memory the xchg(&kexec_crash_image) would be
	 * sufficient.  But since I reuse the memory...
	 */
	if (mutex_trylock(&kexec_mutex)) {
		if (kexec_crash_image) {
			struct pt_regs fixed_regs;

			crash_setup_regs(&fixed_regs, regs);
			crash_save_vmcoreinfo();
			machine_crash_shutdown(&fixed_regs);
			machine_kexec(kexec_crash_image);
		}
		mutex_unlock(&kexec_mutex);
	}
}

size_t crash_get_memory_size(void)
{
	size_t size = 0;
	mutex_lock(&kexec_mutex);
	if (crashk_res.end != crashk_res.start)
		size = resource_size(&crashk_res);
	mutex_unlock(&kexec_mutex);
	return size;
}

void __weak crash_free_reserved_phys_range(unsigned long begin,
					   unsigned long end)
{
	unsigned long addr;

	for (addr = begin; addr < end; addr += PAGE_SIZE)
		free_reserved_page(pfn_to_page(addr >> PAGE_SHIFT));
}

int crash_shrink_memory(unsigned long new_size)
{
	int ret = 0;
	unsigned long start, end;
	unsigned long old_size;
	struct resource *ram_res;

	mutex_lock(&kexec_mutex);

	if (kexec_crash_image) {
		ret = -ENOENT;
		goto unlock;
	}
	start = crashk_res.start;
	end = crashk_res.end;
	old_size = (end == 0) ? 0 : end - start + 1;
	if (new_size >= old_size) {
		ret = (new_size == old_size) ? 0 : -EINVAL;
		goto unlock;
	}

	ram_res = kzalloc(sizeof(*ram_res), GFP_KERNEL);
	if (!ram_res) {
		ret = -ENOMEM;
		goto unlock;
	}

	start = roundup(start, KEXEC_CRASH_MEM_ALIGN);
	end = roundup(start + new_size, KEXEC_CRASH_MEM_ALIGN);

	crash_map_reserved_pages();
	crash_free_reserved_phys_range(end, crashk_res.end);

	if ((start == end) && (crashk_res.parent != NULL))
		release_resource(&crashk_res);

	ram_res->start = end;
	ram_res->end = crashk_res.end;
	ram_res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
	ram_res->name = "System RAM";

	crashk_res.end = end - 1;

	insert_resource(&iomem_resource, ram_res);
	crash_unmap_reserved_pages();

unlock:
	mutex_unlock(&kexec_mutex);
	return ret;
}

static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
			    size_t data_len)
{
	struct elf_note note;

	note.n_namesz = strlen(name) + 1;
	note.n_descsz = data_len;
	note.n_type   = type;
	memcpy(buf, &note, sizeof(note));
	buf += (sizeof(note) + 3)/4;
	memcpy(buf, name, note.n_namesz);
	buf += (note.n_namesz + 3)/4;
	memcpy(buf, data, note.n_descsz);
	buf += (note.n_descsz + 3)/4;

	return buf;
}
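
/*
 * Illustrative sketch (added by the editor, not part of the original
 * source): append_elf_note() above emits a standard ELF note record,
 * with each of the three parts padded to a 4-byte boundary (hence the
 * "+ 3)/4" arithmetic on a u32 buffer):
 *
 *	+------------------------------+
 *	| n_namesz | n_descsz | n_type |   struct elf_note header
 *	+------------------------------+
 *	| name bytes, NUL, pad to 4    |
 *	+------------------------------+
 *	| desc bytes, pad to 4         |
 *	+------------------------------+
 *
 * final_note() below terminates the buffer with an all-zero header.
 */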

static void final_note(u32 *buf)
{
	struct elf_note note;

	note.n_namesz = 0;
	note.n_descsz = 0;
	note.n_type   = 0;
	memcpy(buf, &note, sizeof(note));
}

void crash_save_cpu(struct pt_regs *regs, int cpu)
{
	struct elf_prstatus prstatus;
	u32 *buf;

	if ((cpu < 0) || (cpu >= nr_cpu_ids))
		return;

	/* Using ELF notes here is opportunistic.
	 * I need a well defined structure format
	 * for the data I pass, and I need tags
	 * on the data to indicate what information I have
	 * squirrelled away.  ELF notes happen to provide
	 * all of that, so there is no need to invent something new.
	 */
	buf = (u32 *)per_cpu_ptr(crash_notes, cpu);
	if (!buf)
		return;
	memset(&prstatus, 0, sizeof(prstatus));
	prstatus.pr_pid = current->pid;
	elf_core_copy_kernel_regs(&prstatus.pr_reg, regs);
	buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
			      &prstatus, sizeof(prstatus));
	final_note(buf);
}

static int __init crash_notes_memory_init(void)
{
	/* Allocate memory for saving cpu registers. */
	crash_notes = alloc_percpu(note_buf_t);
	if (!crash_notes) {
		printk("Kexec: Memory allocation for saving cpu register"
		       " states failed\n");
		return -ENOMEM;
	}
	return 0;
}
module_init(crash_notes_memory_init)


/*
 * parsing the "crashkernel" commandline
 *
 * this code is intended to be called from architecture specific code
 */


/*
 * This function parses command lines in the format
 *
 *	crashkernel=ramsize-range:size[,...][@offset]
 *
 * The function returns 0 on success and -EINVAL on failure.
 */
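/*
 * Illustrative example (added by the editor, not part of the original
 * source): given the hypothetical command line
 *
 *	crashkernel=512M-2G:64M,2G-:128M@16M
 *
 * a machine with 1G of system RAM falls in the 512M-2G range, so the
 * parser returns crash_size = 64M and crash_base = 16M; a machine
 * with 4G of RAM matches the open-ended 2G- range and gets 128M
 * instead.
 */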
static int __init parse_crashkernel_mem(char *cmdline,
					unsigned long long system_ram,
					unsigned long long *crash_size,
					unsigned long long *crash_base)
{
	char *cur = cmdline, *tmp;

	/* for each entry of the comma-separated list */
	do {
		unsigned long long start, end = ULLONG_MAX, size;

		/* get the start of the range */
		start = memparse(cur, &tmp);
		if (cur == tmp) {
			pr_warning("crashkernel: Memory value expected\n");
			return -EINVAL;
		}
		cur = tmp;
		if (*cur != '-') {
			pr_warning("crashkernel: '-' expected\n");
			return -EINVAL;
		}
		cur++;

		/* if no ':' is here, then we read the end */
1280 if (*cur != ':') {
1281 end = memparse(cur, &tmp);
1282 if (cur == tmp) {
1283 pr_warning("crashkernel: Memory "
1284 "value expected\n");
1285 return -EINVAL;
1286 }
1287 cur = tmp;
1288 if (end <= start) {
1289 pr_warning("crashkernel: end <= start\n");
1290 return -EINVAL;
1291 }
1292 }
1293
1294 if (*cur != ':') {
1295 pr_warning("crashkernel: ':' expected\n");
1296 return -EINVAL;
1297 }
1298 cur++;
1299
1300 size = memparse(cur, &tmp);
1301 if (cur == tmp) {
1302 pr_warning("Memory value expected\n");
1303 return -EINVAL;
1304 }
1305 cur = tmp;
1306 if (size >= system_ram) {
1307 pr_warning("crashkernel: invalid size\n");
1308 return -EINVAL;
1309 }
1310
1311 /* match ? */
Michael Ellermanbe089d792008-05-01 04:34:49 -07001312 if (system_ram >= start && system_ram < end) {
Bernhard Wallecba63c32007-10-18 23:40:58 -07001313 *crash_size = size;
1314 break;
1315 }
1316 } while (*cur++ == ',');
1317
1318 if (*crash_size > 0) {
Hidetoshi Seto11c7da42009-07-29 15:02:08 -07001319 while (*cur && *cur != ' ' && *cur != '@')
Bernhard Wallecba63c32007-10-18 23:40:58 -07001320 cur++;
1321 if (*cur == '@') {
1322 cur++;
1323 *crash_base = memparse(cur, &tmp);
1324 if (cur == tmp) {
1325				pr_warning("crashkernel: Memory value "
1326						"expected after '@'\n");
1327 return -EINVAL;
1328 }
1329 }
1330 }
1331
1332 return 0;
1333}
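/*
 * Example (illustrative): with
 *
 *   crashkernel=512M-2G:64M,2G-:128M@16M
 *
 * a machine with 1G of RAM matches the 512M-2G range and reserves
 * 64M, while a 4G machine matches 2G- (end defaults to ULLONG_MAX)
 * and reserves 128M; the trailing @16M fixes the reservation base.
 * A size that is >= system_ram is rejected with -EINVAL.
 */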
1334
1335/*
1336 * This function parses "simple" (old) crashkernel command lines like
1337 *
1338 * crashkernel=size[@offset]
1339 *
1340 * It returns 0 on success and -EINVAL on failure.
1341 */
1342static int __init parse_crashkernel_simple(char *cmdline,
1343 unsigned long long *crash_size,
1344 unsigned long long *crash_base)
1345{
1346 char *cur = cmdline;
1347
1348 *crash_size = memparse(cmdline, &cur);
1349 if (cmdline == cur) {
1350 pr_warning("crashkernel: memory value expected\n");
1351 return -EINVAL;
1352 }
1353
1354 if (*cur == '@')
1355 *crash_base = memparse(cur+1, &cur);
Zhenzhong Duaneaa3be62012-03-28 14:42:47 -07001356 else if (*cur != ' ' && *cur != '\0') {
1357 pr_warning("crashkernel: unrecognized char\n");
1358 return -EINVAL;
1359 }
Bernhard Wallecba63c32007-10-18 23:40:58 -07001360
1361 return 0;
1362}
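/*
 * Example (illustrative): "crashkernel=128M@16M" sets *crash_size to
 * 128M and *crash_base to 16M, while a plain "crashkernel=128M"
 * leaves *crash_base at the caller's 0 so the architecture may pick
 * the base address itself.
 */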
1363
Yinghai Luadbc7422013-04-15 22:23:48 -07001364#define SUFFIX_HIGH 0
1365#define SUFFIX_LOW 1
1366#define SUFFIX_NULL 2
1367static __initdata char *suffix_tbl[] = {
1368 [SUFFIX_HIGH] = ",high",
1369 [SUFFIX_LOW] = ",low",
1370 [SUFFIX_NULL] = NULL,
1371};
1372
Bernhard Wallecba63c32007-10-18 23:40:58 -07001373/*
Yinghai Luadbc7422013-04-15 22:23:48 -07001374 * This function parses "suffix"  crashkernel command lines like
1375 *
1376 * crashkernel=size,[high|low]
1377 *
1378 * It returns 0 on success and -EINVAL on failure.
Bernhard Wallecba63c32007-10-18 23:40:58 -07001379 */
Yinghai Luadbc7422013-04-15 22:23:48 -07001380static int __init parse_crashkernel_suffix(char *cmdline,
1381 unsigned long long *crash_size,
1382 unsigned long long *crash_base,
1383 const char *suffix)
1384{
1385 char *cur = cmdline;
1386
1387 *crash_size = memparse(cmdline, &cur);
1388 if (cmdline == cur) {
1389 pr_warn("crashkernel: memory value expected\n");
1390 return -EINVAL;
1391 }
1392
1393 /* check with suffix */
1394 if (strncmp(cur, suffix, strlen(suffix))) {
1395 pr_warn("crashkernel: unrecognized char\n");
1396 return -EINVAL;
1397 }
1398 cur += strlen(suffix);
1399 if (*cur != ' ' && *cur != '\0') {
1400 pr_warn("crashkernel: unrecognized char\n");
1401 return -EINVAL;
1402 }
1403
1404 return 0;
1405}
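/*
 * Example (illustrative): "crashkernel=256M,high" parsed against the
 * ",high" suffix sets *crash_size to 256M; note that crash_base is
 * deliberately not consumed here, because with the ,high/,low
 * variants the kernel chooses where to place the reservation.
 */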
1406
1407static __init char *get_last_crashkernel(char *cmdline,
1408 const char *name,
1409 const char *suffix)
1410{
1411 char *p = cmdline, *ck_cmdline = NULL;
1412
1413 /* find crashkernel and use the last one if there are more */
1414 p = strstr(p, name);
1415 while (p) {
1416 char *end_p = strchr(p, ' ');
1417 char *q;
1418
1419 if (!end_p)
1420 end_p = p + strlen(p);
1421
1422 if (!suffix) {
1423 int i;
1424
1425 /* skip the one with any known suffix */
1426 for (i = 0; suffix_tbl[i]; i++) {
1427 q = end_p - strlen(suffix_tbl[i]);
1428 if (!strncmp(q, suffix_tbl[i],
1429 strlen(suffix_tbl[i])))
1430 goto next;
1431 }
1432 ck_cmdline = p;
1433 } else {
1434 q = end_p - strlen(suffix);
1435 if (!strncmp(q, suffix, strlen(suffix)))
1436 ck_cmdline = p;
1437 }
1438next:
1439 p = strstr(p+1, name);
1440 }
1441
1442 if (!ck_cmdline)
1443 return NULL;
1444
1445 return ck_cmdline;
1446}
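/*
 * Example (illustrative): given
 *
 *   crashkernel=512M-2G:64M crashkernel=128M crashkernel=256M,high
 *
 * a lookup with suffix == NULL returns the second option, since later
 * plain options override earlier ones and ",high" is skipped as a
 * known suffix; a lookup with suffix == ",high" returns the third.
 */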
1447
Yinghai Lu0212f912013-01-24 12:20:11 -08001448static int __init __parse_crashkernel(char *cmdline,
Bernhard Wallecba63c32007-10-18 23:40:58 -07001449 unsigned long long system_ram,
1450 unsigned long long *crash_size,
Yinghai Lu0212f912013-01-24 12:20:11 -08001451 unsigned long long *crash_base,
Yinghai Luadbc7422013-04-15 22:23:48 -07001452 const char *name,
1453 const char *suffix)
Bernhard Wallecba63c32007-10-18 23:40:58 -07001454{
Bernhard Wallecba63c32007-10-18 23:40:58 -07001455 char *first_colon, *first_space;
Yinghai Luadbc7422013-04-15 22:23:48 -07001456 char *ck_cmdline;
Bernhard Wallecba63c32007-10-18 23:40:58 -07001457
1458 BUG_ON(!crash_size || !crash_base);
1459 *crash_size = 0;
1460 *crash_base = 0;
1461
Yinghai Luadbc7422013-04-15 22:23:48 -07001462 ck_cmdline = get_last_crashkernel(cmdline, name, suffix);
Bernhard Wallecba63c32007-10-18 23:40:58 -07001463
1464 if (!ck_cmdline)
1465 return -EINVAL;
1466
Yinghai Lu0212f912013-01-24 12:20:11 -08001467 ck_cmdline += strlen(name);
Bernhard Wallecba63c32007-10-18 23:40:58 -07001468
Yinghai Luadbc7422013-04-15 22:23:48 -07001469 if (suffix)
1470 return parse_crashkernel_suffix(ck_cmdline, crash_size,
1471 crash_base, suffix);
Bernhard Wallecba63c32007-10-18 23:40:58 -07001472 /*
1473 * if the commandline contains a ':', then that's the extended
1474 * syntax -- if not, it must be the classic syntax
1475 */
1476 first_colon = strchr(ck_cmdline, ':');
1477 first_space = strchr(ck_cmdline, ' ');
1478 if (first_colon && (!first_space || first_colon < first_space))
1479 return parse_crashkernel_mem(ck_cmdline, system_ram,
1480 crash_size, crash_base);
Bernhard Wallecba63c32007-10-18 23:40:58 -07001481
Xishi Qiu80c74f62013-09-11 14:24:47 -07001482 return parse_crashkernel_simple(ck_cmdline, crash_size, crash_base);
Bernhard Wallecba63c32007-10-18 23:40:58 -07001483}
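/*
 * Dispatch summary (illustrative): a non-NULL suffix forces the
 * ",high"/",low" parser; otherwise a ':' appearing before any ' '
 * selects the extended range syntax above, and anything else falls
 * through to the classic size[@offset] form.
 */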
1484
Yinghai Luadbc7422013-04-15 22:23:48 -07001485/*
1486 * This function is the entry point for command line parsing and should be
1487 * called from the arch-specific code.
1488 */
Yinghai Lu0212f912013-01-24 12:20:11 -08001489int __init parse_crashkernel(char *cmdline,
1490 unsigned long long system_ram,
1491 unsigned long long *crash_size,
1492 unsigned long long *crash_base)
1493{
1494 return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
Yinghai Luadbc7422013-04-15 22:23:48 -07001495 "crashkernel=", NULL);
Yinghai Lu0212f912013-01-24 12:20:11 -08001496}
1497
Yinghai Lu55a20ee2013-04-15 22:23:47 -07001498int __init parse_crashkernel_high(char *cmdline,
1499 unsigned long long system_ram,
1500 unsigned long long *crash_size,
1501 unsigned long long *crash_base)
1502{
1503 return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
Yinghai Luadbc7422013-04-15 22:23:48 -07001504 "crashkernel=", suffix_tbl[SUFFIX_HIGH]);
Yinghai Lu55a20ee2013-04-15 22:23:47 -07001505}
1506
Yinghai Lu0212f912013-01-24 12:20:11 -08001507int __init parse_crashkernel_low(char *cmdline,
1508 unsigned long long system_ram,
1509 unsigned long long *crash_size,
1510 unsigned long long *crash_base)
1511{
1512 return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
Yinghai Luadbc7422013-04-15 22:23:48 -07001513 "crashkernel=", suffix_tbl[SUFFIX_LOW]);
Yinghai Lu0212f912013-01-24 12:20:11 -08001514}
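/*
 * Usage sketch (assumption: simplified arch boot code, not from this
 * file): an architecture typically calls the parser once memory has
 * been detected, e.g.
 *
 *   unsigned long long crash_size, crash_base;
 *   int ret = parse_crashkernel(boot_command_line,
 *                               memblock_phys_mem_size(),
 *                               &crash_size, &crash_base);
 *   if (!ret && crash_size)
 *           ... reserve [crash_base, crash_base + crash_size)
 *               and fill in crashk_res ...
 */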
Bernhard Wallecba63c32007-10-18 23:40:58 -07001515
Michael Holzheufa8ff292011-10-30 15:16:41 +01001516static void update_vmcoreinfo_note(void)
Ken'ichi Ohmichifd59d232007-10-16 23:27:27 -07001517{
Michael Holzheufa8ff292011-10-30 15:16:41 +01001518 u32 *buf = vmcoreinfo_note;
Ken'ichi Ohmichifd59d232007-10-16 23:27:27 -07001519
1520 if (!vmcoreinfo_size)
1521 return;
Ken'ichi Ohmichifd59d232007-10-16 23:27:27 -07001522 buf = append_elf_note(buf, VMCOREINFO_NOTE_NAME, 0, vmcoreinfo_data,
1523 vmcoreinfo_size);
Ken'ichi Ohmichifd59d232007-10-16 23:27:27 -07001524 final_note(buf);
1525}
1526
Michael Holzheufa8ff292011-10-30 15:16:41 +01001527void crash_save_vmcoreinfo(void)
1528{
Vivek Goyal63dca8d2012-07-30 14:42:36 -07001529 vmcoreinfo_append_str("CRASHTIME=%ld\n", get_seconds());
Michael Holzheufa8ff292011-10-30 15:16:41 +01001530 update_vmcoreinfo_note();
1531}
1532
Ken'ichi Ohmichifd59d232007-10-16 23:27:27 -07001533void vmcoreinfo_append_str(const char *fmt, ...)
1534{
1535 va_list args;
1536 char buf[0x50];
Zhang Yanfei310faaa2013-04-30 15:28:21 -07001537 size_t r;
Ken'ichi Ohmichifd59d232007-10-16 23:27:27 -07001538
1539 va_start(args, fmt);
Chen Ganga19428e2014-01-27 17:07:13 -08001540 r = vscnprintf(buf, sizeof(buf), fmt, args);
Ken'ichi Ohmichifd59d232007-10-16 23:27:27 -07001541 va_end(args);
1542
Zhang Yanfei31c3a3f2013-04-30 15:28:23 -07001543 r = min(r, vmcoreinfo_max_size - vmcoreinfo_size);
Ken'ichi Ohmichifd59d232007-10-16 23:27:27 -07001544
1545 memcpy(&vmcoreinfo_data[vmcoreinfo_size], buf, r);
1546
1547 vmcoreinfo_size += r;
1548}
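/*
 * Illustrative note (assumption, based on <linux/kexec.h>): the
 * VMCOREINFO_* macros used below are thin wrappers around this
 * function; VMCOREINFO_SYMBOL(name), for instance, appends a line
 *
 *   SYMBOL(name)=<hex address>
 *
 * so vmcoreinfo_data accumulates newline-separated KEY=value text
 * that tools such as makedumpfile parse out of the dump.
 */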
1549
1550/*
1551 * provide an empty default implementation here -- architecture
1552 * code may override this
1553 */
1554void __attribute__ ((weak)) arch_crash_save_vmcoreinfo(void)
1555{}
1556
1557unsigned long __attribute__ ((weak)) paddr_vmcoreinfo_note(void)
1558{
1559	return __pa((unsigned long)&vmcoreinfo_note);
1560}
1561
1562static int __init crash_save_vmcoreinfo_init(void)
1563{
Ken'ichi Ohmichibba1f602008-02-07 00:15:22 -08001564 VMCOREINFO_OSRELEASE(init_uts_ns.name.release);
1565 VMCOREINFO_PAGESIZE(PAGE_SIZE);
Ken'ichi Ohmichifd59d232007-10-16 23:27:27 -07001566
Ken'ichi Ohmichibcbba6c2007-10-16 23:27:30 -07001567 VMCOREINFO_SYMBOL(init_uts_ns);
1568 VMCOREINFO_SYMBOL(node_online_map);
Will Deacond034cfa2012-03-28 14:42:47 -07001569#ifdef CONFIG_MMU
Ken'ichi Ohmichibcbba6c2007-10-16 23:27:30 -07001570 VMCOREINFO_SYMBOL(swapper_pg_dir);
Will Deacond034cfa2012-03-28 14:42:47 -07001571#endif
Ken'ichi Ohmichibcbba6c2007-10-16 23:27:30 -07001572 VMCOREINFO_SYMBOL(_stext);
Joonsoo Kimf1c40692013-04-29 15:07:37 -07001573 VMCOREINFO_SYMBOL(vmap_area_list);
Ken'ichi Ohmichifd59d232007-10-16 23:27:27 -07001574
1575#ifndef CONFIG_NEED_MULTIPLE_NODES
Ken'ichi Ohmichibcbba6c2007-10-16 23:27:30 -07001576 VMCOREINFO_SYMBOL(mem_map);
1577 VMCOREINFO_SYMBOL(contig_page_data);
Ken'ichi Ohmichifd59d232007-10-16 23:27:27 -07001578#endif
1579#ifdef CONFIG_SPARSEMEM
Ken'ichi Ohmichibcbba6c2007-10-16 23:27:30 -07001580 VMCOREINFO_SYMBOL(mem_section);
1581 VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
Ken'ichi Ohmichic76f8602008-02-07 00:15:20 -08001582 VMCOREINFO_STRUCT_SIZE(mem_section);
Ken'ichi Ohmichibcbba6c2007-10-16 23:27:30 -07001583 VMCOREINFO_OFFSET(mem_section, section_mem_map);
Ken'ichi Ohmichifd59d232007-10-16 23:27:27 -07001584#endif
Ken'ichi Ohmichic76f8602008-02-07 00:15:20 -08001585 VMCOREINFO_STRUCT_SIZE(page);
1586 VMCOREINFO_STRUCT_SIZE(pglist_data);
1587 VMCOREINFO_STRUCT_SIZE(zone);
1588 VMCOREINFO_STRUCT_SIZE(free_area);
1589 VMCOREINFO_STRUCT_SIZE(list_head);
1590 VMCOREINFO_SIZE(nodemask_t);
Ken'ichi Ohmichibcbba6c2007-10-16 23:27:30 -07001591 VMCOREINFO_OFFSET(page, flags);
1592 VMCOREINFO_OFFSET(page, _count);
1593 VMCOREINFO_OFFSET(page, mapping);
1594 VMCOREINFO_OFFSET(page, lru);
Atsushi Kumagai8d670912013-02-27 17:03:25 -08001595 VMCOREINFO_OFFSET(page, _mapcount);
1596 VMCOREINFO_OFFSET(page, private);
Ken'ichi Ohmichibcbba6c2007-10-16 23:27:30 -07001597 VMCOREINFO_OFFSET(pglist_data, node_zones);
1598 VMCOREINFO_OFFSET(pglist_data, nr_zones);
Ken'ichi Ohmichifd59d232007-10-16 23:27:27 -07001599#ifdef CONFIG_FLAT_NODE_MEM_MAP
Ken'ichi Ohmichibcbba6c2007-10-16 23:27:30 -07001600 VMCOREINFO_OFFSET(pglist_data, node_mem_map);
Ken'ichi Ohmichifd59d232007-10-16 23:27:27 -07001601#endif
Ken'ichi Ohmichibcbba6c2007-10-16 23:27:30 -07001602 VMCOREINFO_OFFSET(pglist_data, node_start_pfn);
1603 VMCOREINFO_OFFSET(pglist_data, node_spanned_pages);
1604 VMCOREINFO_OFFSET(pglist_data, node_id);
1605 VMCOREINFO_OFFSET(zone, free_area);
1606 VMCOREINFO_OFFSET(zone, vm_stat);
1607 VMCOREINFO_OFFSET(zone, spanned_pages);
1608 VMCOREINFO_OFFSET(free_area, free_list);
1609 VMCOREINFO_OFFSET(list_head, next);
1610 VMCOREINFO_OFFSET(list_head, prev);
Atsushi Kumagai13ba3fc2013-04-29 15:07:40 -07001611 VMCOREINFO_OFFSET(vmap_area, va_start);
1612 VMCOREINFO_OFFSET(vmap_area, list);
Ken'ichi Ohmichibcbba6c2007-10-16 23:27:30 -07001613 VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER);
Neil Horman04d491a2009-04-02 16:58:57 -07001614 log_buf_kexec_setup();
Ken'ichi Ohmichi83a08e72008-01-08 15:33:05 -08001615 VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES);
Ken'ichi Ohmichibcbba6c2007-10-16 23:27:30 -07001616 VMCOREINFO_NUMBER(NR_FREE_PAGES);
Ken'ichi Ohmichi122c7a52008-04-28 02:13:04 -07001617 VMCOREINFO_NUMBER(PG_lru);
1618 VMCOREINFO_NUMBER(PG_private);
1619 VMCOREINFO_NUMBER(PG_swapcache);
Atsushi Kumagai8d670912013-02-27 17:03:25 -08001620 VMCOREINFO_NUMBER(PG_slab);
Mitsuhiro Tanino0d0bf662013-02-27 17:03:27 -08001621#ifdef CONFIG_MEMORY_FAILURE
1622 VMCOREINFO_NUMBER(PG_hwpoison);
1623#endif
Atsushi Kumagai8d670912013-02-27 17:03:25 -08001624 VMCOREINFO_NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE);
Ken'ichi Ohmichifd59d232007-10-16 23:27:27 -07001625
1626 arch_crash_save_vmcoreinfo();
Michael Holzheufa8ff292011-10-30 15:16:41 +01001627 update_vmcoreinfo_note();
Ken'ichi Ohmichifd59d232007-10-16 23:27:27 -07001628
1629 return 0;
1630}
1631
1632module_init(crash_save_vmcoreinfo_init)
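/*
 * Example of the accumulated text (illustrative, values vary by build
 * and boot; CRASHTIME is only appended on the crash path above):
 *
 *   OSRELEASE=3.14.0
 *   PAGESIZE=4096
 *   SYMBOL(init_uts_ns)=ffffffff81a1b2c0
 *   OFFSET(page.flags)=0
 *   NUMBER(PG_lru)=5
 *   CRASHTIME=1394000000
 */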
Huang Ying3ab83522008-07-25 19:45:07 -07001633
Huang Ying7ade3fc2008-08-15 00:40:21 -07001634/*
1635 * Move into place and start executing a preloaded standalone
1636 * executable. If nothing was preloaded, return an error.
Huang Ying3ab83522008-07-25 19:45:07 -07001637 */
1638int kernel_kexec(void)
1639{
1640 int error = 0;
1641
Andrew Morton8c5a1cf2008-08-15 00:40:27 -07001642 if (!mutex_trylock(&kexec_mutex))
Huang Ying3ab83522008-07-25 19:45:07 -07001643 return -EBUSY;
1644 if (!kexec_image) {
1645 error = -EINVAL;
1646 goto Unlock;
1647 }
1648
Huang Ying3ab83522008-07-25 19:45:07 -07001649#ifdef CONFIG_KEXEC_JUMP
Huang Ying7ade3fc2008-08-15 00:40:21 -07001650 if (kexec_image->preserve_context) {
Srivatsa S. Bhatbcda53f2011-12-07 22:29:54 +01001651 lock_system_sleep();
Huang Ying89081d12008-07-25 19:45:10 -07001652 pm_prepare_console();
1653 error = freeze_processes();
1654 if (error) {
1655 error = -EBUSY;
1656 goto Restore_console;
1657 }
1658 suspend_console();
Alan Sternd1616302009-05-24 22:05:42 +02001659 error = dpm_suspend_start(PMSG_FREEZE);
Huang Ying89081d12008-07-25 19:45:10 -07001660 if (error)
1661 goto Resume_console;
Alan Sternd1616302009-05-24 22:05:42 +02001662 /* At this point, dpm_suspend_start() has been called,
Rafael J. Wysockicf579df2012-01-29 20:38:29 +01001663 * but *not* dpm_suspend_end(). We *must* call
1664 * dpm_suspend_end() now. Otherwise, drivers for
Huang Ying89081d12008-07-25 19:45:10 -07001665 * some devices (e.g. interrupt controllers) become
1666 * desynchronized with the actual state of the
1667 * hardware at resume time, and evil weirdness ensues.
1668 */
Rafael J. Wysockicf579df2012-01-29 20:38:29 +01001669 error = dpm_suspend_end(PMSG_FREEZE);
Huang Ying89081d12008-07-25 19:45:10 -07001670 if (error)
Rafael J. Wysocki749b0af2009-03-16 22:34:35 +01001671 goto Resume_devices;
1672 error = disable_nonboot_cpus();
1673 if (error)
1674 goto Enable_cpus;
Rafael J. Wysocki2ed8d2b2009-03-16 22:34:06 +01001675 local_irq_disable();
Rafael J. Wysocki2e711c02011-04-26 19:15:07 +02001676 error = syscore_suspend();
Rafael J. Wysocki770824b2009-02-22 18:38:50 +01001677 if (error)
Rafael J. Wysocki749b0af2009-03-16 22:34:35 +01001678 goto Enable_irqs;
Huang Ying7ade3fc2008-08-15 00:40:21 -07001679 } else
Huang Ying3ab83522008-07-25 19:45:07 -07001680#endif
Huang Ying7ade3fc2008-08-15 00:40:21 -07001681 {
Khalid Aziz4fc9bbf2013-11-27 15:19:25 -07001682 kexec_in_progress = true;
Huang Yingca195b72008-08-15 00:40:24 -07001683 kernel_restart_prepare(NULL);
Vivek Goyalc97102b2013-12-18 17:08:31 -08001684 migrate_to_reboot_cpu();
Huang Ying3ab83522008-07-25 19:45:07 -07001685 printk(KERN_EMERG "Starting new kernel\n");
1686 machine_shutdown();
1687 }
1688
1689 machine_kexec(kexec_image);
1690
Huang Ying3ab83522008-07-25 19:45:07 -07001691#ifdef CONFIG_KEXEC_JUMP
Huang Ying7ade3fc2008-08-15 00:40:21 -07001692 if (kexec_image->preserve_context) {
Rafael J. Wysocki19234c02011-04-20 00:36:11 +02001693 syscore_resume();
Rafael J. Wysocki749b0af2009-03-16 22:34:35 +01001694 Enable_irqs:
Huang Ying3ab83522008-07-25 19:45:07 -07001695 local_irq_enable();
Rafael J. Wysocki749b0af2009-03-16 22:34:35 +01001696 Enable_cpus:
Huang Ying89081d12008-07-25 19:45:10 -07001697 enable_nonboot_cpus();
Rafael J. Wysockicf579df2012-01-29 20:38:29 +01001698 dpm_resume_start(PMSG_RESTORE);
Huang Ying89081d12008-07-25 19:45:10 -07001699 Resume_devices:
Alan Sternd1616302009-05-24 22:05:42 +02001700 dpm_resume_end(PMSG_RESTORE);
Huang Ying89081d12008-07-25 19:45:10 -07001701 Resume_console:
1702 resume_console();
1703 thaw_processes();
1704 Restore_console:
1705 pm_restore_console();
Srivatsa S. Bhatbcda53f2011-12-07 22:29:54 +01001706 unlock_system_sleep();
Huang Ying3ab83522008-07-25 19:45:07 -07001707 }
Huang Ying7ade3fc2008-08-15 00:40:21 -07001708#endif
Huang Ying3ab83522008-07-25 19:45:07 -07001709
1710 Unlock:
Andrew Morton8c5a1cf2008-08-15 00:40:27 -07001711 mutex_unlock(&kexec_mutex);
Huang Ying3ab83522008-07-25 19:45:07 -07001712 return error;
1713}
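/*
 * Trigger sketch (illustrative): userspace reaches this function via
 * reboot(LINUX_REBOOT_CMD_KEXEC) after staging an image with the
 * kexec_load() syscall, which is what "kexec -l ... && kexec -e"
 * does. With ->preserve_context set, the CONFIG_KEXEC_JUMP path above
 * quiesces devices instead of shutting the machine down, so control
 * can come back here once the other kernel returns.
 */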