/*
 * kexec.c - kexec system call
 * Copyright (C) 2002-2004 Eric Biederman <ebiederm@xmission.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */

#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/kexec.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/syscalls.h>
#include <linux/reboot.h>
#include <linux/ioport.h>
#include <linux/hardirq.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/utsrelease.h>
#include <linux/utsname.h>
#include <linux/numa.h>
#include <linux/suspend.h>
#include <linux/device.h>

#include <asm/page.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/system.h>
#include <asm/sections.h>

/* Per cpu memory for storing cpu states in case of system crash. */
note_buf_t* crash_notes;

/* vmcoreinfo stuff */
unsigned char vmcoreinfo_data[VMCOREINFO_BYTES];
u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
size_t vmcoreinfo_size;
size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data);

/* Location of the reserved area for the crash kernel */
struct resource crashk_res = {
	.name  = "Crash kernel",
	.start = 0,
	.end   = 0,
	.flags = IORESOURCE_BUSY | IORESOURCE_MEM
};

int kexec_should_crash(struct task_struct *p)
{
	if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops)
		return 1;
	return 0;
}

/*
 * When kexec transitions to the new kernel there is a one-to-one
 * mapping between physical and virtual addresses.  On processors
 * where you can disable the MMU this is trivial, and easy.  For
 * others it is still a simple predictable page table to setup.
 *
 * In that environment kexec copies the new kernel to its final
 * resting place.  This means I can only support memory whose
 * physical address can fit in an unsigned long.  In particular
 * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled.
 * If the assembly stub has more restrictive requirements
 * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DEST_MEMORY_LIMIT can be
 * defined more restrictively in <asm/kexec.h>.
 *
 * The code for the transition from the current kernel to the
 * new kernel is placed in the control_code_buffer, whose size
 * is given by KEXEC_CONTROL_CODE_SIZE.  In the best case only a single
 * page of memory is necessary, but some architectures require more.
 * Because this memory must be identity mapped in the transition from
 * virtual to physical addresses it must live in the range
 * 0 - TASK_SIZE, as only the user space mappings are arbitrarily
 * modifiable.
 *
 * The assembly stub in the control code buffer is passed a linked list
 * of descriptor pages detailing the source pages of the new kernel,
 * and the destination addresses of those source pages.  As this data
 * structure is not used in the context of the current OS, it must
 * be self-contained.
 *
 * The code has been made to work with highmem pages and will use a
 * destination page in its final resting place (if it happens
 * to allocate it).  The end product of this is that most of the
 * physical address space, and most of RAM can be used.
 *
 * Future directions include:
 *  - allocating a page table with the control code buffer identity
 *    mapped, to simplify machine_kexec and make kexec_on_panic more
 *    reliable.
 */

/*
 * KIMAGE_NO_DEST is an impossible destination address..., for
 * allocating pages whose destination address we do not care about.
 */
#define KIMAGE_NO_DEST (-1UL)
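
/*
 * For orientation (an illustrative sketch, not an additional data
 * structure): kimage_set_destination(), kimage_add_page() and
 * kimage_terminate() below build the entry list that the assembly
 * stub walks, roughly
 *
 *	dest_addr | IND_DESTINATION	set the current copy destination
 *	src_page  | IND_SOURCE		copy one page, destination += PAGE_SIZE
 *	...
 *	next_page | IND_INDIRECTION	continue the list in another page
 *	...
 *	IND_DONE			end of the list
 *
 * The IND_* flags come from <linux/kexec.h>.
 */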

static int kimage_is_destination_range(struct kimage *image,
					unsigned long start, unsigned long end);
static struct page *kimage_alloc_page(struct kimage *image,
					gfp_t gfp_mask,
					unsigned long dest);

static int do_kimage_alloc(struct kimage **rimage, unsigned long entry,
				unsigned long nr_segments,
				struct kexec_segment __user *segments)
{
	size_t segment_bytes;
	struct kimage *image;
	unsigned long i;
	int result;

	/* Allocate a controlling structure */
	result = -ENOMEM;
	image = kzalloc(sizeof(*image), GFP_KERNEL);
	if (!image)
		goto out;

	image->head = 0;
	image->entry = &image->head;
	image->last_entry = &image->head;
	image->control_page = ~0; /* By default this does not apply */
	image->start = entry;
	image->type = KEXEC_TYPE_DEFAULT;

	/* Initialize the list of control pages */
	INIT_LIST_HEAD(&image->control_pages);

	/* Initialize the list of destination pages */
	INIT_LIST_HEAD(&image->dest_pages);

	/* Initialize the list of unusable pages */
	INIT_LIST_HEAD(&image->unuseable_pages);

	/* Read in the segments */
	image->nr_segments = nr_segments;
	segment_bytes = nr_segments * sizeof(*segments);
	result = copy_from_user(image->segment, segments, segment_bytes);
	if (result)
		goto out;

	/*
	 * Verify we have good destination addresses.  The caller is
	 * responsible for making certain we don't attempt to load
	 * the new image into invalid or reserved areas of RAM.  This
	 * just verifies it is an address we can use.
	 *
	 * Since the kernel does everything in page size chunks ensure
	 * the destination addresses are page aligned.  Too many
	 * special cases crop up when we don't do this.  The most
	 * insidious is getting overlapping destination addresses
	 * simply because addresses are changed to page size
	 * granularity.
	 */
	result = -EADDRNOTAVAIL;
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend   = mstart + image->segment[i].memsz;
		if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
			goto out;
		if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
			goto out;
	}

	/* Verify our destination addresses do not overlap.
	 * If we allowed overlapping destination addresses
	 * through, very weird things can happen with no
	 * easy explanation as one segment stops on another.
	 */
	result = -EINVAL;
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;
		unsigned long j;

		mstart = image->segment[i].mem;
		mend   = mstart + image->segment[i].memsz;
		for (j = 0; j < i; j++) {
			unsigned long pstart, pend;
			pstart = image->segment[j].mem;
			pend   = pstart + image->segment[j].memsz;
			/* Do the segments overlap ? */
			if ((mend > pstart) && (mstart < pend))
				goto out;
		}
	}

	/* Ensure our buffer sizes do not exceed
	 * our memory sizes.  This should always be the case,
	 * and it is easier to check up front than to be surprised
	 * later on.
	 */
	result = -EINVAL;
	for (i = 0; i < nr_segments; i++) {
		if (image->segment[i].bufsz > image->segment[i].memsz)
			goto out;
	}

	result = 0;
out:
	if (result == 0)
		*rimage = image;
	else
		kfree(image);

	return result;

}

static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
				unsigned long nr_segments,
				struct kexec_segment __user *segments)
{
	int result;
	struct kimage *image;

	/* Allocate and initialize a controlling structure */
	image = NULL;
	result = do_kimage_alloc(&image, entry, nr_segments, segments);
	if (result)
		goto out;

	*rimage = image;

	/*
	 * Find a location for the control code buffer, and add it
	 * to the vector of segments so that its pages will also be
	 * counted as destination pages.
	 */
	result = -ENOMEM;
	image->control_code_page = kimage_alloc_control_pages(image,
					get_order(KEXEC_CONTROL_CODE_SIZE));
	if (!image->control_code_page) {
		printk(KERN_ERR "Could not allocate control_code_buffer\n");
		goto out;
	}

	image->swap_page = kimage_alloc_control_pages(image, 0);
	if (!image->swap_page) {
		printk(KERN_ERR "Could not allocate swap buffer\n");
		goto out;
	}

	result = 0;
 out:
	if (result == 0)
		*rimage = image;
	else
		kfree(image);

	return result;
}

static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry,
				unsigned long nr_segments,
				struct kexec_segment __user *segments)
{
	int result;
	struct kimage *image;
	unsigned long i;

	image = NULL;
	/* Verify we have a valid entry point */
	if ((entry < crashk_res.start) || (entry > crashk_res.end)) {
		result = -EADDRNOTAVAIL;
		goto out;
	}

	/* Allocate and initialize a controlling structure */
	result = do_kimage_alloc(&image, entry, nr_segments, segments);
	if (result)
		goto out;

	/* Enable the special crash kernel control page
	 * allocation policy.
	 */
	image->control_page = crashk_res.start;
	image->type = KEXEC_TYPE_CRASH;

	/*
	 * Verify we have good destination addresses.  Normally
	 * the caller is responsible for making certain we don't
	 * attempt to load the new image into invalid or reserved
	 * areas of RAM.  But crash kernels are preloaded into a
	 * reserved area of RAM.  We must ensure the addresses
	 * are in the reserved area otherwise preloading the
	 * kernel could corrupt things.
	 */
	result = -EADDRNOTAVAIL;
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend = mstart + image->segment[i].memsz - 1;
		/* Ensure we are within the crash kernel limits */
		if ((mstart < crashk_res.start) || (mend > crashk_res.end))
			goto out;
	}

	/*
	 * Find a location for the control code buffer, and add
	 * it to the vector of segments so that its pages will also be
	 * counted as destination pages.
	 */
	result = -ENOMEM;
	image->control_code_page = kimage_alloc_control_pages(image,
					get_order(KEXEC_CONTROL_CODE_SIZE));
	if (!image->control_code_page) {
		printk(KERN_ERR "Could not allocate control_code_buffer\n");
		goto out;
	}

	result = 0;
out:
	if (result == 0)
		*rimage = image;
	else
		kfree(image);

	return result;
}

static int kimage_is_destination_range(struct kimage *image,
					unsigned long start,
					unsigned long end)
{
	unsigned long i;

	for (i = 0; i < image->nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend = mstart + image->segment[i].memsz;
		if ((end > mstart) && (start < mend))
			return 1;
	}

	return 0;
}

static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
{
	struct page *pages;

	pages = alloc_pages(gfp_mask, order);
	if (pages) {
		unsigned int count, i;
		pages->mapping = NULL;
		set_page_private(pages, order);
		count = 1 << order;
		for (i = 0; i < count; i++)
			SetPageReserved(pages + i);
	}

	return pages;
}

static void kimage_free_pages(struct page *page)
{
	unsigned int order, count, i;

	order = page_private(page);
	count = 1 << order;
	for (i = 0; i < count; i++)
		ClearPageReserved(page + i);
	__free_pages(page, order);
}

static void kimage_free_page_list(struct list_head *list)
{
	struct list_head *pos, *next;

	list_for_each_safe(pos, next, list) {
		struct page *page;

		page = list_entry(pos, struct page, lru);
		list_del(&page->lru);
		kimage_free_pages(page);
	}
}

static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
							unsigned int order)
{
	/* Control pages are special, they are the intermediaries
	 * that are needed while we copy the rest of the pages
	 * to their final resting place.  As such they must
	 * not conflict with either the destination addresses
	 * or memory the kernel is already using.
	 *
	 * The only case where we really need more than one of
	 * these are for architectures where we cannot disable
	 * the MMU and must instead generate an identity mapped
	 * page table for all of the memory.
	 *
	 * At worst this runs in O(N) of the image size.
	 */
	struct list_head extra_pages;
	struct page *pages;
	unsigned int count;

	count = 1 << order;
	INIT_LIST_HEAD(&extra_pages);

	/* Loop while I can allocate a page and the page allocated
	 * is a destination page.
	 */
	do {
		unsigned long pfn, epfn, addr, eaddr;

		pages = kimage_alloc_pages(GFP_KERNEL, order);
		if (!pages)
			break;
		pfn   = page_to_pfn(pages);
		epfn  = pfn + count;
		addr  = pfn << PAGE_SHIFT;
		eaddr = epfn << PAGE_SHIFT;
		if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
			      kimage_is_destination_range(image, addr, eaddr)) {
			list_add(&pages->lru, &extra_pages);
			pages = NULL;
		}
	} while (!pages);

	if (pages) {
		/* Remember the allocated page... */
		list_add(&pages->lru, &image->control_pages);

		/* Because the page is already in its destination
		 * location we will never allocate another page at
		 * that address.  Therefore kimage_alloc_pages
		 * will not return it (again) and we don't need
		 * to give it an entry in image->segment[].
		 */
	}
	/* Deal with the destination pages I have inadvertently allocated.
	 *
	 * Ideally I would convert multi-page allocations into single
	 * page allocations, and add everything to image->dest_pages.
	 *
	 * For now it is simpler to just free the pages.
	 */
	kimage_free_page_list(&extra_pages);

	return pages;
}

static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
							unsigned int order)
{
	/* Control pages are special, they are the intermediaries
	 * that are needed while we copy the rest of the pages
	 * to their final resting place.  As such they must
	 * not conflict with either the destination addresses
	 * or memory the kernel is already using.
	 *
	 * Control pages are also the only pages we must allocate
	 * when loading a crash kernel.  All of the other pages
	 * are specified by the segments and we just memcpy
	 * into them directly.
	 *
	 * The only case where we really need more than one of
	 * these are for architectures where we cannot disable
	 * the MMU and must instead generate an identity mapped
	 * page table for all of the memory.
	 *
	 * Given the low demand this implements a very simple
	 * allocator that finds the first hole of the appropriate
	 * size in the reserved memory region, and allocates all
	 * of the memory up to and including the hole.
	 */
	unsigned long hole_start, hole_end, size;
	struct page *pages;

	pages = NULL;
	size = (1 << order) << PAGE_SHIFT;
	hole_start = (image->control_page + (size - 1)) & ~(size - 1);
	hole_end   = hole_start + size - 1;
	while (hole_end <= crashk_res.end) {
		unsigned long i;

		if (hole_end > KEXEC_CONTROL_MEMORY_LIMIT)
			break;
		if (hole_end > crashk_res.end)
			break;
		/* See if I overlap any of the segments */
		for (i = 0; i < image->nr_segments; i++) {
			unsigned long mstart, mend;

			mstart = image->segment[i].mem;
			mend   = mstart + image->segment[i].memsz - 1;
			if ((hole_end >= mstart) && (hole_start <= mend)) {
				/* Advance the hole to the end of the segment */
				hole_start = (mend + (size - 1)) & ~(size - 1);
				hole_end   = hole_start + size - 1;
				break;
			}
		}
		/* If I don't overlap any segments I have found my hole! */
		if (i == image->nr_segments) {
			pages = pfn_to_page(hole_start >> PAGE_SHIFT);
			break;
		}
	}
	if (pages)
		image->control_page = hole_end;

	return pages;
}


struct page *kimage_alloc_control_pages(struct kimage *image,
					 unsigned int order)
{
	struct page *pages = NULL;

	switch (image->type) {
	case KEXEC_TYPE_DEFAULT:
		pages = kimage_alloc_normal_control_pages(image, order);
		break;
	case KEXEC_TYPE_CRASH:
		pages = kimage_alloc_crash_control_pages(image, order);
		break;
	}

	return pages;
}

static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
{
	if (*image->entry != 0)
		image->entry++;

	if (image->entry == image->last_entry) {
		kimage_entry_t *ind_page;
		struct page *page;

		page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
		if (!page)
			return -ENOMEM;

		ind_page = page_address(page);
		*image->entry = virt_to_phys(ind_page) | IND_INDIRECTION;
		image->entry = ind_page;
		image->last_entry = ind_page +
				((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
	}
	*image->entry = entry;
	image->entry++;
	*image->entry = 0;

	return 0;
}

static int kimage_set_destination(struct kimage *image,
				   unsigned long destination)
{
	int result;

	destination &= PAGE_MASK;
	result = kimage_add_entry(image, destination | IND_DESTINATION);
	if (result == 0)
		image->destination = destination;

	return result;
}


static int kimage_add_page(struct kimage *image, unsigned long page)
{
	int result;

	page &= PAGE_MASK;
	result = kimage_add_entry(image, page | IND_SOURCE);
	if (result == 0)
		image->destination += PAGE_SIZE;

	return result;
}


static void kimage_free_extra_pages(struct kimage *image)
{
	/* Walk through and free any extra destination pages I may have */
	kimage_free_page_list(&image->dest_pages);

	/* Walk through and free any unusable pages I have cached */
	kimage_free_page_list(&image->unuseable_pages);

}
static void kimage_terminate(struct kimage *image)
{
	if (*image->entry != 0)
		image->entry++;

	*image->entry = IND_DONE;
}

#define for_each_kimage_entry(image, ptr, entry) \
	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
		ptr = (entry & IND_INDIRECTION)? \
			phys_to_virt((entry & PAGE_MASK)): ptr +1)

static void kimage_free_entry(kimage_entry_t entry)
{
	struct page *page;

	page = pfn_to_page(entry >> PAGE_SHIFT);
	kimage_free_pages(page);
}

static void kimage_free(struct kimage *image)
{
	kimage_entry_t *ptr, entry;
	kimage_entry_t ind = 0;

	if (!image)
		return;

	kimage_free_extra_pages(image);
	for_each_kimage_entry(image, ptr, entry) {
		if (entry & IND_INDIRECTION) {
			/* Free the previous indirection page */
			if (ind & IND_INDIRECTION)
				kimage_free_entry(ind);
			/* Save this indirection page until we are
			 * done with it.
			 */
			ind = entry;
		}
		else if (entry & IND_SOURCE)
			kimage_free_entry(entry);
	}
	/* Free the final indirection page */
	if (ind & IND_INDIRECTION)
		kimage_free_entry(ind);

	/* Handle any machine specific cleanup */
	machine_kexec_cleanup(image);

	/* Free the kexec control pages... */
	kimage_free_page_list(&image->control_pages);
	kfree(image);
}

static kimage_entry_t *kimage_dst_used(struct kimage *image,
					unsigned long page)
{
	kimage_entry_t *ptr, entry;
	unsigned long destination = 0;

	for_each_kimage_entry(image, ptr, entry) {
		if (entry & IND_DESTINATION)
			destination = entry & PAGE_MASK;
		else if (entry & IND_SOURCE) {
			if (page == destination)
				return ptr;
			destination += PAGE_SIZE;
		}
	}

	return NULL;
}

static struct page *kimage_alloc_page(struct kimage *image,
					gfp_t gfp_mask,
					unsigned long destination)
{
	/*
	 * Here we implement safeguards to ensure that a source page
	 * is not copied to its destination page before the data on
	 * the destination page is no longer useful.
	 *
	 * To do this we maintain the invariant that a source page is
	 * either its own destination page, or it is not a
	 * destination page at all.
	 *
	 * That is slightly stronger than required, but the proof
	 * that no problems will occur is trivial, and the
	 * implementation is simple to verify.
	 *
	 * When allocating all pages normally this algorithm will run
	 * in O(N) time, but in the worst case it will run in O(N^2)
	 * time.  If the runtime is a problem the data structures can
	 * be fixed.
	 */
	struct page *page;
	unsigned long addr;

	/*
	 * Walk through the list of destination pages, and see if I
	 * have a match.
	 */
	list_for_each_entry(page, &image->dest_pages, lru) {
		addr = page_to_pfn(page) << PAGE_SHIFT;
		if (addr == destination) {
			list_del(&page->lru);
			return page;
		}
	}
	page = NULL;
	while (1) {
		kimage_entry_t *old;

		/* Allocate a page, if we run out of memory give up */
		page = kimage_alloc_pages(gfp_mask, 0);
		if (!page)
			return NULL;
		/* If the page cannot be used file it away */
		if (page_to_pfn(page) >
				(KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
			list_add(&page->lru, &image->unuseable_pages);
			continue;
		}
		addr = page_to_pfn(page) << PAGE_SHIFT;

		/* If it is the destination page we want, use it */
		if (addr == destination)
			break;

		/* If the page is not a destination page use it */
		if (!kimage_is_destination_range(image, addr,
						  addr + PAGE_SIZE))
			break;

		/*
		 * I know that the page is someone's destination page.
		 * See if there is already a source page for this
		 * destination page.  And if so swap the source pages.
		 */
		old = kimage_dst_used(image, addr);
		if (old) {
			/* If so move it */
			unsigned long old_addr;
			struct page *old_page;

			old_addr = *old & PAGE_MASK;
			old_page = pfn_to_page(old_addr >> PAGE_SHIFT);
			copy_highpage(page, old_page);
			*old = addr | (*old & ~PAGE_MASK);

			/* The old page I have found cannot be a
			 * destination page, so return it.
			 */
			addr = old_addr;
			page = old_page;
			break;
		}
		else {
			/* Place the page on the destination list; I
			 * will use it later.
			 */
			list_add(&page->lru, &image->dest_pages);
		}
	}

	return page;
}

static int kimage_load_normal_segment(struct kimage *image,
					struct kexec_segment *segment)
{
	unsigned long maddr;
	unsigned long ubytes, mbytes;
	int result;
	unsigned char __user *buf;

	result = 0;
	buf = segment->buf;
	ubytes = segment->bufsz;
	mbytes = segment->memsz;
	maddr = segment->mem;

	result = kimage_set_destination(image, maddr);
	if (result < 0)
		goto out;

	while (mbytes) {
		struct page *page;
		char *ptr;
		size_t uchunk, mchunk;

		page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
		if (!page) {
			result  = -ENOMEM;
			goto out;
		}
		result = kimage_add_page(image, page_to_pfn(page)
								<< PAGE_SHIFT);
		if (result < 0)
			goto out;

		ptr = kmap(page);
		/* Start with a clear page */
		memset(ptr, 0, PAGE_SIZE);
		ptr += maddr & ~PAGE_MASK;
		mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK);
		if (mchunk > mbytes)
			mchunk = mbytes;

		uchunk = mchunk;
		if (uchunk > ubytes)
			uchunk = ubytes;

		result = copy_from_user(ptr, buf, uchunk);
		kunmap(page);
		if (result) {
			result = (result < 0) ? result : -EIO;
			goto out;
		}
		ubytes -= uchunk;
		maddr  += mchunk;
		buf    += mchunk;
		mbytes -= mchunk;
	}
out:
	return result;
}

static int kimage_load_crash_segment(struct kimage *image,
					struct kexec_segment *segment)
{
	/* For crash dump kernels we simply copy the data from
	 * user space to its destination.
	 * We do things a page at a time for the sake of kmap.
	 */
	unsigned long maddr;
	unsigned long ubytes, mbytes;
	int result;
	unsigned char __user *buf;

	result = 0;
	buf = segment->buf;
	ubytes = segment->bufsz;
	mbytes = segment->memsz;
	maddr = segment->mem;
	while (mbytes) {
		struct page *page;
		char *ptr;
		size_t uchunk, mchunk;

		page = pfn_to_page(maddr >> PAGE_SHIFT);
		if (!page) {
			result  = -ENOMEM;
			goto out;
		}
		ptr = kmap(page);
		ptr += maddr & ~PAGE_MASK;
		mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK);
		if (mchunk > mbytes)
			mchunk = mbytes;

		uchunk = mchunk;
		if (uchunk > ubytes) {
			uchunk = ubytes;
			/* Zero the trailing part of the page */
			memset(ptr + uchunk, 0, mchunk - uchunk);
		}
		result = copy_from_user(ptr, buf, uchunk);
		kexec_flush_icache_page(page);
		kunmap(page);
		if (result) {
			result = (result < 0) ? result : -EIO;
			goto out;
		}
		ubytes -= uchunk;
		maddr  += mchunk;
		buf    += mchunk;
		mbytes -= mchunk;
	}
out:
	return result;
}

static int kimage_load_segment(struct kimage *image,
				struct kexec_segment *segment)
{
	int result = -ENOMEM;

	switch (image->type) {
	case KEXEC_TYPE_DEFAULT:
		result = kimage_load_normal_segment(image, segment);
		break;
	case KEXEC_TYPE_CRASH:
		result = kimage_load_crash_segment(image, segment);
		break;
	}

	return result;
}

/*
 * Exec Kernel system call: for obvious reasons only root may call it.
 *
 * This call breaks up into three pieces.
 * - A generic part which loads the new kernel from the current
 *   address space, and very carefully places the data in the
 *   allocated pages.
 *
 * - A generic part that interacts with the kernel and tells all of
 *   the devices to shut down.  Preventing on-going DMAs, and placing
 *   the devices in a consistent state so a later kernel can
 *   reinitialize them.
 *
 * - A machine specific part that includes the syscall number
 *   and then copies the image to its final destination and
 *   jumps into the image at entry.
 *
 * kexec does not sync, or unmount filesystems, so if you need
 * that to happen you need to do that yourself.
 */
struct kimage *kexec_image;
struct kimage *kexec_crash_image;
/*
 * A home grown binary mutex.
 * Nothing can wait so this mutex is safe to use
 * in interrupt context :)
 */
static int kexec_lock;

asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments,
				struct kexec_segment __user *segments,
				unsigned long flags)
{
	struct kimage **dest_image, *image;
	int locked;
	int result;

	/* We only trust the superuser with rebooting the system. */
	if (!capable(CAP_SYS_BOOT))
		return -EPERM;

	/*
	 * Verify we have a legal set of flags
	 * This leaves us room for future extensions.
	 */
	if ((flags & KEXEC_FLAGS) != (flags & ~KEXEC_ARCH_MASK))
		return -EINVAL;

	/* Verify we are on the appropriate architecture */
	if (((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH) &&
		((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))
		return -EINVAL;

	/* Put an artificial cap on the number
	 * of segments passed to kexec_load.
	 */
	if (nr_segments > KEXEC_SEGMENT_MAX)
		return -EINVAL;

	image = NULL;
	result = 0;

	/* Because we write directly to the reserved memory
	 * region when loading crash kernels we need a mutex here to
	 * prevent multiple crash kernels from attempting to load
	 * simultaneously, and to prevent a crash kernel from loading
	 * over the top of an in-use crash kernel.
	 *
	 * KISS: always take the mutex.
	 */
	locked = xchg(&kexec_lock, 1);
	if (locked)
		return -EBUSY;

	dest_image = &kexec_image;
	if (flags & KEXEC_ON_CRASH)
		dest_image = &kexec_crash_image;
	if (nr_segments > 0) {
		unsigned long i;

		/* Loading another kernel to reboot into */
		if ((flags & KEXEC_ON_CRASH) == 0)
			result = kimage_normal_alloc(&image, entry,
							nr_segments, segments);
		/* Loading another kernel to switch to if this one crashes */
		else if (flags & KEXEC_ON_CRASH) {
			/* Free any current crash dump kernel before
			 * we corrupt it.
			 */
			kimage_free(xchg(&kexec_crash_image, NULL));
			result = kimage_crash_alloc(&image, entry,
						     nr_segments, segments);
		}
		if (result)
			goto out;

		if (flags & KEXEC_PRESERVE_CONTEXT)
			image->preserve_context = 1;
		result = machine_kexec_prepare(image);
		if (result)
			goto out;

		for (i = 0; i < nr_segments; i++) {
			result = kimage_load_segment(image, &image->segment[i]);
			if (result)
				goto out;
		}
		kimage_terminate(image);
	}
	/* Install the new kernel, and uninstall the old */
	image = xchg(dest_image, image);

out:
	locked = xchg(&kexec_lock, 0); /* Release the mutex */
	BUG_ON(!locked);
	kimage_free(image);

	return result;
}

#ifdef CONFIG_COMPAT
asmlinkage long compat_sys_kexec_load(unsigned long entry,
				unsigned long nr_segments,
				struct compat_kexec_segment __user *segments,
				unsigned long flags)
{
	struct compat_kexec_segment in;
	struct kexec_segment out, __user *ksegments;
	unsigned long i, result;

	/* Don't allow clients that don't understand the native
	 * architecture to do anything.
	 */
	if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT)
		return -EINVAL;

	if (nr_segments > KEXEC_SEGMENT_MAX)
		return -EINVAL;

	ksegments = compat_alloc_user_space(nr_segments * sizeof(out));
	for (i = 0; i < nr_segments; i++) {
		result = copy_from_user(&in, &segments[i], sizeof(in));
		if (result)
			return -EFAULT;

		out.buf   = compat_ptr(in.buf);
		out.bufsz = in.bufsz;
		out.mem   = in.mem;
		out.memsz = in.memsz;

		result = copy_to_user(&ksegments[i], &out, sizeof(out));
		if (result)
			return -EFAULT;
	}

	return sys_kexec_load(entry, nr_segments, ksegments, flags);
}
#endif

void crash_kexec(struct pt_regs *regs)
{
	int locked;

	/* Take the kexec_lock here to prevent sys_kexec_load
	 * running on one cpu from replacing the crash kernel
	 * we are using after a panic on a different cpu.
	 *
	 * If the crash kernel was not located in a fixed area
	 * of memory the xchg(&kexec_crash_image) would be
	 * sufficient.  But since I reuse the memory...
	 */
	locked = xchg(&kexec_lock, 1);
	if (!locked) {
		if (kexec_crash_image) {
			struct pt_regs fixed_regs;
			crash_setup_regs(&fixed_regs, regs);
			crash_save_vmcoreinfo();
			machine_crash_shutdown(&fixed_regs);
			machine_kexec(kexec_crash_image);
		}
		locked = xchg(&kexec_lock, 0);
		BUG_ON(!locked);
	}
}

static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
			    size_t data_len)
{
	struct elf_note note;

	note.n_namesz = strlen(name) + 1;
	note.n_descsz = data_len;
	note.n_type   = type;
	memcpy(buf, &note, sizeof(note));
	buf += (sizeof(note) + 3)/4;
	memcpy(buf, name, note.n_namesz);
	buf += (note.n_namesz + 3)/4;
	memcpy(buf, data, note.n_descsz);
	buf += (note.n_descsz + 3)/4;

	return buf;
}

static void final_note(u32 *buf)
{
	struct elf_note note;

	note.n_namesz = 0;
	note.n_descsz = 0;
	note.n_type   = 0;
	memcpy(buf, &note, sizeof(note));
}

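/*
 * An illustrative view of the buffer the two helpers above produce
 * (every field padded up to a 4-byte boundary, as in append_elf_note()):
 *
 *	| n_namesz | n_descsz | n_type | name... | data... |   per note,
 *
 * terminated by final_note(), which writes one all-zero elf_note header.
 */
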
void crash_save_cpu(struct pt_regs *regs, int cpu)
{
	struct elf_prstatus prstatus;
	u32 *buf;

	if ((cpu < 0) || (cpu >= NR_CPUS))
		return;

	/* Using ELF notes here is opportunistic.
	 * I need a well defined structure format
	 * for the data I pass, and I need tags
	 * on the data to indicate what information I have
	 * squirrelled away.  ELF notes happen to provide
	 * all of that, so there is no need to invent something new.
	 */
	buf = (u32*)per_cpu_ptr(crash_notes, cpu);
	if (!buf)
		return;
	memset(&prstatus, 0, sizeof(prstatus));
	prstatus.pr_pid = current->pid;
	elf_core_copy_regs(&prstatus.pr_reg, regs);
	buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
			      &prstatus, sizeof(prstatus));
	final_note(buf);
}

static int __init crash_notes_memory_init(void)
{
	/* Allocate memory for saving cpu registers. */
	crash_notes = alloc_percpu(note_buf_t);
	if (!crash_notes) {
		printk("Kexec: Memory allocation for saving cpu register"
			" states failed\n");
		return -ENOMEM;
	}
	return 0;
}
module_init(crash_notes_memory_init)


/*
 * parsing the "crashkernel" commandline
 *
 * this code is intended to be called from architecture specific code
 */


/*
 * This function parses command lines in the format
 *
 *	crashkernel=ramsize-range:size[,...][@offset]
 *
 * The function returns 0 on success and -EINVAL on failure.
 */
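/*
 * An illustrative example (not taken from a real configuration):
 *
 *	crashkernel=512M-2G:64M,2G-:128M@16M
 *
 * reserves 64M when system RAM is at least 512M but below 2G, 128M when
 * it is 2G or more, and places the reservation at physical address 16M.
 */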
static int __init parse_crashkernel_mem(char *cmdline,
					unsigned long long system_ram,
					unsigned long long *crash_size,
					unsigned long long *crash_base)
{
	char *cur = cmdline, *tmp;

	/* for each entry of the comma-separated list */
	do {
		unsigned long long start, end = ULLONG_MAX, size;

		/* get the start of the range */
		start = memparse(cur, &tmp);
		if (cur == tmp) {
			pr_warning("crashkernel: Memory value expected\n");
			return -EINVAL;
		}
		cur = tmp;
		if (*cur != '-') {
			pr_warning("crashkernel: '-' expected\n");
			return -EINVAL;
		}
		cur++;

		/* if no ':' is here, then we read the end */
		if (*cur != ':') {
			end = memparse(cur, &tmp);
			if (cur == tmp) {
				pr_warning("crashkernel: Memory "
						"value expected\n");
				return -EINVAL;
			}
			cur = tmp;
			if (end <= start) {
				pr_warning("crashkernel: end <= start\n");
				return -EINVAL;
			}
		}

		if (*cur != ':') {
			pr_warning("crashkernel: ':' expected\n");
			return -EINVAL;
		}
		cur++;

		size = memparse(cur, &tmp);
		if (cur == tmp) {
			pr_warning("Memory value expected\n");
			return -EINVAL;
		}
		cur = tmp;
		if (size >= system_ram) {
			pr_warning("crashkernel: invalid size\n");
			return -EINVAL;
		}

		/* match ? */
		if (system_ram >= start && system_ram < end) {
			*crash_size = size;
			break;
		}
	} while (*cur++ == ',');

	if (*crash_size > 0) {
		while (*cur != ' ' && *cur != '@')
			cur++;
		if (*cur == '@') {
			cur++;
			*crash_base = memparse(cur, &tmp);
			if (cur == tmp) {
				pr_warning("Memory value expected "
						"after '@'\n");
				return -EINVAL;
			}
		}
	}

	return 0;
}

/*
 * This function parses "simple" (old) crashkernel command lines like
 *
 *	crashkernel=size[@offset]
 *
 * It returns 0 on success and -EINVAL on failure.
 */
static int __init parse_crashkernel_simple(char *cmdline,
					   unsigned long long *crash_size,
					   unsigned long long *crash_base)
{
	char *cur = cmdline;

	*crash_size = memparse(cmdline, &cur);
	if (cmdline == cur) {
		pr_warning("crashkernel: memory value expected\n");
		return -EINVAL;
	}

	if (*cur == '@')
		*crash_base = memparse(cur+1, &cur);

	return 0;
}
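
/*
 * An illustrative example of the "simple" syntax: "crashkernel=64M@16M"
 * requests a 64M reservation at physical address 16M; without the
 * "@offset" part, *crash_base is left at 0 and the caller chooses the
 * placement.
 */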

/*
 * This function is the entry point for command line parsing and should be
 * called from the arch-specific code.
 */
int __init parse_crashkernel(char *cmdline,
			     unsigned long long system_ram,
			     unsigned long long *crash_size,
			     unsigned long long *crash_base)
{
	char *p = cmdline, *ck_cmdline = NULL;
	char *first_colon, *first_space;

	BUG_ON(!crash_size || !crash_base);
	*crash_size = 0;
	*crash_base = 0;

	/* find crashkernel and use the last one if there are more */
	p = strstr(p, "crashkernel=");
	while (p) {
		ck_cmdline = p;
		p = strstr(p+1, "crashkernel=");
	}

	if (!ck_cmdline)
		return -EINVAL;

	ck_cmdline += 12; /* strlen("crashkernel=") */

	/*
	 * if the commandline contains a ':', then that's the extended
	 * syntax -- if not, it must be the classic syntax
	 */
	first_colon = strchr(ck_cmdline, ':');
	first_space = strchr(ck_cmdline, ' ');
	if (first_colon && (!first_space || first_colon < first_space))
		return parse_crashkernel_mem(ck_cmdline, system_ram,
				crash_size, crash_base);
	else
		return parse_crashkernel_simple(ck_cmdline, crash_size,
				crash_base);

	return 0;
}


void crash_save_vmcoreinfo(void)
{
	u32 *buf;

	if (!vmcoreinfo_size)
		return;

	vmcoreinfo_append_str("CRASHTIME=%ld", get_seconds());

	buf = (u32 *)vmcoreinfo_note;

	buf = append_elf_note(buf, VMCOREINFO_NOTE_NAME, 0, vmcoreinfo_data,
			      vmcoreinfo_size);

	final_note(buf);
}

void vmcoreinfo_append_str(const char *fmt, ...)
{
	va_list args;
	char buf[0x50];
	int r;

	va_start(args, fmt);
	r = vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);

	if (r + vmcoreinfo_size > vmcoreinfo_max_size)
		r = vmcoreinfo_max_size - vmcoreinfo_size;

	memcpy(&vmcoreinfo_data[vmcoreinfo_size], buf, r);

	vmcoreinfo_size += r;
}

/*
 * provide an empty default implementation here -- architecture
 * code may override this
 */
void __attribute__ ((weak)) arch_crash_save_vmcoreinfo(void)
{}

unsigned long __attribute__ ((weak)) paddr_vmcoreinfo_note(void)
{
	return __pa((unsigned long)(char *)&vmcoreinfo_note);
}

static int __init crash_save_vmcoreinfo_init(void)
{
	VMCOREINFO_OSRELEASE(init_uts_ns.name.release);
	VMCOREINFO_PAGESIZE(PAGE_SIZE);

	VMCOREINFO_SYMBOL(init_uts_ns);
	VMCOREINFO_SYMBOL(node_online_map);
	VMCOREINFO_SYMBOL(swapper_pg_dir);
	VMCOREINFO_SYMBOL(_stext);

#ifndef CONFIG_NEED_MULTIPLE_NODES
	VMCOREINFO_SYMBOL(mem_map);
	VMCOREINFO_SYMBOL(contig_page_data);
#endif
#ifdef CONFIG_SPARSEMEM
	VMCOREINFO_SYMBOL(mem_section);
	VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
	VMCOREINFO_STRUCT_SIZE(mem_section);
	VMCOREINFO_OFFSET(mem_section, section_mem_map);
#endif
	VMCOREINFO_STRUCT_SIZE(page);
	VMCOREINFO_STRUCT_SIZE(pglist_data);
	VMCOREINFO_STRUCT_SIZE(zone);
	VMCOREINFO_STRUCT_SIZE(free_area);
	VMCOREINFO_STRUCT_SIZE(list_head);
	VMCOREINFO_SIZE(nodemask_t);
	VMCOREINFO_OFFSET(page, flags);
	VMCOREINFO_OFFSET(page, _count);
	VMCOREINFO_OFFSET(page, mapping);
	VMCOREINFO_OFFSET(page, lru);
	VMCOREINFO_OFFSET(pglist_data, node_zones);
	VMCOREINFO_OFFSET(pglist_data, nr_zones);
#ifdef CONFIG_FLAT_NODE_MEM_MAP
	VMCOREINFO_OFFSET(pglist_data, node_mem_map);
#endif
	VMCOREINFO_OFFSET(pglist_data, node_start_pfn);
	VMCOREINFO_OFFSET(pglist_data, node_spanned_pages);
	VMCOREINFO_OFFSET(pglist_data, node_id);
	VMCOREINFO_OFFSET(zone, free_area);
	VMCOREINFO_OFFSET(zone, vm_stat);
	VMCOREINFO_OFFSET(zone, spanned_pages);
	VMCOREINFO_OFFSET(free_area, free_list);
	VMCOREINFO_OFFSET(list_head, next);
	VMCOREINFO_OFFSET(list_head, prev);
	VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER);
	VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES);
	VMCOREINFO_NUMBER(NR_FREE_PAGES);
	VMCOREINFO_NUMBER(PG_lru);
	VMCOREINFO_NUMBER(PG_private);
	VMCOREINFO_NUMBER(PG_swapcache);

	arch_crash_save_vmcoreinfo();

	return 0;
}

module_init(crash_save_vmcoreinfo_init)

/**
 * kernel_kexec - reboot the system
 *
 * Move into place and start executing a preloaded standalone
 * executable.  If nothing was preloaded return an error.
 */
int kernel_kexec(void)
{
	int error = 0;

	if (xchg(&kexec_lock, 1))
		return -EBUSY;
	if (!kexec_image) {
		error = -EINVAL;
		goto Unlock;
	}

	if (kexec_image->preserve_context) {
#ifdef CONFIG_KEXEC_JUMP
		local_irq_disable();
		save_processor_state();
#endif
	} else {
		blocking_notifier_call_chain(&reboot_notifier_list,
					     SYS_RESTART, NULL);
		system_state = SYSTEM_RESTART;
		device_shutdown();
		sysdev_shutdown();
		printk(KERN_EMERG "Starting new kernel\n");
		machine_shutdown();
	}

	machine_kexec(kexec_image);

	if (kexec_image->preserve_context) {
#ifdef CONFIG_KEXEC_JUMP
		restore_processor_state();
		local_irq_enable();
#endif
	}

 Unlock:
	xchg(&kexec_lock, 0);

	return error;
}