/*
 * Copyright (C) 2008, 2009 Intel Corporation
 * Authors: Andi Kleen, Fengguang Wu
 *
 * This software may be redistributed and/or modified under the terms of
 * the GNU General Public License ("GPL") version 2 only as published by the
 * Free Software Foundation.
 *
 * High level machine check handler. Handles pages reported by the
 * hardware as being corrupted, usually due to a multi-bit ECC memory or
 * cache failure.
 *
 * In addition there is a "soft offline" entry point that allows stopping the
 * use of not-yet-corrupted but suspicious pages without killing anything.
 *
 * Handles page cache pages in various states. The tricky part
 * here is that we can access any page asynchronously with respect to
 * other VM users, because memory failures could happen anytime and
 * anywhere. This could violate some of their assumptions. This is why
 * this code has to be extremely careful. Generally it tries to use
 * normal locking rules, as in get the standard locks, even if that means
 * the error handling takes potentially a long time.
 *
 * It can be very tempting to add handling for obscure cases here.
 * In general any code for handling new cases should only be added iff:
 * - You know how to test it.
 * - You have a test that can be added to mce-test
 *   https://git.kernel.org/cgit/utils/cpu/mce/mce-test.git/
 * - The case actually shows up as a frequent (top 10) page state in
 *   tools/vm/page-types when running a real workload.
 *
 * There are several operations here with exponential complexity because
 * of unsuitable VM data structures. For example the operation to map back
 * from RMAP chains to processes has to walk the complete process list and
 * has non-linear complexity in the number of processes. But since memory
 * corruptions are rare we hope to get away with this. This avoids impacting
 * the core VM.
 */

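/*
 * Besides the kernel-internal entry points below, both flavours are
 * reachable from userspace through sysfs (a sketch, assuming the usual
 * layout provided by drivers/base/memory.c; the value written is the
 * physical address of the page):
 *
 *	echo $PHYS_ADDR > /sys/devices/system/memory/soft_offline_page
 *	echo $PHYS_ADDR > /sys/devices/system/memory/hard_offline_page
 */
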
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/kernel-page-flags.h>
#include <linux/sched.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/export.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/backing-dev.h>
#include <linux/migrate.h>
#include <linux/page-isolation.h>
#include <linux/suspend.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memory_hotplug.h>
#include <linux/mm_inline.h>
#include <linux/kfifo.h>
#include <linux/ratelimit.h>
#include "internal.h"
#include "ras/ras_event.h"

int sysctl_memory_failure_early_kill __read_mostly = 0;

int sysctl_memory_failure_recovery __read_mostly = 1;

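/*
 * Both knobs are exposed through sysctl; a minimal usage sketch
 * (see Documentation/sysctl/vm.txt for the authoritative description):
 *
 *	# send SIGBUS to mappers as soon as a page is poisoned
 *	echo 1 > /proc/sys/vm/memory_failure_early_kill
 *	# 0 would panic on uncorrected errors instead of recovering
 *	echo 1 > /proc/sys/vm/memory_failure_recovery
 */
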
atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);

#if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)

u32 hwpoison_filter_enable = 0;
u32 hwpoison_filter_dev_major = ~0U;
u32 hwpoison_filter_dev_minor = ~0U;
u64 hwpoison_filter_flags_mask;
u64 hwpoison_filter_flags_value;
EXPORT_SYMBOL_GPL(hwpoison_filter_enable);
EXPORT_SYMBOL_GPL(hwpoison_filter_dev_major);
EXPORT_SYMBOL_GPL(hwpoison_filter_dev_minor);
EXPORT_SYMBOL_GPL(hwpoison_filter_flags_mask);
EXPORT_SYMBOL_GPL(hwpoison_filter_flags_value);

static int hwpoison_filter_dev(struct page *p)
{
	struct address_space *mapping;
	dev_t dev;

	if (hwpoison_filter_dev_major == ~0U &&
	    hwpoison_filter_dev_minor == ~0U)
		return 0;

	/*
	 * page_mapping() does not accept slab pages.
	 */
	if (PageSlab(p))
		return -EINVAL;

	mapping = page_mapping(p);
	if (mapping == NULL || mapping->host == NULL)
		return -EINVAL;

	dev = mapping->host->i_sb->s_dev;
	if (hwpoison_filter_dev_major != ~0U &&
	    hwpoison_filter_dev_major != MAJOR(dev))
		return -EINVAL;
	if (hwpoison_filter_dev_minor != ~0U &&
	    hwpoison_filter_dev_minor != MINOR(dev))
		return -EINVAL;

	return 0;
}

static int hwpoison_filter_flags(struct page *p)
{
	if (!hwpoison_filter_flags_mask)
		return 0;

	if ((stable_page_flags(p) & hwpoison_filter_flags_mask) ==
				    hwpoison_filter_flags_value)
		return 0;
	else
		return -EINVAL;
}

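/*
 * Worked example for the flags filter above (illustrative only): to
 * restrict handling to dirty LRU pages, set both mask and value to
 *
 *	(1UL << KPF_DIRTY) | (1UL << KPF_LRU)
 *
 * using the KPF_* bit numbers from include/uapi/linux/kernel-page-flags.h,
 * which is the bit layout that stable_page_flags() reports.
 */
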
/*
 * This allows stress tests to limit test scope to a collection of tasks
 * by putting them under some memcg. This prevents killing unrelated/important
 * processes such as /sbin/init. Note that the target task may share clean
 * pages with init (eg. libc text), which is harmless. If the target task
 * shares _dirty_ pages with another task B, the test scheme must make sure B
 * is also included in the memcg. Lastly, due to race conditions this filter
 * can only guarantee that the page either belongs to the memcg tasks, or is
 * a freed page.
 */
#ifdef CONFIG_MEMCG
u64 hwpoison_filter_memcg;
EXPORT_SYMBOL_GPL(hwpoison_filter_memcg);
static int hwpoison_filter_task(struct page *p)
{
	if (!hwpoison_filter_memcg)
		return 0;

	if (page_cgroup_ino(p) != hwpoison_filter_memcg)
		return -EINVAL;

	return 0;
}
#else
static int hwpoison_filter_task(struct page *p) { return 0; }
#endif

int hwpoison_filter(struct page *p)
{
	if (!hwpoison_filter_enable)
		return 0;

	if (hwpoison_filter_dev(p))
		return -EINVAL;

	if (hwpoison_filter_flags(p))
		return -EINVAL;

	if (hwpoison_filter_task(p))
		return -EINVAL;

	return 0;
}
#else
int hwpoison_filter(struct page *p)
{
	return 0;
}
#endif

EXPORT_SYMBOL_GPL(hwpoison_filter);

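/*
 * When CONFIG_HWPOISON_INJECT is enabled, the filter values above are
 * normally set through the hwpoison-inject module's debugfs files
 * (a sketch, assuming the usual /sys/kernel/debug/hwpoison/ layout):
 *
 *	echo 1 > /sys/kernel/debug/hwpoison/corrupt-filter-enable
 *	echo $PFN > /sys/kernel/debug/hwpoison/corrupt-pfn
 */
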
/*
 * Send a signal to all processes that have the page mapped:
 * ``action optional'' if they are not immediately affected by the error,
 * ``action required'' if the error happened in the current execution context.
 */
static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
			unsigned long pfn, struct page *page, int flags)
{
	struct siginfo si;
	int ret;

	pr_err("Memory failure: %#lx: Killing %s:%d due to hardware memory corruption\n",
		pfn, t->comm, t->pid);
	si.si_signo = SIGBUS;
	si.si_errno = 0;
	si.si_addr = (void *)addr;
#ifdef __ARCH_SI_TRAPNO
	si.si_trapno = trapno;
#endif
	si.si_addr_lsb = compound_order(compound_head(page)) + PAGE_SHIFT;

	if ((flags & MF_ACTION_REQUIRED) && t->mm == current->mm) {
		si.si_code = BUS_MCEERR_AR;
		ret = force_sig_info(SIGBUS, &si, current);
	} else {
		/*
		 * Don't use force here, it's convenient if the signal
		 * can be temporarily blocked.
		 * This could cause a loop when the user sets SIGBUS
		 * to SIG_IGN, but hopefully no one will do that?
		 */
		si.si_code = BUS_MCEERR_AO;
		ret = send_sig_info(SIGBUS, &si, t);	/* synchronous? */
	}
	if (ret < 0)
		pr_info("Memory failure: Error sending signal to %s:%d: %d\n",
			t->comm, t->pid, ret);
	return ret;
}

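/*
 * For reference, a minimal userspace consumer of the BUS_MCEERR_AO
 * signal sent above could look like this (a sketch; error handling
 * omitted):
 *
 *	static void sigbus_handler(int sig, siginfo_t *si, void *ctx)
 *	{
 *		if (si->si_code == BUS_MCEERR_AO) {
 *			size_t len = 1UL << si->si_addr_lsb;
 *			... stop using the len bytes around si->si_addr ...
 *		}
 *	}
 *
 *	struct sigaction sa = {
 *		.sa_sigaction	= sigbus_handler,
 *		.sa_flags	= SA_SIGINFO,
 *	};
 *	sigaction(SIGBUS, &sa, NULL);
 *	prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
 *
 * The prctl() sets PF_MCE_PROCESS|PF_MCE_EARLY, which is what
 * find_early_kill_thread() below looks for, so the process is signaled
 * even when vm.memory_failure_early_kill is off.
 */
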
/*
 * When an unknown page type is encountered, drain as many buffers as possible
 * in the hope of turning the page into an LRU or free page, which we can
 * handle.
 */
void shake_page(struct page *p, int access)
{
	if (!PageSlab(p)) {
		lru_add_drain_all();
		if (PageLRU(p))
			return;
		drain_all_pages(page_zone(p));
		if (PageLRU(p) || is_free_buddy_page(p))
			return;
	}

	/*
	 * Only call drop_slab_node() here (which would also shrink
	 * other caches) if access is not potentially fatal.
	 */
	if (access)
		drop_slab_node(page_to_nid(p));
}
EXPORT_SYMBOL_GPL(shake_page);

/*
 * Kill all processes that have a poisoned page mapped and then isolate
 * the page.
 *
 * General strategy:
 * Find all processes having the page mapped and kill them.
 * But we keep a page reference around so that the page is not
 * actually freed yet.
 * Then stash the page away.
 *
 * There's no convenient way to get back to mapped processes
 * from the VMAs. So do a brute-force search over all
 * running processes.
 *
 * Remember that machine checks are not common (or rather
 * if they are common you have other problems), so this shouldn't
 * be a performance issue.
 *
 * Also there are some races possible while we get from the
 * error detection to actually handling it.
 */

struct to_kill {
	struct list_head nd;
	struct task_struct *tsk;
	unsigned long addr;
	char addr_valid;
};

/*
 * Failure handling: if we can't find or can't kill a process there's
 * not much we can do. We just print a message and ignore otherwise.
 */

/*
 * Schedule a process for later kill.
 * Uses GFP_ATOMIC allocations to avoid potential recursions in the VM.
 * TBD would GFP_NOIO be enough?
 */
static void add_to_kill(struct task_struct *tsk, struct page *p,
		       struct vm_area_struct *vma,
		       struct list_head *to_kill,
		       struct to_kill **tkc)
{
	struct to_kill *tk;

	if (*tkc) {
		tk = *tkc;
		*tkc = NULL;
	} else {
		tk = kmalloc(sizeof(struct to_kill), GFP_ATOMIC);
		if (!tk) {
			pr_err("Memory failure: Out of memory while handling machine check\n");
			return;
		}
	}
	tk->addr = page_address_in_vma(p, vma);
	tk->addr_valid = 1;

	/*
	 * In theory we don't have to kill when the page was
	 * munmapped. But it could also be a mremap. Since that's
	 * likely very rare, kill anyway just out of paranoia, but use
	 * a SIGKILL because the error is not contained anymore.
	 */
	if (tk->addr == -EFAULT) {
		pr_info("Memory failure: Unable to find user space address %lx in %s\n",
			page_to_pfn(p), tsk->comm);
		tk->addr_valid = 0;
	}
	get_task_struct(tsk);
	tk->tsk = tsk;
	list_add_tail(&tk->nd, to_kill);
}

/*
 * Kill the processes that have been collected earlier.
 *
 * Only do anything when FORCEKILL is set, otherwise just free the
 * list (this is used for clean pages which do not need killing).
 * Also when FAIL is set do a force kill because something went
 * wrong earlier.
 */
static void kill_procs(struct list_head *to_kill, int forcekill, int trapno,
			  int fail, struct page *page, unsigned long pfn,
			  int flags)
{
	struct to_kill *tk, *next;

	list_for_each_entry_safe (tk, next, to_kill, nd) {
		if (forcekill) {
			/*
			 * In case something went wrong with munmapping,
			 * make sure the process doesn't catch the
			 * signal and then access the memory. Just kill it.
			 */
			if (fail || tk->addr_valid == 0) {
				pr_err("Memory failure: %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n",
				       pfn, tk->tsk->comm, tk->tsk->pid);
				force_sig(SIGKILL, tk->tsk);
			}

			/*
			 * In theory the process could have mapped
			 * something else on the address in-between. We could
			 * check for that, but we need to tell the
			 * process anyway.
			 */
			else if (kill_proc(tk->tsk, tk->addr, trapno,
					      pfn, page, flags) < 0)
				pr_err("Memory failure: %#lx: Cannot send advisory machine check signal to %s:%d\n",
				       pfn, tk->tsk->comm, tk->tsk->pid);
		}
		put_task_struct(tk->tsk);
		kfree(tk);
	}
}

/*
 * Find a dedicated thread which is supposed to handle SIGBUS(BUS_MCEERR_AO)
 * on behalf of the thread group. Return task_struct of the (first found)
 * dedicated thread if found, and return NULL otherwise.
 *
 * We already hold read_lock(&tasklist_lock) in the caller, so we don't
 * have to call rcu_read_lock/unlock() in this function.
 */
static struct task_struct *find_early_kill_thread(struct task_struct *tsk)
{
	struct task_struct *t;

	for_each_thread(tsk, t)
		if ((t->flags & PF_MCE_PROCESS) && (t->flags & PF_MCE_EARLY))
			return t;
	return NULL;
}

/*
 * Determine whether a given process is an "early kill" process which expects
 * to be signaled when some page under the process is hwpoisoned.
 * Return task_struct of the dedicated thread (main thread unless explicitly
 * specified) if the process is "early kill," and otherwise return NULL.
 */
static struct task_struct *task_early_kill(struct task_struct *tsk,
					   int force_early)
{
	struct task_struct *t;
	if (!tsk->mm)
		return NULL;
	if (force_early)
		return tsk;
	t = find_early_kill_thread(tsk);
	if (t)
		return t;
	if (sysctl_memory_failure_early_kill)
		return tsk;
	return NULL;
}

/*
 * Collect processes when the error hit an anonymous page.
 */
static void collect_procs_anon(struct page *page, struct list_head *to_kill,
			      struct to_kill **tkc, int force_early)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk;
	struct anon_vma *av;
	pgoff_t pgoff;

	av = page_lock_anon_vma_read(page);
	if (av == NULL)	/* Not actually mapped anymore */
		return;

	pgoff = page_to_pgoff(page);
	read_lock(&tasklist_lock);
	for_each_process (tsk) {
		struct anon_vma_chain *vmac;
		struct task_struct *t = task_early_kill(tsk, force_early);

		if (!t)
			continue;
		anon_vma_interval_tree_foreach(vmac, &av->rb_root,
					       pgoff, pgoff) {
			vma = vmac->vma;
			if (!page_mapped_in_vma(page, vma))
				continue;
			if (vma->vm_mm == t->mm)
				add_to_kill(t, page, vma, to_kill, tkc);
		}
	}
	read_unlock(&tasklist_lock);
	page_unlock_anon_vma_read(av);
}

/*
 * Collect processes when the error hit a file mapped page.
 */
static void collect_procs_file(struct page *page, struct list_head *to_kill,
			      struct to_kill **tkc, int force_early)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk;
	struct address_space *mapping = page->mapping;

	i_mmap_lock_read(mapping);
	read_lock(&tasklist_lock);
	for_each_process(tsk) {
		pgoff_t pgoff = page_to_pgoff(page);
		struct task_struct *t = task_early_kill(tsk, force_early);

		if (!t)
			continue;
		vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff,
				      pgoff) {
			/*
			 * Send an early kill signal to tasks where a vma covers
			 * the page but the corrupted page is not necessarily
			 * mapped in its pte.
			 * Assume applications who requested early kill want
			 * to be informed of all such data corruptions.
			 */
			if (vma->vm_mm == t->mm)
				add_to_kill(t, page, vma, to_kill, tkc);
		}
	}
	read_unlock(&tasklist_lock);
	i_mmap_unlock_read(mapping);
}

/*
 * Collect the processes who have the corrupted page mapped to kill.
 * This is done in two steps for locking reasons.
 * First preallocate one tokill structure outside the spin locks,
 * so that we can kill at least one process reasonably reliably.
 */
static void collect_procs(struct page *page, struct list_head *tokill,
				int force_early)
{
	struct to_kill *tk;

	if (!page->mapping)
		return;

	tk = kmalloc(sizeof(struct to_kill), GFP_NOIO);
	if (!tk)
		return;
	if (PageAnon(page))
		collect_procs_anon(page, tokill, &tk, force_early);
	else
		collect_procs_file(page, tokill, &tk, force_early);
	kfree(tk);
}

static const char *action_name[] = {
	[MF_IGNORED] = "Ignored",
	[MF_FAILED] = "Failed",
	[MF_DELAYED] = "Delayed",
	[MF_RECOVERED] = "Recovered",
};

static const char * const action_page_types[] = {
	[MF_MSG_KERNEL]			= "reserved kernel page",
	[MF_MSG_KERNEL_HIGH_ORDER]	= "high-order kernel page",
	[MF_MSG_SLAB]			= "kernel slab page",
	[MF_MSG_DIFFERENT_COMPOUND]	= "different compound page after locking",
	[MF_MSG_POISONED_HUGE]		= "huge page already hardware poisoned",
	[MF_MSG_HUGE]			= "huge page",
	[MF_MSG_FREE_HUGE]		= "free huge page",
	[MF_MSG_UNMAP_FAILED]		= "unmapping failed page",
	[MF_MSG_DIRTY_SWAPCACHE]	= "dirty swapcache page",
	[MF_MSG_CLEAN_SWAPCACHE]	= "clean swapcache page",
	[MF_MSG_DIRTY_MLOCKED_LRU]	= "dirty mlocked LRU page",
	[MF_MSG_CLEAN_MLOCKED_LRU]	= "clean mlocked LRU page",
	[MF_MSG_DIRTY_UNEVICTABLE_LRU]	= "dirty unevictable LRU page",
	[MF_MSG_CLEAN_UNEVICTABLE_LRU]	= "clean unevictable LRU page",
	[MF_MSG_DIRTY_LRU]		= "dirty LRU page",
	[MF_MSG_CLEAN_LRU]		= "clean LRU page",
	[MF_MSG_TRUNCATED_LRU]		= "already truncated LRU page",
	[MF_MSG_BUDDY]			= "free buddy page",
	[MF_MSG_BUDDY_2ND]		= "free buddy page (2nd try)",
	[MF_MSG_UNKNOWN]		= "unknown page",
};

/*
 * XXX: It is possible that a page is isolated from LRU cache,
 * and then kept in swap cache or fails to be removed from page cache.
 * The page count will stop it from being freed by unpoison.
 * Stress tests should be aware of this memory leak problem.
 */
static int delete_from_lru_cache(struct page *p)
{
	if (!isolate_lru_page(p)) {
		/*
		 * Clear sensible page flags, so that the buddy system won't
		 * complain when the page is unpoison-and-freed.
		 */
		ClearPageActive(p);
		ClearPageUnevictable(p);

		/*
		 * Poisoned page might never drop its ref count to 0 so we have
		 * to uncharge it manually from its memcg.
		 */
		mem_cgroup_uncharge(p);

		/*
		 * drop the page count elevated by isolate_lru_page()
		 */
		put_page(p);
		return 0;
	}
	return -EIO;
}

/*
 * Error hit kernel page.
 * Do nothing, try to be lucky and not touch this instead. For a few cases we
 * could be more sophisticated.
 */
static int me_kernel(struct page *p, unsigned long pfn)
{
	return MF_IGNORED;
}

/*
 * Page in unknown state. Do nothing.
 */
static int me_unknown(struct page *p, unsigned long pfn)
{
	pr_err("Memory failure: %#lx: Unknown page state\n", pfn);
	return MF_FAILED;
}

/*
 * Clean (or cleaned) page cache page.
 */
static int me_pagecache_clean(struct page *p, unsigned long pfn)
{
	int err;
	int ret = MF_FAILED;
	struct address_space *mapping;

	delete_from_lru_cache(p);

	/*
	 * For anonymous pages we're done; the only reference left
	 * should be the one m_f() holds.
	 */
	if (PageAnon(p))
		return MF_RECOVERED;

	/*
	 * Now truncate the page in the page cache. This is really
	 * more like a "temporary hole punch".
	 * Don't do this for block devices when someone else
	 * has a reference, because it could be file system metadata
	 * and that's not safe to truncate.
	 */
	mapping = page_mapping(p);
	if (!mapping) {
		/*
		 * Page has been torn down in the meantime
		 */
		return MF_FAILED;
	}

	/*
	 * Truncation is a bit tricky. Enable it per file system for now.
	 *
	 * Open: to take i_mutex or not for this? Right now we don't.
	 */
	if (mapping->a_ops->error_remove_page) {
		err = mapping->a_ops->error_remove_page(mapping, p);
		if (err != 0) {
			pr_info("Memory failure: %#lx: Failed to punch page: %d\n",
				pfn, err);
		} else if (page_has_private(p) &&
				!try_to_release_page(p, GFP_NOIO)) {
			pr_info("Memory failure: %#lx: failed to release buffers\n",
				pfn);
		} else {
			ret = MF_RECOVERED;
		}
	} else {
		/*
		 * If the file system doesn't support it, just invalidate.
		 * This fails on dirty or anything with private pages.
		 */
		if (invalidate_inode_page(p))
			ret = MF_RECOVERED;
		else
			pr_info("Memory failure: %#lx: Failed to invalidate\n",
				pfn);
	}
	return ret;
}

/*
 * Dirty pagecache page.
 * Issues: when the error hit a hole page the error is not properly
 * propagated.
 */
static int me_pagecache_dirty(struct page *p, unsigned long pfn)
{
	struct address_space *mapping = page_mapping(p);

	SetPageError(p);
	/* TBD: print more information about the file. */
	if (mapping) {
		/*
		 * IO error will be reported by write(), fsync(), etc.
		 * who check the mapping.
		 * This way the application knows that something went
		 * wrong with its dirty file data.
		 *
		 * There's one open issue:
		 *
		 * The EIO will be only reported on the next IO
		 * operation and then cleared through the IO map.
		 * Normally Linux has two mechanisms to pass IO error
		 * first through the AS_EIO flag in the address space
		 * and then through the PageError flag in the page.
		 * Since we drop pages on memory failure handling the
		 * only mechanism open to use is through AS_EIO.
		 *
		 * This has the disadvantage that it gets cleared on
		 * the first operation that returns an error, while
		 * the PageError bit is more sticky and only cleared
		 * when the page is reread or dropped. If an
		 * application assumes it will always get error on
		 * fsync, but does other operations on the fd before,
		 * and the page is dropped in between, then the error
		 * will not be properly reported.
		 *
		 * This can already happen even without hwpoisoned
		 * pages: first on metadata IO errors (which only
		 * report through AS_EIO) or when the page is dropped
		 * at the wrong time.
		 *
		 * So right now we assume that the application DTRT on
		 * the first EIO, but we're not worse than other parts
		 * of the kernel.
		 */
		mapping_set_error(mapping, EIO);
	}

	return me_pagecache_clean(p, pfn);
}

/*
 * Clean and dirty swap cache.
 *
 * Dirty swap cache page is tricky to handle. The page could live both in page
 * cache and swap cache (i.e. the page is freshly swapped in). So it could be
 * referenced concurrently by 2 types of PTEs:
 * normal PTEs and swap PTEs. We try to handle them consistently by calling
 * try_to_unmap(TTU_IGNORE_HWPOISON) to convert the normal PTEs to swap PTEs,
 * and then
 *	- clear dirty bit to prevent IO
 *	- remove from LRU
 *	- but keep in the swap cache, so that when we return to it on
 *	  a later page fault, we know the application is accessing
 *	  corrupted data and shall be killed (we installed simple
 *	  interception code in do_swap_page to catch it).
 *
 * Clean swap cache pages can be directly isolated. A later page fault will
 * bring in the known good data from disk.
 */
static int me_swapcache_dirty(struct page *p, unsigned long pfn)
{
	ClearPageDirty(p);
	/* Trigger EIO in shmem: */
	ClearPageUptodate(p);

	if (!delete_from_lru_cache(p))
		return MF_DELAYED;
	else
		return MF_FAILED;
}

static int me_swapcache_clean(struct page *p, unsigned long pfn)
{
	delete_from_swap_cache(p);

	if (!delete_from_lru_cache(p))
		return MF_RECOVERED;
	else
		return MF_FAILED;
}

/*
 * Huge pages. Needs work.
 * Issues:
 * - Error on hugepage is contained in hugepage unit (not in raw page unit.)
 *   To narrow down kill region to one page, we need to break up pmd.
 */
static int me_huge_page(struct page *p, unsigned long pfn)
{
	int res = 0;
	struct page *hpage = compound_head(p);

	if (!PageHuge(hpage))
		return MF_DELAYED;

	/*
	 * We can safely recover from error on free or reserved (i.e.
	 * not in-use) hugepage by dequeuing it from freelist.
	 * To check whether a hugepage is in-use or not, we can't use
	 * page->lru because it can be used in other hugepage operations,
	 * such as __unmap_hugepage_range() and gather_surplus_pages().
	 * So instead we use page_mapping() and PageAnon().
	 */
	if (!(page_mapping(hpage) || PageAnon(hpage))) {
		res = dequeue_hwpoisoned_huge_page(hpage);
		if (!res)
			return MF_RECOVERED;
	}
	return MF_DELAYED;
}

/*
 * Various page states we can handle.
 *
 * A page state is defined by its current page->flags bits.
 * The table matches them in order and calls the right handler.
 *
 * This is quite tricky because we can access page at any time
 * in its life cycle, so all accesses have to be extremely careful.
 *
 * This is not complete. More states could be added.
 * For any missing state don't attempt recovery.
 */

#define dirty		(1UL << PG_dirty)
#define sc		(1UL << PG_swapcache)
#define unevict		(1UL << PG_unevictable)
#define mlock		(1UL << PG_mlocked)
#define writeback	(1UL << PG_writeback)
#define lru		(1UL << PG_lru)
#define swapbacked	(1UL << PG_swapbacked)
#define head		(1UL << PG_head)
#define slab		(1UL << PG_slab)
#define reserved	(1UL << PG_reserved)

static struct page_state {
	unsigned long mask;
	unsigned long res;
	enum mf_action_page_type type;
	int (*action)(struct page *p, unsigned long pfn);
} error_states[] = {
	{ reserved,	reserved,	MF_MSG_KERNEL,	me_kernel },
	/*
	 * free pages are specially detected outside this table:
	 * PG_buddy pages only make a small fraction of all free pages.
	 */

	/*
	 * Could in theory check if slab page is free or if we can drop
	 * currently unused objects without touching them. But just
	 * treat it as standard kernel for now.
	 */
	{ slab,		slab,		MF_MSG_SLAB,	me_kernel },

	{ head,		head,		MF_MSG_HUGE,		me_huge_page },

	{ sc|dirty,	sc|dirty,	MF_MSG_DIRTY_SWAPCACHE,	me_swapcache_dirty },
	{ sc|dirty,	sc,		MF_MSG_CLEAN_SWAPCACHE,	me_swapcache_clean },

	{ mlock|dirty,	mlock|dirty,	MF_MSG_DIRTY_MLOCKED_LRU,	me_pagecache_dirty },
	{ mlock|dirty,	mlock,		MF_MSG_CLEAN_MLOCKED_LRU,	me_pagecache_clean },

	{ unevict|dirty, unevict|dirty,	MF_MSG_DIRTY_UNEVICTABLE_LRU,	me_pagecache_dirty },
	{ unevict|dirty, unevict,	MF_MSG_CLEAN_UNEVICTABLE_LRU,	me_pagecache_clean },

	{ lru|dirty,	lru|dirty,	MF_MSG_DIRTY_LRU,	me_pagecache_dirty },
	{ lru|dirty,	lru,		MF_MSG_CLEAN_LRU,	me_pagecache_clean },

	/*
	 * Catchall entry: must be at end.
	 */
	{ 0,		0,		MF_MSG_UNKNOWN,	me_unknown },
};

#undef dirty
#undef sc
#undef unevict
#undef mlock
#undef writeback
#undef lru
#undef swapbacked
#undef head
#undef slab
#undef reserved

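/*
 * Worked example of the matching above: a clean page on the LRU has
 * PG_lru set and PG_dirty clear, so page->flags & (lru|dirty) == lru.
 * The scan walks past the swapcache/mlocked/unevictable entries (their
 * masks don't match) and stops at { lru|dirty, lru, MF_MSG_CLEAN_LRU,
 * me_pagecache_clean }: the page is handled as a clean LRU page.
 */
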
/*
 * "Dirty/Clean" indication is not 100% accurate due to the possibility of
 * setting PG_dirty outside page lock. See also comment above set_page_dirty().
 */
static void action_result(unsigned long pfn, enum mf_action_page_type type,
			  enum mf_result result)
{
	trace_memory_failure_event(pfn, type, result);

	pr_err("Memory failure: %#lx: recovery action for %s: %s\n",
		pfn, action_page_types[type], action_name[result]);
}

static int page_action(struct page_state *ps, struct page *p,
			unsigned long pfn)
{
	int result;
	int count;

	result = ps->action(p, pfn);

	count = page_count(p) - 1;
	if (ps->action == me_swapcache_dirty && result == MF_DELAYED)
		count--;
	if (count != 0) {
		pr_err("Memory failure: %#lx: %s still referenced by %d users\n",
		       pfn, action_page_types[ps->type], count);
		result = MF_FAILED;
	}
	action_result(pfn, ps->type, result);

	/* Could do more checks here if page looks ok */
	/*
	 * Could adjust zone counters here to correct for the missing page.
	 */

	return (result == MF_RECOVERED || result == MF_DELAYED) ? 0 : -EBUSY;
}

/**
 * get_hwpoison_page() - Get refcount for memory error handling:
 * @page:	raw error page (hit by memory error)
 *
 * Return: 0 if it failed to grab the refcount, otherwise a non-zero
 * value (true).
 */
int get_hwpoison_page(struct page *page)
{
	struct page *head = compound_head(page);

	if (!PageHuge(head) && PageTransHuge(head)) {
		/*
		 * Non-anonymous thp exists only in allocation/free time. We
		 * can't handle such a case correctly, so let's give it up.
		 * This should be better than triggering BUG_ON when kernel
		 * tries to touch the "partially handled" page.
		 */
		if (!PageAnon(head)) {
			pr_err("Memory failure: %#lx: non anonymous thp\n",
				page_to_pfn(page));
			return 0;
		}
	}

	if (get_page_unless_zero(head)) {
		if (head == compound_head(page))
			return 1;

		pr_info("Memory failure: %#lx cannot catch tail\n",
			page_to_pfn(page));
		put_page(head);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(get_hwpoison_page);

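/*
 * Typical calling pattern (a sketch): a successful get_hwpoison_page()
 * must be balanced with put_hwpoison_page() once the caller is done
 * with the page:
 *
 *	if (get_hwpoison_page(p)) {
 *		... examine or handle the page ...
 *		put_hwpoison_page(p);
 *	}
 */
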
/*
 * Do all that is necessary to remove user space mappings. Unmap
 * the pages and send SIGBUS to the processes if the data was dirty.
 */
static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
				  int trapno, int flags, struct page **hpagep)
{
	enum ttu_flags ttu = TTU_UNMAP | TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS;
	struct address_space *mapping;
	LIST_HEAD(tokill);
	int ret;
	int kill = 1, forcekill;
	struct page *hpage = *hpagep;

	/*
	 * Here we are interested only in user-mapped pages, so skip any
	 * other types of pages.
	 */
	if (PageReserved(p) || PageSlab(p))
		return SWAP_SUCCESS;
	if (!(PageLRU(hpage) || PageHuge(p)))
		return SWAP_SUCCESS;

	/*
	 * This check implies we don't kill processes if their pages
	 * are in the swap cache early. Those are always late kills.
	 */
	if (!page_mapped(hpage))
		return SWAP_SUCCESS;

	if (PageKsm(p)) {
		pr_err("Memory failure: %#lx: can't handle KSM pages.\n", pfn);
		return SWAP_FAIL;
	}

	if (PageSwapCache(p)) {
		pr_err("Memory failure: %#lx: keeping poisoned page in swap cache\n",
			pfn);
		ttu |= TTU_IGNORE_HWPOISON;
	}

	/*
	 * Propagate the dirty bit from PTEs to struct page first, because we
	 * need this to decide if we should kill or just drop the page.
	 * XXX: the dirty test could be racy: set_page_dirty() may not always
	 * be called inside page lock (it's recommended but not enforced).
	 */
	mapping = page_mapping(hpage);
	if (!(flags & MF_MUST_KILL) && !PageDirty(hpage) && mapping &&
	    mapping_cap_writeback_dirty(mapping)) {
		if (page_mkclean(hpage)) {
			SetPageDirty(hpage);
		} else {
			kill = 0;
			ttu |= TTU_IGNORE_HWPOISON;
			pr_info("Memory failure: %#lx: corrupted page was clean: dropped without side effects\n",
				pfn);
		}
	}

	/*
	 * First collect all the processes that have the page
	 * mapped in dirty form. This has to be done before try_to_unmap,
	 * because ttu takes the rmap data structures down.
	 *
	 * Error handling: We ignore errors here because
	 * there's nothing that can be done.
	 */
	if (kill)
		collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED);

	ret = try_to_unmap(hpage, ttu, NULL);
	if (ret != SWAP_SUCCESS)
		pr_err("Memory failure: %#lx: failed to unmap page (mapcount=%d)\n",
		       pfn, page_mapcount(hpage));

	/*
	 * Now that the dirty bit has been propagated to the
	 * struct page and all unmaps are done we can decide if
	 * killing is needed or not. Only kill when the page
	 * was dirty or the process is not restartable,
	 * otherwise the tokill list is merely
	 * freed. When there was a problem unmapping earlier
	 * use a more forceful uncatchable kill to prevent
	 * any accesses to the poisoned memory.
	 */
	forcekill = PageDirty(hpage) || (flags & MF_MUST_KILL);
	kill_procs(&tokill, forcekill, trapno,
		      ret != SWAP_SUCCESS, p, pfn, flags);

	return ret;
}

static void set_page_hwpoison_huge_page(struct page *hpage)
{
	int i;
	int nr_pages = 1 << compound_order(hpage);
	for (i = 0; i < nr_pages; i++)
		SetPageHWPoison(hpage + i);
}

static void clear_page_hwpoison_huge_page(struct page *hpage)
{
	int i;
	int nr_pages = 1 << compound_order(hpage);
	for (i = 0; i < nr_pages; i++)
		ClearPageHWPoison(hpage + i);
}

/**
 * memory_failure - Handle memory failure of a page.
 * @pfn: Page Number of the corrupted page
 * @trapno: Trap number reported in the signal to user space.
 * @flags: fine tune action taken
 *
 * This function is called by the low level machine check code
 * of an architecture when it detects hardware memory corruption
 * of a page. It tries its best to recover, which includes
 * dropping pages, killing processes etc.
 *
 * The function is primarily of use for corruptions that
 * happen outside the current execution context (e.g. when
 * detected by a background scrubber)
 *
 * Must run in process context (e.g. a work queue) with interrupts
 * enabled and no spinlocks held.
 */
int memory_failure(unsigned long pfn, int trapno, int flags)
{
	struct page_state *ps;
	struct page *p;
	struct page *hpage;
	struct page *orig_head;
	int res;
	unsigned int nr_pages;
	unsigned long page_flags;

	if (!sysctl_memory_failure_recovery)
		panic("Memory failure from trap %d on page %lx", trapno, pfn);

	if (!pfn_valid(pfn)) {
		pr_err("Memory failure: %#lx: memory outside kernel control\n",
			pfn);
		return -ENXIO;
	}

	p = pfn_to_page(pfn);
	orig_head = hpage = compound_head(p);
	if (TestSetPageHWPoison(p)) {
		pr_err("Memory failure: %#lx: already hardware poisoned\n",
			pfn);
		return 0;
	}

	/*
	 * Currently errors on hugetlbfs pages are measured in hugepage units,
	 * so nr_pages should be 1 << compound_order. OTOH when errors are on
	 * transparent hugepages, they are supposed to be split and error
	 * measurement is done in normal page units. So nr_pages should be one
	 * in this case.
	 */
	if (PageHuge(p))
		nr_pages = 1 << compound_order(hpage);
	else /* normal page or thp */
		nr_pages = 1;
	num_poisoned_pages_add(nr_pages);

	/*
	 * We need/can do nothing about count=0 pages.
	 * 1) it's a free page, and therefore in safe hand:
	 *    prep_new_page() will be the gate keeper.
	 * 2) it's a free hugepage, which is also safe:
	 *    an affected hugepage will be dequeued from hugepage freelist,
	 *    so there's no concern about reusing it ever after.
	 * 3) it's part of a non-compound high order page.
	 *    Implies some kernel user: cannot stop them from
	 *    R/W the page; let's pray that the page has been
	 *    used and will be freed some time later.
	 * In fact it's dangerous to directly bump up page count from 0,
	 * that may make page_freeze_refs()/page_unfreeze_refs() mismatch.
	 */
	if (!(flags & MF_COUNT_INCREASED) && !get_hwpoison_page(p)) {
		if (is_free_buddy_page(p)) {
			action_result(pfn, MF_MSG_BUDDY, MF_DELAYED);
			return 0;
		} else if (PageHuge(hpage)) {
			/*
			 * Check "filter hit" and "race with other subpage."
			 */
			lock_page(hpage);
			if (PageHWPoison(hpage)) {
				if ((hwpoison_filter(p) && TestClearPageHWPoison(p))
				    || (p != hpage && TestSetPageHWPoison(hpage))) {
					num_poisoned_pages_sub(nr_pages);
					unlock_page(hpage);
					return 0;
				}
			}
			set_page_hwpoison_huge_page(hpage);
			res = dequeue_hwpoisoned_huge_page(hpage);
			action_result(pfn, MF_MSG_FREE_HUGE,
				      res ? MF_IGNORED : MF_DELAYED);
			unlock_page(hpage);
			return res;
		} else {
			action_result(pfn, MF_MSG_KERNEL_HIGH_ORDER, MF_IGNORED);
			return -EBUSY;
		}
	}

	if (!PageHuge(p) && PageTransHuge(hpage)) {
		lock_page(p);
		if (!PageAnon(p) || unlikely(split_huge_page(p))) {
			unlock_page(p);
			if (!PageAnon(p))
				pr_err("Memory failure: %#lx: non anonymous thp\n",
					pfn);
			else
				pr_err("Memory failure: %#lx: thp split failed\n",
					pfn);
			if (TestClearPageHWPoison(p))
				num_poisoned_pages_sub(nr_pages);
			put_hwpoison_page(p);
			return -EBUSY;
		}
		unlock_page(p);
		VM_BUG_ON_PAGE(!page_count(p), p);
		hpage = compound_head(p);
	}

	/*
	 * We ignore non-LRU pages for good reasons.
	 * - PG_locked is only well defined for LRU pages and a few others
	 * - to avoid races with __SetPageLocked()
	 * - to avoid races with __SetPageSlab*() (and more non-atomic ops)
	 * The check (unnecessarily) ignores LRU pages being isolated and
	 * walked by the page reclaim code, however that's not a big loss.
	 */
	if (!PageHuge(p)) {
		if (!PageLRU(p))
			shake_page(p, 0);
		if (!PageLRU(p)) {
			/*
			 * shake_page could have turned it free.
			 */
			if (is_free_buddy_page(p)) {
				if (flags & MF_COUNT_INCREASED)
					action_result(pfn, MF_MSG_BUDDY, MF_DELAYED);
				else
					action_result(pfn, MF_MSG_BUDDY_2ND,
						      MF_DELAYED);
				return 0;
			}
		}
	}

	lock_page(hpage);

	/*
	 * The page could have changed compound pages during the locking.
	 * If this happens just bail out.
	 */
	if (PageCompound(p) && compound_head(p) != orig_head) {
		action_result(pfn, MF_MSG_DIFFERENT_COMPOUND, MF_IGNORED);
		res = -EBUSY;
		goto out;
	}

1179 /*
Naoya Horiguchi524fca12013-02-22 16:35:51 -08001180 * We use page flags to determine what action should be taken, but
1181 * the flags can be modified by the error containment action. One
1182 * example is an mlocked page, where PG_mlocked is cleared by
1183 * page_remove_rmap() in try_to_unmap_one(). So to determine page status
1184 * correctly, we save a copy of the page flags at this time.
1185 */
James Morse1419b872017-06-16 14:02:29 -07001186 if (PageHuge(p))
1187 page_flags = hpage->flags;
1188 else
1189 page_flags = p->flags;
Naoya Horiguchi524fca12013-02-22 16:35:51 -08001190
1191 /*
Wu Fengguang847ce402009-12-16 12:19:58 +01001192 * unpoison always clear PG_hwpoison inside page lock
1193 */
1194 if (!PageHWPoison(p)) {
Chen Yucong495367c02016-05-20 16:57:32 -07001195 pr_err("Memory failure: %#lx: just unpoisoned\n", pfn);
Naoya Horiguchi8e304562015-09-08 15:03:24 -07001196 num_poisoned_pages_sub(nr_pages);
Naoya Horiguchia09233f2015-08-06 15:46:58 -07001197 unlock_page(hpage);
Wanpeng Li665d9da2015-09-08 15:03:21 -07001198 put_hwpoison_page(hpage);
Naoya Horiguchia09233f2015-08-06 15:46:58 -07001199 return 0;
Wu Fengguang847ce402009-12-16 12:19:58 +01001200 }
Wu Fengguang7c116f22009-12-16 12:19:59 +01001201 if (hwpoison_filter(p)) {
1202 if (TestClearPageHWPoison(p))
Naoya Horiguchi8e304562015-09-08 15:03:24 -07001203 num_poisoned_pages_sub(nr_pages);
Naoya Horiguchi7af446a2010-05-28 09:29:17 +09001204 unlock_page(hpage);
Wanpeng Li665d9da2015-09-08 15:03:21 -07001205 put_hwpoison_page(hpage);
Wu Fengguang7c116f22009-12-16 12:19:59 +01001206 return 0;
1207 }
Wu Fengguang847ce402009-12-16 12:19:58 +01001208
Chen Yucong0bc1f8b2014-07-02 15:22:37 -07001209 if (!PageHuge(p) && !PageTransTail(p) && !PageLRU(p))
1210 goto identify_page_state;
1211
Naoya Horiguchi7013feb2010-05-28 09:29:18 +09001212 /*
1213 * For error on the tail page, we should set PG_hwpoison
1214 * on the head page to show that the hugepage is hwpoisoned
1215 */
Jin Dongminga6d30dd2011-02-01 15:52:40 -08001216 if (PageHuge(p) && PageTail(p) && TestSetPageHWPoison(hpage)) {
Xie XiuQicc637b12015-06-24 16:57:30 -07001217 action_result(pfn, MF_MSG_POISONED_HUGE, MF_IGNORED);
Naoya Horiguchi7013feb2010-05-28 09:29:18 +09001218 unlock_page(hpage);
Wanpeng Li665d9da2015-09-08 15:03:21 -07001219 put_hwpoison_page(hpage);
Naoya Horiguchi7013feb2010-05-28 09:29:18 +09001220 return 0;
1221 }
1222 /*
1223 * Set PG_hwpoison on all pages in an error hugepage,
1224 * because containment is done in hugepage unit for now.
1225 * Since we have done TestSetPageHWPoison() for the head page with
1226 * page lock held, we can safely set PG_hwpoison bits on tail pages.
1227 */
1228 if (PageHuge(p))
1229 set_page_hwpoison_huge_page(hpage);
1230
Naoya Horiguchi6edd6cc2014-06-04 16:10:35 -07001231 /*
1232 * It's very difficult to mess with pages currently under IO
1233 * and in many cases impossible, so we just avoid it here.
1234 */
Andi Kleen6a460792009-09-16 11:50:15 +02001235 wait_on_page_writeback(p);
1236
1237 /*
1238 * Now take care of user space mappings.
Minchan Kime64a7822011-03-22 16:32:44 -07001239 * Abort on fail: __delete_from_page_cache() assumes unmapped page.
Naoya Horiguchi54b9dd12014-01-23 15:53:14 -08001240 *
1241 * When the raw error page is thp tail page, hpage points to the raw
1242 * page after thp split.
Andi Kleen6a460792009-09-16 11:50:15 +02001243 */
Naoya Horiguchi54b9dd12014-01-23 15:53:14 -08001244 if (hwpoison_user_mappings(p, pfn, trapno, flags, &hpage)
1245 != SWAP_SUCCESS) {
Xie XiuQicc637b12015-06-24 16:57:30 -07001246 action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
Wu Fengguang1668bfd2009-12-16 12:19:58 +01001247 res = -EBUSY;
1248 goto out;
1249 }
Andi Kleen6a460792009-09-16 11:50:15 +02001250
1251 /*
1252 * Torn down by someone else?
1253 */
Wu Fengguangdc2a1cb2009-12-16 12:19:58 +01001254 if (PageLRU(p) && !PageSwapCache(p) && p->mapping == NULL) {
Xie XiuQicc637b12015-06-24 16:57:30 -07001255 action_result(pfn, MF_MSG_TRUNCATED_LRU, MF_IGNORED);
Wu Fengguangd95ea512009-12-16 12:19:58 +01001256 res = -EBUSY;
Andi Kleen6a460792009-09-16 11:50:15 +02001257 goto out;
1258 }
1259
Chen Yucong0bc1f8b2014-07-02 15:22:37 -07001260identify_page_state:
Andi Kleen6a460792009-09-16 11:50:15 +02001261 res = -EBUSY;
Naoya Horiguchi524fca12013-02-22 16:35:51 -08001262 /*
1263 * The first check uses the current page flags which may not have any
1264 * relevant information. The second check with the saved page flagss is
1265 * carried out only if the first check can't determine the page status.
1266 */
1267 for (ps = error_states;; ps++)
1268 if ((p->flags & ps->mask) == ps->res)
Andi Kleen6a460792009-09-16 11:50:15 +02001269 break;
Wanpeng Li841fcc52013-09-11 14:22:50 -07001270
1271 page_flags |= (p->flags & (1UL << PG_dirty));
1272
Naoya Horiguchi524fca12013-02-22 16:35:51 -08001273 if (!ps->mask)
1274 for (ps = error_states;; ps++)
1275 if ((page_flags & ps->mask) == ps->res)
1276 break;
1277 res = page_action(ps, p, pfn);
Andi Kleen6a460792009-09-16 11:50:15 +02001278out:
Naoya Horiguchi7af446a2010-05-28 09:29:17 +09001279 unlock_page(hpage);
Andi Kleen6a460792009-09-16 11:50:15 +02001280 return res;
1281}
Tony Luckcd42f4a2011-12-15 10:48:12 -08001282EXPORT_SYMBOL_GPL(memory_failure);
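
/*
 * Example (a sketch, not part of this file): an architecture's machine
 * check handler that has already translated the faulting physical
 * address into a pfn might invoke recovery synchronously; "paddr" and
 * "trapno" here are hypothetical locals of such a caller:
 *
 *	if (memory_failure(paddr >> PAGE_SHIFT, trapno, 0))
 *		pr_err("recovery of page at %#llx failed\n", paddr);
 *
 * Handlers that run in atomic context should use memory_failure_queue()
 * below instead.
 */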

#define MEMORY_FAILURE_FIFO_ORDER	4
#define MEMORY_FAILURE_FIFO_SIZE	(1 << MEMORY_FAILURE_FIFO_ORDER)

struct memory_failure_entry {
	unsigned long pfn;
	int trapno;
	int flags;
};

struct memory_failure_cpu {
	DECLARE_KFIFO(fifo, struct memory_failure_entry,
		      MEMORY_FAILURE_FIFO_SIZE);
	spinlock_t lock;
	struct work_struct work;
};

static DEFINE_PER_CPU(struct memory_failure_cpu, memory_failure_cpu);

/**
 * memory_failure_queue - Schedule handling memory failure of a page.
 * @pfn: Page number of the corrupted page
 * @trapno: Trap number reported in the signal to user space.
 * @flags: Flags for memory failure handling
 *
 * This function is called by the low level hardware error handler
 * when it detects hardware memory corruption of a page. It schedules
 * recovery of the error page, including dropping pages, killing
 * processes etc.
 *
 * The function is primarily of use for corruptions that
 * happen outside the current execution context (e.g. when
 * detected by a background scrubber).
 *
 * Can run in IRQ context.
 */
void memory_failure_queue(unsigned long pfn, int trapno, int flags)
{
	struct memory_failure_cpu *mf_cpu;
	unsigned long proc_flags;
	struct memory_failure_entry entry = {
		.pfn = pfn,
		.trapno = trapno,
		.flags = flags,
	};

	mf_cpu = &get_cpu_var(memory_failure_cpu);
	spin_lock_irqsave(&mf_cpu->lock, proc_flags);
	if (kfifo_put(&mf_cpu->fifo, entry))
		schedule_work_on(smp_processor_id(), &mf_cpu->work);
	else
		pr_err("Memory failure: buffer overflow when queuing memory failure at %#lx\n",
		       pfn);
	spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
	put_cpu_var(memory_failure_cpu);
}
EXPORT_SYMBOL_GPL(memory_failure_queue);
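
/*
 * Example (a sketch, not part of this file): a corrected-error scrubber
 * running in interrupt context cannot do the full handling directly,
 * but can defer it through the per-cpu kfifo above; "pfn" is a
 * hypothetical local of such a caller:
 *
 *	memory_failure_queue(pfn, 0, MF_SOFT_OFFLINE);
 *
 * memory_failure_work_func() below later drains the fifo and, because
 * MF_SOFT_OFFLINE is set, routes the entry to soft_offline_page()
 * rather than memory_failure().
 */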

static void memory_failure_work_func(struct work_struct *work)
{
	struct memory_failure_cpu *mf_cpu;
	struct memory_failure_entry entry = { 0, };
	unsigned long proc_flags;
	int gotten;

	mf_cpu = this_cpu_ptr(&memory_failure_cpu);
	for (;;) {
		spin_lock_irqsave(&mf_cpu->lock, proc_flags);
		gotten = kfifo_get(&mf_cpu->fifo, &entry);
		spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
		if (!gotten)
			break;
		if (entry.flags & MF_SOFT_OFFLINE)
			soft_offline_page(pfn_to_page(entry.pfn), entry.flags);
		else
			memory_failure(entry.pfn, entry.trapno, entry.flags);
	}
}

static int __init memory_failure_init(void)
{
	struct memory_failure_cpu *mf_cpu;
	int cpu;

	for_each_possible_cpu(cpu) {
		mf_cpu = &per_cpu(memory_failure_cpu, cpu);
		spin_lock_init(&mf_cpu->lock);
		INIT_KFIFO(mf_cpu->fifo);
		INIT_WORK(&mf_cpu->work, memory_failure_work_func);
	}

	return 0;
}
core_initcall(memory_failure_init);

#define unpoison_pr_info(fmt, pfn, rs)			\
({							\
	if (__ratelimit(rs))				\
		pr_info(fmt, pfn);			\
})

/**
 * unpoison_memory - Unpoison a previously poisoned page
 * @pfn: Page number of the page to be unpoisoned
 *
 * Software-unpoison a page that has been poisoned by
 * memory_failure() earlier.
 *
 * This is done only at the software level, so it only works
 * for failures injected by Linux itself, not real hardware failures.
 *
 * Returns 0 for success, otherwise -errno.
 */
int unpoison_memory(unsigned long pfn)
{
	struct page *page;
	struct page *p;
	int freeit = 0;
	unsigned int nr_pages;
	static DEFINE_RATELIMIT_STATE(unpoison_rs, DEFAULT_RATELIMIT_INTERVAL,
					DEFAULT_RATELIMIT_BURST);

	if (!pfn_valid(pfn))
		return -ENXIO;

	p = pfn_to_page(pfn);
	page = compound_head(p);

	if (!PageHWPoison(p)) {
		unpoison_pr_info("Unpoison: Page was already unpoisoned %#lx\n",
				 pfn, &unpoison_rs);
		return 0;
	}

	if (page_count(page) > 1) {
		unpoison_pr_info("Unpoison: Someone grabs the hwpoison page %#lx\n",
				 pfn, &unpoison_rs);
		return 0;
	}

	if (page_mapped(page)) {
		unpoison_pr_info("Unpoison: Someone maps the hwpoison page %#lx\n",
				 pfn, &unpoison_rs);
		return 0;
	}

	if (page_mapping(page)) {
		unpoison_pr_info("Unpoison: the hwpoison page has non-NULL mapping %#lx\n",
				 pfn, &unpoison_rs);
		return 0;
	}

	/*
	 * unpoison_memory() can encounter a thp only while the thp is being
	 * handled by memory_failure() and the page lock is not held yet.
	 * In such a case, we yield to memory_failure() and make unpoison fail.
	 */
	if (!PageHuge(page) && PageTransHuge(page)) {
		unpoison_pr_info("Unpoison: Memory failure is now running on %#lx\n",
				 pfn, &unpoison_rs);
		return 0;
	}

	nr_pages = 1 << compound_order(page);

	if (!get_hwpoison_page(p)) {
		/*
		 * Since a hwpoisoned hugepage should have a non-zero
		 * refcount, a race between memory failure and unpoison
		 * seems to have happened.  In such a case unpoison fails
		 * and memory failure runs to the end.
		 */
		if (PageHuge(page)) {
			unpoison_pr_info("Unpoison: Memory failure is now running on free hugepage %#lx\n",
					 pfn, &unpoison_rs);
			return 0;
		}
		if (TestClearPageHWPoison(p))
			num_poisoned_pages_dec();
		unpoison_pr_info("Unpoison: Software-unpoisoned free page %#lx\n",
				 pfn, &unpoison_rs);
		return 0;
	}

	lock_page(page);
	/*
	 * This test is racy because PG_hwpoison is set outside of the page
	 * lock.  That's acceptable because it won't trigger a kernel panic.
	 * Instead, the PG_hwpoison page will be caught and isolated on
	 * entry to the free buddy page pool.
	 */
	if (TestClearPageHWPoison(page)) {
		unpoison_pr_info("Unpoison: Software-unpoisoned page %#lx\n",
				 pfn, &unpoison_rs);
		num_poisoned_pages_sub(nr_pages);
		freeit = 1;
		if (PageHuge(page))
			clear_page_hwpoison_huge_page(page);
	}
	unlock_page(page);

	put_hwpoison_page(page);
	if (freeit && !(pfn == my_zero_pfn(0) && page_count(p) == 1))
		put_hwpoison_page(page);

	return 0;
}
EXPORT_SYMBOL(unpoison_memory);
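
/*
 * Example (a sketch, assuming the hwpoison-inject test module is
 * loaded): software-injected poison can be injected and then reverted
 * from user space via debugfs; the pfn written below is hypothetical:
 *
 *	echo 0x1a2b3 > /sys/kernel/debug/hwpoison/corrupt-pfn
 *	echo 0x1a2b3 > /sys/kernel/debug/hwpoison/unpoison-pfn
 *
 * The second write ends up in unpoison_memory() above.
 */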

/*
 * Allocation callback handed to migrate_pages() by the soft-offline
 * paths below: allocate the replacement page on the same node as the
 * source page, as a huge page when the source is one.
 */
static struct page *new_page(struct page *p, unsigned long private, int **x)
{
	int nid = page_to_nid(p);

	if (PageHuge(p))
		return alloc_huge_page_node(page_hstate(compound_head(p)),
					    nid);
	else
		return __alloc_pages_node(nid, GFP_HIGHUSER_MOVABLE, 0);
}

/*
 * Safely get a reference count on an arbitrary page.
 * Returns 0 for a free page, -EIO for a zero refcount page
 * that is not free, and 1 for any other page type.
 * For 1 the page is returned with an increased page count, otherwise not.
 */
static int __get_any_page(struct page *p, unsigned long pfn, int flags)
{
	int ret;

	if (flags & MF_COUNT_INCREASED)
		return 1;

	/*
	 * When the target page is a free hugepage, just remove it
	 * from the free hugepage list.
	 */
	if (!get_hwpoison_page(p)) {
		if (PageHuge(p)) {
			pr_info("%s: %#lx free huge page\n", __func__, pfn);
			ret = 0;
		} else if (is_free_buddy_page(p)) {
			pr_info("%s: %#lx free buddy page\n", __func__, pfn);
			ret = 0;
		} else {
			pr_info("%s: %#lx: unknown zero refcount page type %lx\n",
				__func__, pfn, p->flags);
			ret = -EIO;
		}
	} else {
		/* Not a free page */
		ret = 1;
	}
	return ret;
}

static int get_any_page(struct page *page, unsigned long pfn, int flags)
{
	int ret = __get_any_page(page, pfn, flags);

	if (ret == 1 && !PageHuge(page) && !PageLRU(page)) {
		/*
		 * Try to free it.
		 */
		put_hwpoison_page(page);
		shake_page(page, 1);

		/*
		 * Did it turn free?
		 */
		ret = __get_any_page(page, pfn, 0);
		if (ret == 1 && !PageLRU(page)) {
			/* Drop the page reference taken by __get_any_page() */
			put_hwpoison_page(page);
			pr_info("soft_offline: %#lx: unknown non LRU page type %lx\n",
				pfn, page->flags);
			return -EIO;
		}
	}
	return ret;
}
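
/*
 * Example (a sketch of the expected calling pattern for get_any_page();
 * the branch bodies are elided):
 *
 *	ret = get_any_page(page, pfn, flags);
 *	if (ret > 0)		(page in use, reference taken)
 *		...
 *	else if (ret == 0)	(free page, no reference)
 *		...
 *	else			(-EIO: unknown zero-refcount page)
 *		...
 *
 * soft_offline_page() at the end of this file follows exactly this
 * pattern.
 */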

static int soft_offline_huge_page(struct page *page, int flags)
{
	int ret;
	unsigned long pfn = page_to_pfn(page);
	struct page *hpage = compound_head(page);
	LIST_HEAD(pagelist);

	/*
	 * This double-check of PageHWPoison is to avoid the race with
	 * memory_failure(). See also the comment in __soft_offline_page().
	 */
	lock_page(hpage);
	if (PageHWPoison(hpage)) {
		unlock_page(hpage);
		put_hwpoison_page(hpage);
		pr_info("soft offline: %#lx hugepage already poisoned\n", pfn);
		return -EBUSY;
	}
	unlock_page(hpage);

	ret = isolate_huge_page(hpage, &pagelist);
	/*
	 * get_any_page() and isolate_huge_page() each take a refcount,
	 * so we need to drop one here.
	 */
	put_hwpoison_page(hpage);
	if (!ret) {
		pr_info("soft offline: %#lx hugepage failed to isolate\n", pfn);
		return -EBUSY;
	}

	ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL,
			    MIGRATE_SYNC, MR_MEMORY_FAILURE);
	if (ret) {
		pr_info("soft offline: %#lx: migration failed %d, type %lx\n",
			pfn, ret, page->flags);
		if (!list_empty(&pagelist))
			putback_movable_pages(&pagelist);
		if (ret > 0)
			ret = -EIO;
	} else {
		/* An overcommitted hugetlb page will be freed to the buddy */
		if (PageHuge(page)) {
			set_page_hwpoison_huge_page(hpage);
			dequeue_hwpoisoned_huge_page(hpage);
			num_poisoned_pages_add(1 << compound_order(hpage));
		} else {
			SetPageHWPoison(page);
			num_poisoned_pages_inc();
		}
	}
	return ret;
}

static int __soft_offline_page(struct page *page, int flags)
{
	int ret;
	unsigned long pfn = page_to_pfn(page);

	/*
	 * Check PageHWPoison again inside the page lock because PageHWPoison
	 * is set by memory_failure() outside the page lock. Note that
	 * memory_failure() also double-checks PageHWPoison inside the page
	 * lock, so there's no race between soft_offline_page() and
	 * memory_failure().
	 */
	lock_page(page);
	wait_on_page_writeback(page);
	if (PageHWPoison(page)) {
		unlock_page(page);
		put_hwpoison_page(page);
		pr_info("soft offline: %#lx page already poisoned\n", pfn);
		return -EBUSY;
	}
	/*
	 * Try to invalidate first. This should work for
	 * non dirty unmapped page cache pages.
	 */
	ret = invalidate_inode_page(page);
	unlock_page(page);
	/*
	 * RED-PEN: it would be better to keep the page isolated here, but
	 * we would need to fix the isolation locking first.
	 */
	if (ret == 1) {
		put_hwpoison_page(page);
		pr_info("soft_offline: %#lx: invalidated\n", pfn);
		SetPageHWPoison(page);
		num_poisoned_pages_inc();
		return 0;
	}

	/*
	 * Simple invalidation didn't work.
	 * Try to migrate to a new page instead. migrate.c
	 * handles a large number of cases for us.
	 */
	ret = isolate_lru_page(page);
	/*
	 * Drop the page reference that came from get_any_page();
	 * a successful isolate_lru_page() already took another one.
	 */
	put_hwpoison_page(page);
	if (!ret) {
		LIST_HEAD(pagelist);
		inc_node_page_state(page, NR_ISOLATED_ANON +
				    page_is_file_cache(page));
		list_add(&page->lru, &pagelist);
		ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL,
				    MIGRATE_SYNC, MR_MEMORY_FAILURE);
		if (ret) {
			if (!list_empty(&pagelist)) {
				list_del(&page->lru);
				dec_node_page_state(page, NR_ISOLATED_ANON +
						    page_is_file_cache(page));
				putback_lru_page(page);
			}

			pr_info("soft offline: %#lx: migration failed %d, type %lx\n",
				pfn, ret, page->flags);
			if (ret > 0)
				ret = -EIO;
		}
	} else {
		pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
			pfn, ret, page_count(page), page->flags);
	}
	return ret;
}

static int soft_offline_in_use_page(struct page *page, int flags)
{
	int ret;
	struct page *hpage = compound_head(page);

	if (!PageHuge(page) && PageTransHuge(hpage)) {
		lock_page(hpage);
		if (!PageAnon(hpage) || unlikely(split_huge_page(hpage))) {
			unlock_page(hpage);
			if (!PageAnon(hpage))
				pr_info("soft offline: %#lx: non anonymous thp\n", page_to_pfn(page));
			else
				pr_info("soft offline: %#lx: thp split failed\n", page_to_pfn(page));
			put_hwpoison_page(hpage);
			return -EBUSY;
		}
		unlock_page(hpage);
		get_hwpoison_page(page);
		put_hwpoison_page(hpage);
	}

	if (PageHuge(page))
		ret = soft_offline_huge_page(page, flags);
	else
		ret = __soft_offline_page(page, flags);

	return ret;
}

static void soft_offline_free_page(struct page *page)
{
	if (PageHuge(page)) {
		struct page *hpage = compound_head(page);

		set_page_hwpoison_huge_page(hpage);
		if (!dequeue_hwpoisoned_huge_page(hpage))
			num_poisoned_pages_add(1 << compound_order(hpage));
	} else {
		if (!TestSetPageHWPoison(page))
			num_poisoned_pages_inc();
	}
}

/**
 * soft_offline_page - Soft offline a page.
 * @page: page to offline
 * @flags: flags. Same as memory_failure().
 *
 * Returns 0 on success, otherwise negated errno.
 *
 * Soft offline a page, by migration or invalidation,
 * without killing anything. This is for the case when
 * a page is not corrupted yet (so it's still valid to access),
 * but has had a number of corrected errors and is better taken
 * out.
 *
 * The actual policy on when to do that is maintained by
 * user space.
 *
 * This should never impact any application or cause data loss,
 * however it might take some time.
 *
 * This is not a 100% solution for all memory, but tries to be
 * ``good enough'' for the majority of memory.
 */
int soft_offline_page(struct page *page, int flags)
{
	int ret;
	unsigned long pfn = page_to_pfn(page);

	if (PageHWPoison(page)) {
		pr_info("soft offline: %#lx page already poisoned\n", pfn);
		if (flags & MF_COUNT_INCREASED)
			put_hwpoison_page(page);
		return -EBUSY;
	}

	get_online_mems();
	ret = get_any_page(page, pfn, flags);
	put_online_mems();

	if (ret > 0)
		ret = soft_offline_in_use_page(page, flags);
	else if (ret == 0)
		soft_offline_free_page(page);

	return ret;
}
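
/*
 * Example (a sketch): user space RAS policy typically reaches this
 * function through the memory sysfs interface; the physical address
 * written below is hypothetical:
 *
 *	echo 0x3c0000000 > /sys/devices/system/memory/soft_offline_page
 *
 * The kernel translates the address to a page and calls
 * soft_offline_page() on it.
 */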