/*
 * User address space access functions.
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/sched.h>

#include <asm/word-at-a-time.h>

/*
 * We rely on the nested NMI work to allow atomic faults from the NMI path; the
 * nested NMI paths are careful to preserve CR2.
 */
unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
{
	unsigned long ret;

	/*
	 * Return the number of bytes that could not be copied, as
	 * copy_from_user() does; a range outside user space means the
	 * whole request failed, so report all n bytes as uncopied
	 * rather than 0, which would falsely signal success.
	 */
	if (__range_not_ok(from, n, TASK_SIZE))
		return n;

	/*
	 * Even though this function is typically called from NMI/IRQ
	 * context, disable pagefaults so that its behaviour is consistent
	 * even when called from other contexts.
	 */
	pagefault_disable();
	ret = __copy_from_user_inatomic(to, from, n);
	pagefault_enable();

	return ret;
}
EXPORT_SYMBOL_GPL(copy_from_user_nmi);
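
/*
 * Hedged usage sketch (illustration only, not part of the original
 * file): the typical caller is the x86 perf callchain walker, which
 * reads user stack frames from NMI context. The struct layout and the
 * helper below are an approximation of such a caller, not a verbatim
 * copy; only copy_from_user_nmi() itself comes from this file. Per the
 * bytes-not-copied convention above, a non-zero return means the frame
 * was only partially read and must not be trusted.
 *
 *	struct stack_frame {
 *		const void __user *next_frame;
 *		unsigned long return_address;
 *	};
 *
 *	static int read_user_frame(const void __user *fp,
 *				   struct stack_frame *frame)
 *	{
 *		if (copy_from_user_nmi(frame, fp, sizeof(*frame)))
 *			return -EFAULT;
 *		return 0;
 *	}
 *
 * Because pagefaults are disabled, a copy that touches an unmapped
 * user page simply fails instead of sleeping, which is what makes the
 * helper safe to call while handling an NMI.
 */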