/*
 * User address space access functions.
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/uaccess.h>
#include <linux/export.h>

/*
 * We rely on the nested NMI work to allow atomic faults from the NMI path; the
 * nested NMI paths are careful to preserve CR2.
 */
unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
{
	unsigned long ret;

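	/*
	 * As with copy_from_user(), the return value is the number of
	 * bytes that could not be copied, so returning 'n' below reports
	 * that nothing was copied at all.
	 */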
	if (__range_not_ok(from, n, TASK_SIZE))
		return n;

	/*
	 * Even though this function is typically called from NMI/IRQ
	 * context, disable pagefaults so that its behaviour is consistent
	 * even when called from other contexts: with pagefaults disabled,
	 * a faulting access fails the copy early instead of sleeping in
	 * the fault handler.
	 */
	pagefault_disable();
	ret = __copy_from_user_inatomic(to, from, n);
	pagefault_enable();

	return ret;
}
EXPORT_SYMBOL_GPL(copy_from_user_nmi);
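
/*
 * Illustrative sketch, not part of the original file: a typical caller
 * of copy_from_user_nmi() is the perf user-stack unwinder, which walks
 * saved frame pointers from NMI context roughly like this. The names
 * 'stack_frame' and 'count_user_frames' and the frame layout below are
 * hypothetical simplifications for illustration only.
 */
struct stack_frame {
	const void __user *next_fp;	/* caller's saved frame pointer */
	unsigned long return_address;	/* saved return address */
};

static int __maybe_unused
count_user_frames(const void __user *fp, int max_frames)
{
	struct stack_frame frame;
	int nr = 0;

	while (nr < max_frames && fp) {
		/*
		 * A non-zero return means some bytes were not copied:
		 * the address was out of range or the access faulted.
		 */
		if (copy_from_user_nmi(&frame, fp, sizeof(frame)))
			break;

		fp = frame.next_fp;
		nr++;
	}

	return nr;
}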