/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2011
 *
 * Authors: Sukadev Bhattiprolu <sukadev@linux.vnet.ibm.com>
 *          Anton Blanchard <anton@au.ibm.com>
 */
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <asm/switch_to.h>
#include <asm/asm-prototypes.h>

int enter_vmx_usercopy(void)
{
	if (in_interrupt())
		return 0;

	preempt_disable();
	/*
	 * We need to disable page faults as they can call schedule and
	 * thus make us lose the VMX context. So on page faults, we just
	 * fail which will cause a fallback to the normal non-vmx copy.
	 */
	pagefault_disable();

	enable_kernel_altivec();

	return 1;
}
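
/*
 * Illustrative sketch only, not part of the kernel build: the real caller
 * is the __copy_tofrom_user_power7 assembly routine.  It shows how a copy
 * path is expected to gate the VMX fast path on the return value above and
 * fall back to a scalar copy when VMX cannot be used (e.g. in interrupt
 * context).  scalar_copy() and vmx_copy() are hypothetical helpers used
 * purely for illustration.
 *
 *	unsigned long copy_sketch(void *to, const void *from, unsigned long n)
 *	{
 *		if (!enter_vmx_usercopy())
 *			return scalar_copy(to, from, n);
 *
 *		n = vmx_copy(to, from, n);	// page faults fail fast here
 *		exit_vmx_usercopy();
 *		return n;			// bytes not copied, 0 on success
 *	}
 */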

/*
 * This function must return 0 because we tail call optimise when calling
 * from __copy_tofrom_user_power7 which returns 0 on success.
 */
int exit_vmx_usercopy(void)
{
	disable_kernel_altivec();
	pagefault_enable();
	preempt_enable();
	return 0;
}
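
/*
 * Illustrative sketch, not from this file: the tail-call contract described
 * above.  When the last thing the copy routine does on its success path is
 * call exit_vmx_usercopy(), that call can become a tail call, so the 0
 * returned here doubles as the caller's own "0 bytes left to copy" success
 * value with no extra code in the caller.  copy_sketch_success_path() is a
 * hypothetical name.
 *
 *	unsigned long copy_sketch_success_path(void)
 *	{
 *		// ... VMX copy completed with nothing left over ...
 *		return exit_vmx_usercopy();	// tail call, returns 0
 *	}
 */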

/*
 * As above, but for copies between kernel buffers (the VMX memcpy):
 * only preemption needs to be disabled here, not page faults.
 */
int enter_vmx_copy(void)
{
	if (in_interrupt())
		return 0;

	preempt_disable();

	enable_kernel_altivec();

	return 1;
}

/*
 * All calls to this function will be optimised into tail calls. We are
 * passed a pointer to the destination which we return as required by a
 * memcpy implementation.
 */
void *exit_vmx_copy(void *dest)
{
	disable_kernel_altivec();
	preempt_enable();
	return dest;
}
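
/*
 * Illustrative sketch, not from this file: the real caller is the
 * memcpy_power7 assembly.  Because memcpy() must return its destination
 * pointer, the copy routine can finish with a tail call to
 * exit_vmx_copy(dest) and let it supply that return value.
 * vmx_memcpy_sketch(), scalar_memcpy() and vmx_copy_bytes() are
 * hypothetical names used only for illustration.
 *
 *	void *vmx_memcpy_sketch(void *dest, const void *src, size_t n)
 *	{
 *		if (!enter_vmx_copy())
 *			return scalar_memcpy(dest, src, n);
 *
 *		vmx_copy_bytes(dest, src, n);
 *		return exit_vmx_copy(dest);	// tail call returns dest
 *	}
 */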