#ifndef __ASM_ARM_SWITCH_TO_H
#define __ASM_ARM_SWITCH_TO_H

#include <linux/thread_info.h>

/*
 * For v7 SMP cores running a preemptible kernel we may be pre-empted
 * during a TLB maintenance operation, so execute an inner-shareable dsb
 * to ensure that the maintenance completes in case we migrate to another
 * CPU.
 */
#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP) && defined(CONFIG_CPU_V7)
#define __complete_pending_tlbi()	dsb(ish)
#else
#define __complete_pending_tlbi()
#endif

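/*
 * For illustration only (this assumes the ARMv7 definition of dsb()
 * from asm/barrier.h, which is not part of this header):
 *
 *	#define dsb(option) __asm__ __volatile__ ("dsb " #option : : : "memory")
 *
 * so __complete_pending_tlbi() emits a "dsb ish" instruction, which
 * waits for all outstanding TLB maintenance broadcast within the
 * inner-shareable domain to complete, and also stops the compiler from
 * reordering memory accesses across it.
 */
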
/*
 * switch_to(prev, next) should switch from task `prev' to `next'.
 * `prev' will never be the same as `next'.  schedule() itself
 * contains the memory barrier to tell GCC not to cache `current'.
 */
extern struct task_struct *__switch_to(struct task_struct *, struct thread_info *, struct thread_info *);
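/*
 * For reference (not part of this header): in the ARM tree __switch_to
 * is implemented in assembly, in arch/arm/kernel/entry-armv.S.  It saves
 * the callee-saved register context into prev's thread_info, loads
 * next's, and returns the task_struct it switched away from.
 */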

#define switch_to(prev,next,last)					\
do {									\
	__complete_pending_tlbi();					\
	last = __switch_to(prev, task_thread_info(prev), task_thread_info(next));	\
} while (0)
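
/*
 * For illustration (a simplified sketch of context_switch() in
 * kernel/sched/core.c, not a verbatim quote): the scheduler passes
 * `prev' as `last' so that, when a thread is eventually rescheduled on
 * this stack, it learns which task actually ran before it:
 *
 *	switch_to(prev, next, prev);
 *	barrier();
 *	return finish_task_switch(prev);
 */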

#endif /* __ASM_ARM_SWITCH_TO_H */