/*
 * Low-level task switching. This is based on information published in
 * the Processor Abstraction Layer and the System Abstraction Layer
 * manual.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 */
#ifndef _ASM_IA64_SWITCH_TO_H
#define _ASM_IA64_SWITCH_TO_H

#include <linux/percpu.h>

struct task_struct;

/*
 * Context switch from one thread to another.  If the two threads have
 * different address spaces, schedule() has already taken care of
 * switching to the new address space by calling switch_mm().
 *
 * Disabling access to the fph partition and the debug-register
 * context switch MUST be done before calling ia64_switch_to() since a
 * newly created thread returns directly to
 * ia64_ret_from_syscall_clear_r8.
 */
extern struct task_struct *ia64_switch_to (void *next_task);

extern void ia64_save_extra (struct task_struct *task);
extern void ia64_load_extra (struct task_struct *task);
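
/*
 * Usage sketch (illustrative only, not part of the original header): the
 * core scheduler ends up invoking this interface roughly as
 *
 *	switch_to(prev, next, last);
 *
 * and then uses "last" to clean up after the task it really switched away
 * from.  "last" is needed because, by the time a task resumes here, the
 * "prev" it remembered on its own kernel stack may be stale;
 * ia64_switch_to()'s return value identifies the task that was actually
 * running just before us.
 */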
| 32 | |
#ifdef CONFIG_PERFMON
  DECLARE_PER_CPU(unsigned long, pfm_syst_info);
  /* low bit of the per-CPU pfm_syst_info word => a system-wide perfmon session is active */
# define PERFMON_IS_SYSWIDE() (__get_cpu_var(pfm_syst_info) & 0x1)
#else
# define PERFMON_IS_SYSWIDE() (0)
#endif

/*
 * Tasks with valid debug registers or a private perfmon context, or any task
 * while a system-wide perfmon session is active, need ia64_{save,load}_extra()
 * calls around the low-level switch.
 */
#define IA64_HAS_EXTRA_STATE(t)							\
	((t)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID)	\
	 || PERFMON_IS_SYSWIDE())

#define __switch_to(prev,next,last) do {						\
	if (IA64_HAS_EXTRA_STATE(prev))							\
		ia64_save_extra(prev);							\
	if (IA64_HAS_EXTRA_STATE(next))							\
		ia64_load_extra(next);							\
	/* make fph accesses trap unless "next" is already this CPU's fph owner */	\
	ia64_psr(task_pt_regs(next))->dfh = !ia64_is_local_fpu_owner(next);		\
	(last) = ia64_switch_to((next));						\
} while (0)

#ifdef CONFIG_SMP
/*
 * In the SMP case, we save the fph state when context-switching away from a thread that
 * modified fph.  This way, when the thread gets scheduled on another CPU, the CPU can
 * pick up the state from task->thread.fph, avoiding the complication of having to fetch
 * the latest fph state from another CPU.  In other words: eager save, lazy restore.
 */
# define switch_to(prev,next,last) do {							\
	if (ia64_psr(task_pt_regs(prev))->mfh && ia64_is_local_fpu_owner(prev)) {	\
		/* prev modified fph (psr.mfh) and owns it here: save it eagerly */	\
		ia64_psr(task_pt_regs(prev))->mfh = 0;					\
		(prev)->thread.flags |= IA64_THREAD_FPH_VALID;				\
		__ia64_save_fpu((prev)->thread.fph);					\
	}										\
	__switch_to(prev, next, last);							\
	/* "next" in old context is "current" in new context */			\
	if (unlikely((current->thread.flags & IA64_THREAD_MIGRATION) &&		\
		     (task_cpu(current) !=						\
		      task_thread_info(current)->last_cpu))) {				\
		platform_migrate(current);						\
		task_thread_info(current)->last_cpu = task_cpu(current);		\
	}										\
} while (0)
#else
# define switch_to(prev,next,last)	__switch_to(prev, next, last)
#endif
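
/*
 * Lazy-restore side, for reference (that code lives in the arch fault
 * handlers, not in this header; the helper names below are recalled from
 * the ia64 tree and should be treated as assumptions):
 *
 *	// The first fph access after a switch traps because __switch_to()
 *	// left psr.dfh set; the disabled-FP-register fault handler then
 *	// does roughly:
 *	if (current->thread.flags & IA64_THREAD_FPH_VALID)
 *		__ia64_load_fpu(current->thread.fph);
 *	ia64_set_local_fpu_owner(current);	// claim fph, clear psr.dfh
 */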

#endif /* _ASM_IA64_SWITCH_TO_H */