/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Low-level task switching. This is based on information published in
 * the Processor Abstraction Layer and the System Abstraction Layer
 * manual.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 */
#ifndef _ASM_IA64_SWITCH_TO_H
#define _ASM_IA64_SWITCH_TO_H

#include <linux/percpu.h>

struct task_struct;

/*
 * Context switch from one thread to another.  If the two threads have
 * different address spaces, schedule() has already taken care of
 * switching to the new address space by calling switch_mm().
 *
 * Disabling access to the fph partition and the debug-register
 * context switch MUST be done before calling ia64_switch_to() since a
 * newly created thread returns directly to
 * ia64_ret_from_syscall_clear_r8.
 */
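/*
 * ia64_switch_to() is the low-level switch routine; it returns the task that
 * was running before the switch, which __switch_to() hands back as "last".
 * ia64_save_extra()/ia64_load_extra() spill and reload the "extra" per-task
 * state (debug registers and performance-monitoring state) around the switch.
 */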
extern struct task_struct *ia64_switch_to (void *next_task);

extern void ia64_save_extra (struct task_struct *task);
extern void ia64_load_extra (struct task_struct *task);

#ifdef CONFIG_PERFMON
DECLARE_PER_CPU(unsigned long, pfm_syst_info);
# define PERFMON_IS_SYSWIDE() (__this_cpu_read(pfm_syst_info) & 0x1)
#else
# define PERFMON_IS_SYSWIDE() (0)
#endif

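/*
 * A task has "extra" state to switch if it has live debug registers or
 * per-task performance-monitoring state, or if system-wide perfmon
 * monitoring is active on this CPU.
 */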
#define IA64_HAS_EXTRA_STATE(t)						\
	((t)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID)	\
	 || PERFMON_IS_SYSWIDE())

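/*
 * Besides the extra state, __switch_to() sets psr.dfh (disable fph) for the
 * incoming task unless it already owns this CPU's fph partition, so that its
 * first fph access traps and the floating-point high state can be restored
 * lazily.
 */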
#define __switch_to(prev,next,last) do {					\
	if (IA64_HAS_EXTRA_STATE(prev))						\
		ia64_save_extra(prev);						\
	if (IA64_HAS_EXTRA_STATE(next))						\
		ia64_load_extra(next);						\
	ia64_psr(task_pt_regs(next))->dfh = !ia64_is_local_fpu_owner(next);	\
	(last) = ia64_switch_to((next));					\
} while (0)

#ifdef CONFIG_SMP
/*
 * In the SMP case, we save the fph state when context-switching away from a thread that
 * modified fph.  This way, when the thread gets scheduled on another CPU, the CPU can
 * pick up the state from task->thread.fph, avoiding the complication of having to fetch
 * the latest fph state from another CPU.  In other words: eager save, lazy restore.
 */
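/*
 * After __switch_to() returns we are running as "next" (now "current").  If
 * that task is flagged IA64_THREAD_MIGRATION and has landed on a different CPU
 * than the one it last ran on, platform_migrate() lets the platform react to
 * the move before last_cpu is brought up to date.
 */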
# define switch_to(prev,next,last) do {						\
	if (ia64_psr(task_pt_regs(prev))->mfh && ia64_is_local_fpu_owner(prev)) { \
		ia64_psr(task_pt_regs(prev))->mfh = 0;				\
		(prev)->thread.flags |= IA64_THREAD_FPH_VALID;			\
		__ia64_save_fpu((prev)->thread.fph);				\
	}									\
	__switch_to(prev, next, last);						\
	/* "next" in old context is "current" in new context */		\
	if (unlikely((current->thread.flags & IA64_THREAD_MIGRATION) &&	\
		     (task_cpu(current) !=					\
		      task_thread_info(current)->last_cpu))) {			\
		platform_migrate(current);					\
		task_thread_info(current)->last_cpu = task_cpu(current);	\
	}									\
} while (0)
#else
# define switch_to(prev,next,last)	__switch_to(prev, next, last)
#endif

#endif /* _ASM_IA64_SWITCH_TO_H */