/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PARAVIRT_H
#define _ASM_X86_PARAVIRT_H
/* Various instructions on x86 need to be replaced for
 * para-virtualization: those hooks are defined here. */

#ifdef CONFIG_PARAVIRT
#include <asm/pgtable_types.h>
#include <asm/asm.h>

#include <asm/paravirt_types.h>

#ifndef __ASSEMBLY__
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/cpumask.h>
#include <asm/frame.h>

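/*
 * The PVOP_CALLn()/PVOP_VCALLn() macros used throughout this file come
 * from asm/paravirt_types.h: they expand to an indirect call through the
 * named pv_*_ops slot and record the call site in .parainstructions so
 * it can be patched at boot (often into the native instruction itself).
 * PVOP_VCALLn is the void variant; n is the number of argument words.
 */
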
static inline void load_sp0(unsigned long sp0)
{
	PVOP_VCALL1(pv_cpu_ops.load_sp0, sp0);
}

/* The paravirtualized CPUID instruction. */
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
			   unsigned int *ecx, unsigned int *edx)
{
	PVOP_VCALL4(pv_cpu_ops.cpuid, eax, ebx, ecx, edx);
}

/*
 * These special macros can be used to get or set a debugging register
 */
static inline unsigned long paravirt_get_debugreg(int reg)
{
	return PVOP_CALL1(unsigned long, pv_cpu_ops.get_debugreg, reg);
}
#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
static inline void set_debugreg(unsigned long val, int reg)
{
	PVOP_VCALL2(pv_cpu_ops.set_debugreg, reg, val);
}

static inline unsigned long read_cr0(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr0);
}

static inline void write_cr0(unsigned long x)
{
	PVOP_VCALL1(pv_cpu_ops.write_cr0, x);
}

static inline unsigned long read_cr2(void)
{
	return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
}

static inline void write_cr2(unsigned long x)
{
	PVOP_VCALL1(pv_mmu_ops.write_cr2, x);
}

static inline unsigned long __read_cr3(void)
{
	return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr3);
}

static inline void write_cr3(unsigned long x)
{
	PVOP_VCALL1(pv_mmu_ops.write_cr3, x);
}

static inline void __write_cr4(unsigned long x)
{
	PVOP_VCALL1(pv_cpu_ops.write_cr4, x);
}

#ifdef CONFIG_X86_64
static inline unsigned long read_cr8(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr8);
}

static inline void write_cr8(unsigned long x)
{
	PVOP_VCALL1(pv_cpu_ops.write_cr8, x);
}
#endif

static inline void arch_safe_halt(void)
{
	PVOP_VCALL0(pv_irq_ops.safe_halt);
}

static inline void halt(void)
{
	PVOP_VCALL0(pv_irq_ops.halt);
}

static inline void wbinvd(void)
{
	PVOP_VCALL0(pv_cpu_ops.wbinvd);
}

#define get_kernel_rpl()  (pv_info.kernel_rpl)

static inline u64 paravirt_read_msr(unsigned msr)
{
	return PVOP_CALL1(u64, pv_cpu_ops.read_msr, msr);
}

static inline void paravirt_write_msr(unsigned msr,
				      unsigned low, unsigned high)
{
	PVOP_VCALL3(pv_cpu_ops.write_msr, msr, low, high);
}

static inline u64 paravirt_read_msr_safe(unsigned msr, int *err)
{
	return PVOP_CALL2(u64, pv_cpu_ops.read_msr_safe, msr, err);
}

static inline int paravirt_write_msr_safe(unsigned msr,
					  unsigned low, unsigned high)
{
	return PVOP_CALL3(int, pv_cpu_ops.write_msr_safe, msr, low, high);
}

#define rdmsr(msr, val1, val2)			\
do {						\
	u64 _l = paravirt_read_msr(msr);	\
	val1 = (u32)_l;				\
	val2 = _l >> 32;			\
} while (0)

#define wrmsr(msr, val1, val2)			\
do {						\
	paravirt_write_msr(msr, val1, val2);	\
} while (0)

#define rdmsrl(msr, val)			\
do {						\
	val = paravirt_read_msr(msr);		\
} while (0)

static inline void wrmsrl(unsigned msr, u64 val)
{
	wrmsr(msr, (u32)val, (u32)(val >> 32));
}

#define wrmsr_safe(msr, a, b)	paravirt_write_msr_safe(msr, a, b)

/* rdmsr with exception handling */
#define rdmsr_safe(msr, a, b)				\
({							\
	int _err;					\
	u64 _l = paravirt_read_msr_safe(msr, &_err);	\
	(*a) = (u32)_l;					\
	(*b) = _l >> 32;				\
	_err;						\
})

static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
	int err;

	*p = paravirt_read_msr_safe(msr, &err);
	return err;
}
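
/*
 * Example (sketch): probing an MSR that may not exist without taking a
 * fault -- the _safe variants return non-zero if the access trapped.
 * MSR_IA32_PLATFORM_ID is purely illustrative here:
 *
 *	u64 val;
 *	if (rdmsrl_safe(MSR_IA32_PLATFORM_ID, &val))
 *		pr_info("MSR not readable on this CPU\n");
 */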

static inline unsigned long long paravirt_sched_clock(void)
{
	return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
}

struct static_key;
extern struct static_key paravirt_steal_enabled;
extern struct static_key paravirt_steal_rq_enabled;
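/*
 * These static keys let steal-time accounting stay patched out unless a
 * hypervisor actually reports steal time; see steal_account_process_time()
 * in kernel/sched/cputime.c.
 */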

static inline u64 paravirt_steal_clock(int cpu)
{
	return PVOP_CALL1(u64, pv_time_ops.steal_clock, cpu);
}

static inline unsigned long long paravirt_read_pmc(int counter)
{
	return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
}

#define rdpmc(counter, low, high)		\
do {						\
	u64 _l = paravirt_read_pmc(counter);	\
	low = (u32)_l;				\
	high = _l >> 32;			\
} while (0)

#define rdpmcl(counter, val) ((val) = paravirt_read_pmc(counter))

static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
	PVOP_VCALL2(pv_cpu_ops.alloc_ldt, ldt, entries);
}

static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
{
	PVOP_VCALL2(pv_cpu_ops.free_ldt, ldt, entries);
}

static inline void load_TR_desc(void)
{
	PVOP_VCALL0(pv_cpu_ops.load_tr_desc);
}
static inline void load_gdt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.load_gdt, dtr);
}
static inline void load_idt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.load_idt, dtr);
}
static inline void set_ldt(const void *addr, unsigned entries)
{
	PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries);
}
static inline unsigned long paravirt_store_tr(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.store_tr);
}
#define store_tr(tr)	((tr) = paravirt_store_tr())
static inline void load_TLS(struct thread_struct *t, unsigned cpu)
{
	PVOP_VCALL2(pv_cpu_ops.load_tls, t, cpu);
}

#ifdef CONFIG_X86_64
static inline void load_gs_index(unsigned int gs)
{
	PVOP_VCALL1(pv_cpu_ops.load_gs_index, gs);
}
#endif

static inline void write_ldt_entry(struct desc_struct *dt, int entry,
				   const void *desc)
{
	PVOP_VCALL3(pv_cpu_ops.write_ldt_entry, dt, entry, desc);
}

static inline void write_gdt_entry(struct desc_struct *dt, int entry,
				   void *desc, int type)
{
	PVOP_VCALL4(pv_cpu_ops.write_gdt_entry, dt, entry, desc, type);
}

static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
{
	PVOP_VCALL3(pv_cpu_ops.write_idt_entry, dt, entry, g);
}
static inline void set_iopl_mask(unsigned mask)
{
	PVOP_VCALL1(pv_cpu_ops.set_iopl_mask, mask);
}

/* The paravirtualized I/O functions */
static inline void slow_down_io(void)
{
	pv_cpu_ops.io_delay();
#ifdef REALLY_SLOW_IO
	pv_cpu_ops.io_delay();
	pv_cpu_ops.io_delay();
	pv_cpu_ops.io_delay();
#endif
}
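
/*
 * This mirrors the native behaviour in asm/io.h: one small port delay
 * normally (typically an out to port 0x80), four of them when
 * REALLY_SLOW_IO is defined.
 */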

static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
	PVOP_VCALL2(pv_mmu_ops.activate_mm, prev, next);
}

static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
					  struct mm_struct *mm)
{
	PVOP_VCALL2(pv_mmu_ops.dup_mmap, oldmm, mm);
}

static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
{
	PVOP_VCALL1(pv_mmu_ops.exit_mmap, mm);
}

static inline void __flush_tlb(void)
{
	PVOP_VCALL0(pv_mmu_ops.flush_tlb_user);
}
static inline void __flush_tlb_global(void)
{
	PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel);
}
static inline void __flush_tlb_single(unsigned long addr)
{
	PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
}

static inline void flush_tlb_others(const struct cpumask *cpumask,
				    const struct flush_tlb_info *info)
{
	PVOP_VCALL2(pv_mmu_ops.flush_tlb_others, cpumask, info);
}

static inline int paravirt_pgd_alloc(struct mm_struct *mm)
{
	return PVOP_CALL1(int, pv_mmu_ops.pgd_alloc, mm);
}

static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	PVOP_VCALL2(pv_mmu_ops.pgd_free, mm, pgd);
}

static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
}
static inline void paravirt_release_pte(unsigned long pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
}

static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
}

static inline void paravirt_release_pmd(unsigned long pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
}

static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn);
}
static inline void paravirt_release_pud(unsigned long pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
}

static inline void paravirt_alloc_p4d(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_p4d, mm, pfn);
}

static inline void paravirt_release_p4d(unsigned long pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_p4d, pfn);
}

static inline pte_t __pte(pteval_t val)
{
	pteval_t ret;

	if (sizeof(pteval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pteval_t,
				   pv_mmu_ops.make_pte,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pteval_t,
				   pv_mmu_ops.make_pte,
				   val);

	return (pte_t) { .pte = ret };
}

static inline pteval_t pte_val(pte_t pte)
{
	pteval_t ret;

	if (sizeof(pteval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pteval_t, pv_mmu_ops.pte_val,
				   pte.pte, (u64)pte.pte >> 32);
	else
		ret = PVOP_CALLEE1(pteval_t, pv_mmu_ops.pte_val,
				   pte.pte);

	return ret;
}

static inline pgd_t __pgd(pgdval_t val)
{
	pgdval_t ret;

	if (sizeof(pgdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.make_pgd,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.make_pgd,
				   val);

	return (pgd_t) { ret };
}

static inline pgdval_t pgd_val(pgd_t pgd)
{
	pgdval_t ret;

	if (sizeof(pgdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.pgd_val,
				   pgd.pgd, (u64)pgd.pgd >> 32);
	else
		ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.pgd_val,
				   pgd.pgd);

	return ret;
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
					   pte_t *ptep)
{
	pteval_t ret;

	ret = PVOP_CALL3(pteval_t, pv_mmu_ops.ptep_modify_prot_start,
			 mm, addr, ptep);

	return (pte_t) { .pte = ret };
}

static inline void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
					   pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		/* 5 arg words */
		pv_mmu_ops.ptep_modify_prot_commit(mm, addr, ptep, pte);
	else
		PVOP_VCALL4(pv_mmu_ops.ptep_modify_prot_commit,
			    mm, addr, ptep, pte.pte);
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pte, ptep,
			    pte.pte, (u64)pte.pte >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pte, ptep,
			    pte.pte);
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		/* 5 arg words */
		pv_mmu_ops.set_pte_at(mm, addr, ptep, pte);
	else
		PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte);
}
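
/*
 * Note: the PVOP_*CALL macros top out at four argument words, which is
 * why the "5 arg words" PAE cases above bypass them and call the
 * pv_mmu_ops hook directly through the structure.
 */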

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	pmdval_t val = native_pmd_val(pmd);

	if (sizeof(pmdval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp, val, (u64)val >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val);
}

#if CONFIG_PGTABLE_LEVELS >= 3
static inline pmd_t __pmd(pmdval_t val)
{
	pmdval_t ret;

	if (sizeof(pmdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.make_pmd,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.make_pmd,
				   val);

	return (pmd_t) { ret };
}

static inline pmdval_t pmd_val(pmd_t pmd)
{
	pmdval_t ret;

	if (sizeof(pmdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.pmd_val,
				   pmd.pmd, (u64)pmd.pmd >> 32);
	else
		ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.pmd_val,
				   pmd.pmd);

	return ret;
}

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	pudval_t val = native_pud_val(pud);

	if (sizeof(pudval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
			    val, (u64)val >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pud, pudp,
			    val);
}
#if CONFIG_PGTABLE_LEVELS >= 4
static inline pud_t __pud(pudval_t val)
{
	pudval_t ret;

	if (sizeof(pudval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.make_pud,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.make_pud,
				   val);

	return (pud_t) { ret };
}

static inline pudval_t pud_val(pud_t pud)
{
	pudval_t ret;

	if (sizeof(pudval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.pud_val,
				   pud.pud, (u64)pud.pud >> 32);
	else
		ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.pud_val,
				   pud.pud);

	return ret;
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
{
	p4dval_t val = native_p4d_val(p4d);

	if (sizeof(p4dval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_p4d, p4dp,
			    val, (u64)val >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_p4d, p4dp,
			    val);
}

#if CONFIG_PGTABLE_LEVELS >= 5

static inline p4d_t __p4d(p4dval_t val)
{
	p4dval_t ret = PVOP_CALLEE1(p4dval_t, pv_mmu_ops.make_p4d, val);

	return (p4d_t) { ret };
}

static inline p4dval_t p4d_val(p4d_t p4d)
{
	return PVOP_CALLEE1(p4dval_t, pv_mmu_ops.p4d_val, p4d.p4d);
}

static inline void __set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp, native_pgd_val(pgd));
}

#define set_pgd(pgdp, pgdval) do {					\
	if (pgtable_l5_enabled)						\
		__set_pgd(pgdp, pgdval);				\
	else								\
		set_p4d((p4d_t *)(pgdp), (p4d_t) { (pgdval).pgd });	\
} while (0)

#define pgd_clear(pgdp) do {						\
	if (pgtable_l5_enabled)						\
		set_pgd(pgdp, __pgd(0));				\
} while (0)
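
/*
 * With boot-time 5-level-paging support, a separate pgd level only exists
 * when pgtable_l5_enabled; otherwise the "pgd" slot is really the p4d
 * entry one level down, so writes are forwarded to set_p4d() and
 * pgd_clear() becomes a no-op.
 */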

#endif	/* CONFIG_PGTABLE_LEVELS >= 5 */

static inline void p4d_clear(p4d_t *p4dp)
{
	set_p4d(p4dp, __p4d(0));
}

#endif	/* CONFIG_PGTABLE_LEVELS >= 4 */

#endif	/* CONFIG_PGTABLE_LEVELS >= 3 */

#ifdef CONFIG_X86_PAE
/* Special-case pte-setting operations for PAE, which can't update a
   64-bit pte atomically */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
	PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep,
		    pte.pte, pte.pte >> 32);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp);
}
#else  /* !CONFIG_X86_PAE */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_pte(ptep, pte);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	set_pte_at(mm, addr, ptep, __pte(0));
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}
#endif	/* CONFIG_X86_PAE */

#define __HAVE_ARCH_START_CONTEXT_SWITCH
static inline void arch_start_context_switch(struct task_struct *prev)
{
	PVOP_VCALL1(pv_cpu_ops.start_context_switch, prev);
}

static inline void arch_end_context_switch(struct task_struct *next)
{
	PVOP_VCALL1(pv_cpu_ops.end_context_switch, next);
}

#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
	PVOP_VCALL0(pv_mmu_ops.lazy_mode.enter);
}

static inline void arch_leave_lazy_mmu_mode(void)
{
	PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
}

static inline void arch_flush_lazy_mmu_mode(void)
{
	PVOP_VCALL0(pv_mmu_ops.lazy_mode.flush);
}
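
/*
 * Lazy MMU mode lets a hypervisor batch a run of page-table updates (e.g.
 * during fork or mprotect) and commit them in a single hypercall instead
 * of trapping on every individual update; leave/flush force any pending
 * updates out.
 */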

static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
				phys_addr_t phys, pgprot_t flags)
{
	pv_mmu_ops.set_fixmap(idx, phys, flags);
}

#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)

static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock,
							 u32 val)
{
	PVOP_VCALL2(pv_lock_ops.queued_spin_lock_slowpath, lock, val);
}

static __always_inline void pv_queued_spin_unlock(struct qspinlock *lock)
{
	PVOP_VCALLEE1(pv_lock_ops.queued_spin_unlock, lock);
}

static __always_inline void pv_wait(u8 *ptr, u8 val)
{
	PVOP_VCALL2(pv_lock_ops.wait, ptr, val);
}

static __always_inline void pv_kick(int cpu)
{
	PVOP_VCALL1(pv_lock_ops.kick, cpu);
}

static __always_inline bool pv_vcpu_is_preempted(long cpu)
{
	return PVOP_CALLEE1(bool, pv_lock_ops.vcpu_is_preempted, cpu);
}
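
/*
 * pv_wait()/pv_kick() back the paravirt qspinlock slowpath: a vCPU that
 * cannot take the lock blocks in the hypervisor via pv_wait() and is
 * woken by the releasing CPU with pv_kick(), instead of burning cycles
 * spinning while the lock holder's vCPU is scheduled out.
 */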

#endif /* SMP && PARAVIRT_SPINLOCKS */

#ifdef CONFIG_X86_32
#define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
#define PV_RESTORE_REGS "popl %edx; popl %ecx;"

/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS		"pushl %ecx;"
#define PV_RESTORE_ALL_CALLER_REGS	"popl %ecx;"

#define PV_FLAGS_ARG "0"
#define PV_EXTRA_CLOBBERS
#define PV_VEXTRA_CLOBBERS
#else
/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS					\
	"push %rcx;"						\
	"push %rdx;"						\
	"push %rsi;"						\
	"push %rdi;"						\
	"push %r8;"						\
	"push %r9;"						\
	"push %r10;"						\
	"push %r11;"
#define PV_RESTORE_ALL_CALLER_REGS				\
	"pop %r11;"						\
	"pop %r10;"						\
	"pop %r9;"						\
	"pop %r8;"						\
	"pop %rdi;"						\
	"pop %rsi;"						\
	"pop %rdx;"						\
	"pop %rcx;"

/* We save some registers, but saving all of them would be too much; we
 * clobber all caller-saved registers except the argument parameter */
#define PV_SAVE_REGS "pushq %%rdi;"
#define PV_RESTORE_REGS "popq %%rdi;"
#define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx", "rdx", "rsi"
#define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx", "rdx", "rsi"
#define PV_FLAGS_ARG "D"
#endif

/*
 * Generate a thunk around a function which saves all caller-save
 * registers except for the return value. This allows C functions to
 * be called from assembler code where fewer than normal registers are
 * available. It may also help code generation around calls from C
 * code if the common case doesn't use many registers.
 *
 * When a callee is wrapped in a thunk, the caller can assume that all
 * arg regs and all scratch registers are preserved across the
 * call. The return value in rax/eax will not be saved, even for void
 * functions.
 */
#define PV_THUNK_NAME(func) "__raw_callee_save_" #func
#define PV_CALLEE_SAVE_REGS_THUNK(func)					\
	extern typeof(func) __raw_callee_save_##func;			\
									\
	asm(".pushsection .text;"					\
	    ".globl " PV_THUNK_NAME(func) ";"				\
	    ".type " PV_THUNK_NAME(func) ", @function;"			\
	    PV_THUNK_NAME(func) ":"					\
	    FRAME_BEGIN							\
	    PV_SAVE_ALL_CALLER_REGS					\
	    "call " #func ";"						\
	    PV_RESTORE_ALL_CALLER_REGS					\
	    FRAME_END							\
	    "ret;"							\
	    ".popsection")

/* Get a reference to a callee-save function */
#define PV_CALLEE_SAVE(func)						\
	((struct paravirt_callee_save) { __raw_callee_save_##func })

/* Promise that "func" already uses the right calling convention */
#define __PV_IS_CALLEE_SAVE(func)			\
	((struct paravirt_callee_save) { func })

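/*
 * Typical use (sketch): wrap a C handler so it can be installed as a
 * callee-save pvop -- "my_save_fl" is illustrative only:
 *
 *	static unsigned long my_save_fl(void);
 *	PV_CALLEE_SAVE_REGS_THUNK(my_save_fl);
 *	...
 *	pv_irq_ops.save_fl = PV_CALLEE_SAVE(my_save_fl);
 */
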
static inline notrace unsigned long arch_local_save_flags(void)
{
	return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl);
}

static inline notrace void arch_local_irq_restore(unsigned long f)
{
	PVOP_VCALLEE1(pv_irq_ops.restore_fl, f);
}

static inline notrace void arch_local_irq_disable(void)
{
	PVOP_VCALLEE0(pv_irq_ops.irq_disable);
}

static inline notrace void arch_local_irq_enable(void)
{
	PVOP_VCALLEE0(pv_irq_ops.irq_enable);
}

static inline notrace unsigned long arch_local_irq_save(void)
{
	unsigned long f;

	f = arch_local_save_flags();
	arch_local_irq_disable();
	return f;
}

/* Make sure as little as possible of this mess escapes. */
#undef PARAVIRT_CALL
#undef __PVOP_CALL
#undef __PVOP_VCALL
#undef PVOP_VCALL0
#undef PVOP_CALL0
#undef PVOP_VCALL1
#undef PVOP_CALL1
#undef PVOP_VCALL2
#undef PVOP_CALL2
#undef PVOP_VCALL3
#undef PVOP_CALL3
#undef PVOP_VCALL4
#undef PVOP_CALL4

extern void default_banner(void);

#else  /* __ASSEMBLY__ */

#define _PVSITE(ptype, clobbers, ops, word, algn)	\
771:;							\
	ops;						\
772:;							\
	.pushsection .parainstructions,"a";		\
	 .align	algn;					\
	 word 771b;					\
	 .byte ptype;					\
	 .byte 772b-771b;				\
	 .short	clobbers;				\
	.popsection
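
/*
 * Each _PVSITE use emits the instruction(s) at 771: and records a
 * struct paravirt_patch_site entry (site address, patch type, length and
 * clobber mask) in .parainstructions, which apply_paravirt() walks at
 * boot to rewrite the site.
 */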

#define COND_PUSH(set, mask, reg)			\
	.if ((~(set)) & mask); push %reg; .endif
#define COND_POP(set, mask, reg)			\
	.if ((~(set)) & mask); pop %reg; .endif
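
/*
 * Note the inversion: a register is pushed/popped only when its bit is
 * clear in "set", i.e. only registers the call site must preserve get
 * spilled; anything listed as clobberable is left alone.
 */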

#ifdef CONFIG_X86_64

#define PV_SAVE_REGS(set)			\
	COND_PUSH(set, CLBR_RAX, rax);		\
	COND_PUSH(set, CLBR_RCX, rcx);		\
	COND_PUSH(set, CLBR_RDX, rdx);		\
	COND_PUSH(set, CLBR_RSI, rsi);		\
	COND_PUSH(set, CLBR_RDI, rdi);		\
	COND_PUSH(set, CLBR_R8, r8);		\
	COND_PUSH(set, CLBR_R9, r9);		\
	COND_PUSH(set, CLBR_R10, r10);		\
	COND_PUSH(set, CLBR_R11, r11)
#define PV_RESTORE_REGS(set)			\
	COND_POP(set, CLBR_R11, r11);		\
	COND_POP(set, CLBR_R10, r10);		\
	COND_POP(set, CLBR_R9, r9);		\
	COND_POP(set, CLBR_R8, r8);		\
	COND_POP(set, CLBR_RDI, rdi);		\
	COND_POP(set, CLBR_RSI, rsi);		\
	COND_POP(set, CLBR_RDX, rdx);		\
	COND_POP(set, CLBR_RCX, rcx);		\
	COND_POP(set, CLBR_RAX, rax)

#define PARA_PATCH(struct, off)		((PARAVIRT_PATCH_##struct + (off)) / 8)
#define PARA_SITE(ptype, clobbers, ops)	_PVSITE(ptype, clobbers, ops, .quad, 8)
#define PARA_INDIRECT(addr)	*addr(%rip)
#else
#define PV_SAVE_REGS(set)			\
	COND_PUSH(set, CLBR_EAX, eax);		\
	COND_PUSH(set, CLBR_EDI, edi);		\
	COND_PUSH(set, CLBR_ECX, ecx);		\
	COND_PUSH(set, CLBR_EDX, edx)
#define PV_RESTORE_REGS(set)			\
	COND_POP(set, CLBR_EDX, edx);		\
	COND_POP(set, CLBR_ECX, ecx);		\
	COND_POP(set, CLBR_EDI, edi);		\
	COND_POP(set, CLBR_EAX, eax)

#define PARA_PATCH(struct, off)		((PARAVIRT_PATCH_##struct + (off)) / 4)
#define PARA_SITE(ptype, clobbers, ops)	_PVSITE(ptype, clobbers, ops, .long, 4)
#define PARA_INDIRECT(addr)	*%cs:addr
#endif

#define INTERRUPT_RETURN						\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE,	\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret))

#define DISABLE_INTERRUPTS(clobbers)					\
	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers,	\
		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);		\
		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable);	\
		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)

#define ENABLE_INTERRUPTS(clobbers)					\
	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers,	\
		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);		\
		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable);	\
		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
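
/*
 * On native hardware these sites are patched at boot into plain
 * "cli"/"sti"/"iret"; under a hypervisor they remain calls into its
 * pv_irq_ops/pv_cpu_ops replacements.
 */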

#ifdef CONFIG_X86_32
#define GET_CR0_INTO_EAX				\
	push %ecx; push %edx;				\
	call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);	\
	pop %edx; pop %ecx
#else	/* !CONFIG_X86_32 */

/*
 * If swapgs is used while the userspace stack is still current,
 * there's no way to call a pvop.  The PV replacement *must* be
 * inlined, or the swapgs instruction must be trapped and emulated.
 */
#define SWAPGS_UNSAFE_STACK						\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,	\
		  swapgs)

/*
 * Note: swapgs is very special: in practice it is either implemented as
 * a single "swapgs" instruction or as something equally minimal.  Either
 * way, we don't need to save any registers for it.
 */
#define SWAPGS								\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,	\
		  call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs)		\
		 )

#define GET_CR2_INTO_RAX				\
	call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2)

#define USERGS_SYSRET64							\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64),	\
		  CLBR_NONE,						\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))

#ifdef CONFIG_DEBUG_ENTRY
#define SAVE_FLAGS(clobbers)						\
	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_save_fl), clobbers,	\
		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);		\
		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_save_fl);	\
		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
#endif

#endif /* CONFIG_X86_32 */

#endif /* __ASSEMBLY__ */
#else  /* CONFIG_PARAVIRT */
# define default_banner x86_init_noop
#ifndef __ASSEMBLY__
static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
					  struct mm_struct *mm)
{
}

static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
{
}
#endif /* __ASSEMBLY__ */
#endif /* !CONFIG_PARAVIRT */
#endif /* _ASM_X86_PARAVIRT_H */