blob: bde11696b893fdab2ad262031f363fc473e994ae [file] [log] [blame]
H. Peter Anvin1965aae2008-10-22 22:26:29 -07001#ifndef _ASM_X86_DESC_H
2#define _ASM_X86_DESC_H
Glauber de Oliveira Costa80fbb692008-01-30 13:31:13 +01003
Glauber de Oliveira Costa80fbb692008-01-30 13:31:13 +01004#include <asm/desc_defs.h>
5#include <asm/ldt.h>
Glauber de Oliveira Costa881c2972008-01-30 13:31:14 +01006#include <asm/mmu.h>
Thomas Garnier69218e42017-03-14 10:05:07 -07007#include <asm/fixmap.h>
Ingo Molnar9a3865b2011-05-27 09:29:32 +02008
Glauber de Oliveira Costa54cd0ea2008-01-30 13:31:14 +01009#include <linux/smp.h>
Alex Shic6ae41e2012-05-11 15:35:27 +080010#include <linux/percpu.h>
Glauber de Oliveira Costa80fbb692008-01-30 13:31:13 +010011
Ingo Molnar9a3865b2011-05-27 09:29:32 +020012static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *info)
Glauber de Oliveira Costa80fbb692008-01-30 13:31:13 +010013{
Ingo Molnar9a3865b2011-05-27 09:29:32 +020014 desc->limit0 = info->limit & 0x0ffff;
Glauber de Oliveira Costa80fbb692008-01-30 13:31:13 +010015
Ingo Molnar9a3865b2011-05-27 09:29:32 +020016 desc->base0 = (info->base_addr & 0x0000ffff);
17 desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
18
19 desc->type = (info->read_exec_only ^ 1) << 1;
20 desc->type |= info->contents << 2;
21
22 desc->s = 1;
23 desc->dpl = 0x3;
24 desc->p = info->seg_not_present ^ 1;
25 desc->limit = (info->limit & 0xf0000) >> 16;
26 desc->avl = info->useable;
27 desc->d = info->seg_32bit;
28 desc->g = info->limit_in_pages;
29
30 desc->base2 = (info->base_addr & 0xff000000) >> 24;
Jeremy Fitzhardinge64f53a02008-07-27 08:42:32 -070031 /*
Andy Lutomirski318f5a22011-08-03 09:31:53 -040032 * Don't allow setting of the lm bit. It would confuse
33 * user_64bit_mode and would get overridden by sysret anyway.
Jeremy Fitzhardinge64f53a02008-07-27 08:42:32 -070034 */
Ingo Molnar9a3865b2011-05-27 09:29:32 +020035 desc->l = 0;
Glauber de Oliveira Costa80fbb692008-01-30 13:31:13 +010036}
37
Glauber de Oliveira Costa881c2972008-01-30 13:31:14 +010038extern struct desc_ptr idt_descr;
39extern gate_desc idt_table[];
Kees Cook404f6aa2016-08-08 16:29:06 -070040extern const struct desc_ptr debug_idt_descr;
Seiji Aguchi629f4f92013-06-20 11:45:44 -040041extern gate_desc debug_idt_table[];
Thomas Garnier69218e42017-03-14 10:05:07 -070042extern pgprot_t pg_fixmap_gdt_flags;
Glauber de Oliveira Costa80fbb692008-01-30 13:31:13 +010043
Glauber Costaa9390982008-05-28 16:19:53 -070044struct gdt_page {
45 struct desc_struct gdt[GDT_ENTRIES];
46} __attribute__((aligned(PAGE_SIZE)));
Ingo Molnar9a3865b2011-05-27 09:29:32 +020047
David Howells9b8de742009-04-21 23:00:24 +010048DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
Glauber Costaa9390982008-05-28 16:19:53 -070049
Thomas Garnier69218e42017-03-14 10:05:07 -070050/* Provide the original GDT */
51static inline struct desc_struct *get_cpu_gdt_rw(unsigned int cpu)
Glauber Costaa9390982008-05-28 16:19:53 -070052{
53 return per_cpu(gdt_page, cpu).gdt;
54}
55
Thomas Garnier69218e42017-03-14 10:05:07 -070056static inline unsigned long get_cpu_gdt_rw_vaddr(unsigned int cpu)
57{
58 return (unsigned long)get_cpu_gdt_rw(cpu);
59}
60
61/* Provide the current original GDT */
62static inline struct desc_struct *get_current_gdt_rw(void)
63{
64 return this_cpu_ptr(&gdt_page)->gdt;
65}
66
67static inline unsigned long get_current_gdt_rw_vaddr(void)
68{
69 return (unsigned long)get_current_gdt_rw();
70}
71
72/* Get the fixmap index for a specific processor */
73static inline unsigned int get_cpu_gdt_ro_index(int cpu)
74{
75 return FIX_GDT_REMAP_BEGIN + cpu;
76}
77
78/* Provide the fixmap address of the remapped GDT */
79static inline struct desc_struct *get_cpu_gdt_ro(int cpu)
80{
81 unsigned int idx = get_cpu_gdt_ro_index(cpu);
82 return (struct desc_struct *)__fix_to_virt(idx);
83}
84
85static inline unsigned long get_cpu_gdt_ro_vaddr(int cpu)
86{
87 return (unsigned long)get_cpu_gdt_ro(cpu);
88}
89
90/* Provide the current read-only GDT */
91static inline struct desc_struct *get_current_gdt_ro(void)
92{
93 return get_cpu_gdt_ro(smp_processor_id());
94}
95
96static inline unsigned long get_current_gdt_ro_vaddr(void)
97{
98 return (unsigned long)get_current_gdt_ro();
99}
100
Andy Lutomirskiaa4ea67552017-03-22 14:32:30 -0700101/* Provide the physical address of the GDT page. */
102static inline phys_addr_t get_cpu_gdt_paddr(unsigned int cpu)
103{
104 return per_cpu_ptr_to_phys(get_cpu_gdt_rw(cpu));
105}
106
Glauber de Oliveira Costa54cd0ea2008-01-30 13:31:14 +0100107#ifdef CONFIG_X86_64
Glauber de Oliveira Costa507f90c2008-01-30 13:31:14 +0100108
109static inline void pack_gate(gate_desc *gate, unsigned type, unsigned long func,
110 unsigned dpl, unsigned ist, unsigned seg)
111{
Ingo Molnar9a3865b2011-05-27 09:29:32 +0200112 gate->offset_low = PTR_LOW(func);
113 gate->segment = __KERNEL_CS;
114 gate->ist = ist;
115 gate->p = 1;
116 gate->dpl = dpl;
117 gate->zero0 = 0;
118 gate->zero1 = 0;
119 gate->type = type;
120 gate->offset_middle = PTR_MIDDLE(func);
121 gate->offset_high = PTR_HIGH(func);
Glauber de Oliveira Costa507f90c2008-01-30 13:31:14 +0100122}
123
Glauber de Oliveira Costa54cd0ea2008-01-30 13:31:14 +0100124#else
Glauber de Oliveira Costa507f90c2008-01-30 13:31:14 +0100125static inline void pack_gate(gate_desc *gate, unsigned char type,
Joe Perchesc1773a12008-03-23 01:01:58 -0700126 unsigned long base, unsigned dpl, unsigned flags,
127 unsigned short seg)
Glauber de Oliveira Costa507f90c2008-01-30 13:31:14 +0100128{
129 gate->a = (seg << 16) | (base & 0xffff);
Ingo Molnar9a3865b2011-05-27 09:29:32 +0200130 gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
Glauber de Oliveira Costa507f90c2008-01-30 13:31:14 +0100131}
132
Glauber de Oliveira Costa54cd0ea2008-01-30 13:31:14 +0100133#endif
134
Glauber de Oliveira Costa746ff602008-01-30 13:31:27 +0100135static inline int desc_empty(const void *ptr)
136{
137 const u32 *desc = ptr;
Ingo Molnar9a3865b2011-05-27 09:29:32 +0200138
Glauber de Oliveira Costa746ff602008-01-30 13:31:27 +0100139 return !(desc[0] | desc[1]);
140}
141
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
/* No paravirt: descriptor-table operations go straight to the native code. */
#define load_TR_desc()				native_load_tr_desc()
#define load_gdt(dtr)				native_load_gdt(dtr)
#define load_idt(dtr)				native_load_idt(dtr)
#define load_tr(tr)				asm volatile("ltr %0"::"m" (tr))
#define load_ldt(ldt)				asm volatile("lldt %0"::"m" (ldt))

#define store_gdt(dtr)				native_store_gdt(dtr)
#define store_idt(dtr)				native_store_idt(dtr)
#define store_tr(tr)				(tr = native_store_tr())

#define load_TLS(t, cpu)			native_load_tls(t, cpu)
#define set_ldt					native_set_ldt

#define write_ldt_entry(dt, entry, desc)	native_write_ldt_entry(dt, entry, desc)
#define write_gdt_entry(dt, entry, desc, type)	native_write_gdt_entry(dt, entry, desc, type)
#define write_idt_entry(dt, entry, g)		native_write_idt_entry(dt, entry, g)

/* Hooks for hypervisors that must (un)map LDT pages; no-ops natively. */
static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
}

static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
{
}
#endif	/* CONFIG_PARAVIRT */

#define store_ldt(ldt) asm("sldt %0" : "=m"(ldt))
172
Ingo Molnar9a3865b2011-05-27 09:29:32 +0200173static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
Glauber de Oliveira Costa54cd0ea2008-01-30 13:31:14 +0100174{
175 memcpy(&idt[entry], gate, sizeof(*gate));
176}
177
Ingo Molnar9a3865b2011-05-27 09:29:32 +0200178static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
Glauber de Oliveira Costa54cd0ea2008-01-30 13:31:14 +0100179{
180 memcpy(&ldt[entry], desc, 8);
181}
182
Ingo Molnar9a3865b2011-05-27 09:29:32 +0200183static inline void
184native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int type)
Glauber de Oliveira Costa54cd0ea2008-01-30 13:31:14 +0100185{
186 unsigned int size;
Ingo Molnar9a3865b2011-05-27 09:29:32 +0200187
Glauber de Oliveira Costa54cd0ea2008-01-30 13:31:14 +0100188 switch (type) {
Ingo Molnar9a3865b2011-05-27 09:29:32 +0200189 case DESC_TSS: size = sizeof(tss_desc); break;
190 case DESC_LDT: size = sizeof(ldt_desc); break;
191 default: size = sizeof(*gdt); break;
Glauber de Oliveira Costa54cd0ea2008-01-30 13:31:14 +0100192 }
Ingo Molnar9a3865b2011-05-27 09:29:32 +0200193
Glauber de Oliveira Costa54cd0ea2008-01-30 13:31:14 +0100194 memcpy(&gdt[entry], desc, size);
195}
196
Glauber de Oliveira Costa54cd0ea2008-01-30 13:31:14 +0100197static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
198 unsigned long limit, unsigned char type,
199 unsigned char flags)
200{
201 desc->a = ((base & 0xffff) << 16) | (limit & 0xffff);
202 desc->b = (base & 0xff000000) | ((base & 0xff0000) >> 16) |
Joe Perchesc1773a12008-03-23 01:01:58 -0700203 (limit & 0x000f0000) | ((type & 0xff) << 8) |
204 ((flags & 0xf) << 20);
Glauber de Oliveira Costa54cd0ea2008-01-30 13:31:14 +0100205 desc->p = 1;
206}
207
Glauber de Oliveira Costa54cd0ea2008-01-30 13:31:14 +0100208
/* Fill in a TSS/LDT descriptor: 16 bytes on 64-bit, 8 bytes on 32-bit. */
static inline void set_tssldt_descriptor(void *d, unsigned long addr, unsigned type, unsigned size)
{
#ifdef CONFIG_X86_64
	struct ldttss_desc64 *desc = d;

	memset(desc, 0, sizeof(*desc));

	desc->limit0	= size & 0xFFFF;
	desc->base0	= PTR_LOW(addr);
	desc->base1	= PTR_MIDDLE(addr) & 0xFF;
	desc->type	= type;
	desc->p		= 1;
	desc->limit1	= (size >> 16) & 0xF;
	desc->base2	= (PTR_MIDDLE(addr) >> 8) & 0xFF;
	desc->base3	= PTR_HIGH(addr);
#else
	/* 0x80 sets the present bit alongside the descriptor type. */
	pack_descriptor((struct desc_struct *)d, addr, size, 0x80 | type, 0);
#endif
}
228
229static inline void __set_tss_desc(unsigned cpu, unsigned int entry, void *addr)
230{
Thomas Garnier69218e42017-03-14 10:05:07 -0700231 struct desc_struct *d = get_cpu_gdt_rw(cpu);
Glauber de Oliveira Costac81c6ca2008-01-30 13:31:14 +0100232 tss_desc tss;
233
Glauber de Oliveira Costaf6e0eba2008-01-30 13:31:20 +0100234 set_tssldt_descriptor(&tss, (unsigned long)addr, DESC_TSS,
Andy Lutomirski4f53ab12017-02-20 08:56:09 -0800235 __KERNEL_TSS_LIMIT);
Glauber de Oliveira Costac81c6ca2008-01-30 13:31:14 +0100236 write_gdt_entry(d, entry, &tss, DESC_TSS);
237}
238
239#define set_tss_desc(cpu, addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr)
240
Glauber de Oliveira Costa54cd0ea2008-01-30 13:31:14 +0100241static inline void native_set_ldt(const void *addr, unsigned int entries)
242{
243 if (likely(entries == 0))
Joe Perchesc1773a12008-03-23 01:01:58 -0700244 asm volatile("lldt %w0"::"q" (0));
Glauber de Oliveira Costa54cd0ea2008-01-30 13:31:14 +0100245 else {
246 unsigned cpu = smp_processor_id();
247 ldt_desc ldt;
248
Michael Karcher5ac37f82008-07-11 18:04:46 +0200249 set_tssldt_descriptor(&ldt, (unsigned long)addr, DESC_LDT,
250 entries * LDT_ENTRY_SIZE - 1);
Thomas Garnier69218e42017-03-14 10:05:07 -0700251 write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_LDT,
Glauber de Oliveira Costa54cd0ea2008-01-30 13:31:14 +0100252 &ldt, DESC_LDT);
Joe Perchesc1773a12008-03-23 01:01:58 -0700253 asm volatile("lldt %w0"::"q" (GDT_ENTRY_LDT*8));
Glauber de Oliveira Costa54cd0ea2008-01-30 13:31:14 +0100254 }
255}
256
Thomas Garnier45fc8752017-03-14 10:05:08 -0700257static inline void native_load_gdt(const struct desc_ptr *dtr)
258{
259 asm volatile("lgdt %0"::"m" (*dtr));
260}
261
262static inline void native_load_idt(const struct desc_ptr *dtr)
263{
264 asm volatile("lidt %0"::"m" (*dtr));
265}
266
267static inline void native_store_gdt(struct desc_ptr *dtr)
268{
269 asm volatile("sgdt %0":"=m" (*dtr));
270}
271
272static inline void native_store_idt(struct desc_ptr *dtr)
273{
274 asm volatile("sidt %0":"=m" (*dtr));
275}
276
277/*
278 * The LTR instruction marks the TSS GDT entry as busy. On 64-bit, the GDT is
279 * a read-only remapping. To prevent a page fault, the GDT is switched to the
280 * original writeable version when needed.
281 */
282#ifdef CONFIG_X86_64
283static inline void native_load_tr_desc(void)
284{
285 struct desc_ptr gdt;
286 int cpu = raw_smp_processor_id();
287 bool restore = 0;
288 struct desc_struct *fixmap_gdt;
289
290 native_store_gdt(&gdt);
291 fixmap_gdt = get_cpu_gdt_ro(cpu);
292
293 /*
294 * If the current GDT is the read-only fixmap, swap to the original
295 * writeable version. Swap back at the end.
296 */
297 if (gdt.address == (unsigned long)fixmap_gdt) {
298 load_direct_gdt(cpu);
299 restore = 1;
300 }
301 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
302 if (restore)
303 load_fixmap_gdt(cpu);
304}
305#else
Glauber de Oliveira Costa54cd0ea2008-01-30 13:31:14 +0100306static inline void native_load_tr_desc(void)
307{
308 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
309}
Thomas Garnier45fc8752017-03-14 10:05:08 -0700310#endif
311
312static inline unsigned long native_store_tr(void)
313{
314 unsigned long tr;
315
316 asm volatile("str %0":"=r" (tr));
317
318 return tr;
319}
320
321static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
322{
323 struct desc_struct *gdt = get_cpu_gdt_rw(cpu);
324 unsigned int i;
325
326 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
327 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
328}
Glauber de Oliveira Costa54cd0ea2008-01-30 13:31:14 +0100329
Andy Lutomirskib7ceaec2017-02-22 07:36:16 -0800330DECLARE_PER_CPU(bool, __tss_limit_invalid);
331
Andy Lutomirskib7ffc442017-02-20 08:56:14 -0800332static inline void force_reload_TR(void)
333{
Thomas Garnier69218e42017-03-14 10:05:07 -0700334 struct desc_struct *d = get_current_gdt_rw();
Andy Lutomirskib7ffc442017-02-20 08:56:14 -0800335 tss_desc tss;
336
337 memcpy(&tss, &d[GDT_ENTRY_TSS], sizeof(tss_desc));
338
339 /*
340 * LTR requires an available TSS, and the TSS is currently
341 * busy. Make it be available so that LTR will work.
342 */
343 tss.type = DESC_TSS;
344 write_gdt_entry(d, GDT_ENTRY_TSS, &tss, DESC_TSS);
345
346 load_TR_desc();
Andy Lutomirskib7ceaec2017-02-22 07:36:16 -0800347 this_cpu_write(__tss_limit_invalid, false);
Andy Lutomirskib7ffc442017-02-20 08:56:14 -0800348}
349
Andy Lutomirskib7ceaec2017-02-22 07:36:16 -0800350/*
351 * Call this if you need the TSS limit to be correct, which should be the case
352 * if and only if you have TIF_IO_BITMAP set or you're switching to a task
353 * with TIF_IO_BITMAP set.
354 */
355static inline void refresh_tss_limit(void)
Andy Lutomirskib7ffc442017-02-20 08:56:14 -0800356{
357 DEBUG_LOCKS_WARN_ON(preemptible());
358
Andy Lutomirskib7ceaec2017-02-22 07:36:16 -0800359 if (unlikely(this_cpu_read(__tss_limit_invalid)))
Andy Lutomirskib7ffc442017-02-20 08:56:14 -0800360 force_reload_TR();
Andy Lutomirskib7ffc442017-02-20 08:56:14 -0800361}
362
363/*
364 * If you do something evil that corrupts the cached TSS limit (I'm looking
365 * at you, VMX exits), call this function.
366 *
367 * The optimization here is that the TSS limit only matters for Linux if the
368 * IO bitmap is in use. If the TSS limit gets forced to its minimum value,
369 * everything works except that IO bitmap will be ignored and all CPL 3 IO
370 * instructions will #GP, which is exactly what we want for normal tasks.
371 */
372static inline void invalidate_tss_limit(void)
373{
374 DEBUG_LOCKS_WARN_ON(preemptible());
375
376 if (unlikely(test_thread_flag(TIF_IO_BITMAP)))
377 force_reload_TR();
378 else
Andy Lutomirskib7ceaec2017-02-22 07:36:16 -0800379 this_cpu_write(__tss_limit_invalid, true);
Andy Lutomirskib7ffc442017-02-20 08:56:14 -0800380}
381
Andy Lutomirskie30ab182015-01-22 11:27:58 -0800382/* This intentionally ignores lm, since 32-bit apps don't have that field. */
383#define LDT_empty(info) \
Joe Perchesc1773a12008-03-23 01:01:58 -0700384 ((info)->base_addr == 0 && \
385 (info)->limit == 0 && \
386 (info)->contents == 0 && \
387 (info)->read_exec_only == 1 && \
388 (info)->seg_32bit == 0 && \
389 (info)->limit_in_pages == 0 && \
390 (info)->seg_not_present == 1 && \
391 (info)->useable == 0)
Glauber de Oliveira Costa881c2972008-01-30 13:31:14 +0100392
Andy Lutomirski3669ef92015-01-22 11:27:59 -0800393/* Lots of programs expect an all-zero user_desc to mean "no segment at all". */
394static inline bool LDT_zero(const struct user_desc *info)
395{
396 return (info->base_addr == 0 &&
397 info->limit == 0 &&
398 info->contents == 0 &&
399 info->read_exec_only == 0 &&
400 info->seg_32bit == 0 &&
401 info->limit_in_pages == 0 &&
402 info->seg_not_present == 0 &&
403 info->useable == 0);
404}
405
Glauber de Oliveira Costa881c2972008-01-30 13:31:14 +0100406static inline void clear_LDT(void)
407{
408 set_ldt(NULL, 0);
409}
410
Roland McGrath1bd57182008-01-30 13:31:51 +0100411static inline unsigned long get_desc_base(const struct desc_struct *desc)
Glauber de Oliveira Costacc697852008-01-30 13:31:14 +0100412{
Chris Lalancette2c759102009-11-05 11:47:08 +0100413 return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
Glauber de Oliveira Costacc697852008-01-30 13:31:14 +0100414}
Roland McGrath1bd57182008-01-30 13:31:51 +0100415
Akinobu Mita57594742009-07-19 00:11:06 +0900416static inline void set_desc_base(struct desc_struct *desc, unsigned long base)
417{
418 desc->base0 = base & 0xffff;
419 desc->base1 = (base >> 16) & 0xff;
420 desc->base2 = (base >> 24) & 0xff;
421}
422
Roland McGrath1bd57182008-01-30 13:31:51 +0100423static inline unsigned long get_desc_limit(const struct desc_struct *desc)
424{
425 return desc->limit0 | (desc->limit << 16);
426}
427
Akinobu Mita57594742009-07-19 00:11:06 +0900428static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
429{
430 desc->limit0 = limit & 0xffff;
431 desc->limit = (limit >> 16) & 0xf;
432}
433
#ifdef CONFIG_X86_64
/* Install a DPL-0, IST-less interrupt gate in the debug IDT. */
static inline void set_nmi_gate(int gate, void *addr)
{
	gate_desc s;

	pack_gate(&s, GATE_INTERRUPT, (unsigned long)addr, 0, 0, __KERNEL_CS);
	write_idt_entry(debug_idt_table, gate, &s);
}
#endif
443
Seiji Aguchicf910e82013-06-20 11:46:53 -0400444#ifdef CONFIG_TRACING
445extern struct desc_ptr trace_idt_descr;
446extern gate_desc trace_idt_table[];
447static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
448{
449 write_idt_entry(trace_idt_table, entry, gate);
450}
Seiji Aguchi25c74b12013-10-30 16:37:00 -0400451
452static inline void _trace_set_gate(int gate, unsigned type, void *addr,
453 unsigned dpl, unsigned ist, unsigned seg)
454{
455 gate_desc s;
456
457 pack_gate(&s, type, (unsigned long)addr, dpl, ist, seg);
458 /*
459 * does not need to be atomic because it is only done once at
460 * setup time
461 */
462 write_trace_idt_entry(gate, &s);
463}
Seiji Aguchicf910e82013-06-20 11:46:53 -0400464#else
465static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
466{
467}
Seiji Aguchi25c74b12013-10-30 16:37:00 -0400468
469#define _trace_set_gate(gate, type, addr, dpl, ist, seg)
Seiji Aguchicf910e82013-06-20 11:46:53 -0400470#endif
471
Glauber de Oliveira Costa507f90c2008-01-30 13:31:14 +0100472static inline void _set_gate(int gate, unsigned type, void *addr,
Joe Perchesc1773a12008-03-23 01:01:58 -0700473 unsigned dpl, unsigned ist, unsigned seg)
Glauber de Oliveira Costa507f90c2008-01-30 13:31:14 +0100474{
475 gate_desc s;
Ingo Molnar9a3865b2011-05-27 09:29:32 +0200476
Glauber de Oliveira Costa507f90c2008-01-30 13:31:14 +0100477 pack_gate(&s, type, (unsigned long)addr, dpl, ist, seg);
478 /*
479 * does not need to be atomic because it is only done once at
480 * setup time
481 */
482 write_idt_entry(idt_table, gate, &s);
Seiji Aguchicf910e82013-06-20 11:46:53 -0400483 write_trace_idt_entry(gate, &s);
Glauber de Oliveira Costa507f90c2008-01-30 13:31:14 +0100484}
485
/*
 * This needs to use 'idt_table' rather than 'idt', and
 * thus use the _nonmapped_ version of the IDT, as the
 * Pentium F0 0F bugfix can have resulted in the mapped
 * IDT being write-protected.
 */
#define set_intr_gate_notrace(n, addr)					\
	do {								\
		BUG_ON((unsigned)n > 0xFF);				\
		_set_gate(n, GATE_INTERRUPT, (void *)addr, 0, 0,	\
			  __KERNEL_CS);					\
	} while (0)

#define set_intr_gate(n, addr)						\
	do {								\
		set_intr_gate_notrace(n, addr);				\
		_trace_set_gate(n, GATE_INTERRUPT, (void *)trace_##addr,\
				0, 0, __KERNEL_CS);			\
	} while (0)
Glauber de Oliveira Costa507f90c2008-01-30 13:31:14 +0100505
extern int first_system_vector;
/* used_vectors is BITMAP for irq is not managed by percpu vector_irq */
extern unsigned long used_vectors[];

/* Claim @vector for a system interrupt; double allocation is fatal. */
static inline void alloc_system_vector(int vector)
{
	if (test_bit(vector, used_vectors))
		BUG();

	set_bit(vector, used_vectors);
	if (first_system_vector > vector)
		first_system_vector = vector;
}

#define alloc_intr_gate(n, addr)	\
	do {				\
		alloc_system_vector(n);	\
		set_intr_gate(n, addr);	\
	} while (0)
526
Glauber de Oliveira Costa507f90c2008-01-30 13:31:14 +0100527/*
528 * This routine sets up an interrupt gate at directory privilege level 3.
529 */
530static inline void set_system_intr_gate(unsigned int n, void *addr)
531{
532 BUG_ON((unsigned)n > 0xFF);
533 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
534}
535
Alexander van Heukelum699d2932008-10-03 22:00:32 +0200536static inline void set_system_trap_gate(unsigned int n, void *addr)
537{
538 BUG_ON((unsigned)n > 0xFF);
539 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
540}
541
Glauber de Oliveira Costa507f90c2008-01-30 13:31:14 +0100542static inline void set_trap_gate(unsigned int n, void *addr)
543{
544 BUG_ON((unsigned)n > 0xFF);
545 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
546}
547
Glauber de Oliveira Costa507f90c2008-01-30 13:31:14 +0100548static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
549{
550 BUG_ON((unsigned)n > 0xFF);
551 _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
552}
553
554static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
555{
556 BUG_ON((unsigned)n > 0xFF);
557 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
558}
559
Alexander van Heukelum699d2932008-10-03 22:00:32 +0200560static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
Glauber de Oliveira Costa507f90c2008-01-30 13:31:14 +0100561{
562 BUG_ON((unsigned)n > 0xFF);
563 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
564}
Glauber de Oliveira Costacc697852008-01-30 13:31:14 +0100565
Seiji Aguchi629f4f92013-06-20 11:45:44 -0400566#ifdef CONFIG_X86_64
567DECLARE_PER_CPU(u32, debug_idt_ctr);
568static inline bool is_debug_idt_enabled(void)
569{
570 if (this_cpu_read(debug_idt_ctr))
571 return true;
572
573 return false;
574}
575
576static inline void load_debug_idt(void)
577{
578 load_idt((const struct desc_ptr *)&debug_idt_descr);
579}
580#else
581static inline bool is_debug_idt_enabled(void)
582{
583 return false;
584}
585
586static inline void load_debug_idt(void)
587{
588}
589#endif
590
Seiji Aguchicf910e82013-06-20 11:46:53 -0400591#ifdef CONFIG_TRACING
592extern atomic_t trace_idt_ctr;
593static inline bool is_trace_idt_enabled(void)
594{
595 if (atomic_read(&trace_idt_ctr))
596 return true;
597
598 return false;
599}
600
601static inline void load_trace_idt(void)
602{
603 load_idt((const struct desc_ptr *)&trace_idt_descr);
604}
605#else
606static inline bool is_trace_idt_enabled(void)
607{
608 return false;
609}
610
611static inline void load_trace_idt(void)
612{
613}
614#endif
615
Seiji Aguchi629f4f92013-06-20 11:45:44 -0400616/*
Steven Rostedt (Red Hat)2b4bc782013-06-22 13:16:19 -0400617 * The load_current_idt() must be called with interrupts disabled
Seiji Aguchi629f4f92013-06-20 11:45:44 -0400618 * to avoid races. That way the IDT will always be set back to the expected
Steven Rostedt (Red Hat)2b4bc782013-06-22 13:16:19 -0400619 * descriptor. It's also called when a CPU is being initialized, and
620 * that doesn't need to disable interrupts, as nothing should be
621 * bothering the CPU then.
Seiji Aguchi629f4f92013-06-20 11:45:44 -0400622 */
623static inline void load_current_idt(void)
624{
Seiji Aguchi629f4f92013-06-20 11:45:44 -0400625 if (is_debug_idt_enabled())
626 load_debug_idt();
Seiji Aguchicf910e82013-06-20 11:46:53 -0400627 else if (is_trace_idt_enabled())
628 load_trace_idt();
Seiji Aguchi629f4f92013-06-20 11:45:44 -0400629 else
630 load_idt((const struct desc_ptr *)&idt_descr);
Seiji Aguchi629f4f92013-06-20 11:45:44 -0400631}
H. Peter Anvin1965aae2008-10-22 22:26:29 -0700632#endif /* _ASM_X86_DESC_H */