#ifndef _ASM_X86_FTRACE_H
#define _ASM_X86_FTRACE_H

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CC_USING_FENTRY
# define MCOUNT_ADDR		((long)(__fentry__))
#else
# define MCOUNT_ADDR		((long)(mcount))
#endif
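/*
 * Note on the size below: the compiler-generated mcount/fentry call site
 * on x86 is a 5-byte near call, i.e. one 0xe8 opcode byte followed by a
 * 32-bit relative displacement.
 */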
#define MCOUNT_INSN_SIZE	5 /* sizeof mcount call */

#ifdef CONFIG_DYNAMIC_FTRACE
#define ARCH_SUPPORTS_FTRACE_OPS 1
#endif

#ifndef __ASSEMBLY__
extern void mcount(void);
extern atomic_t modifying_ftrace_code;
extern void __fentry__(void);

static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
	/*
	 * addr is the address of the mcount call instruction.
	 * recordmcount does the necessary offset calculation.
	 */
	return addr;
}
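/*
 * Sketch of how the core is expected to use this hook (simplified, not a
 * quote of the generic code): ftrace walks the call-site addresses that
 * recordmcount collected and passes each one through ftrace_call_adjust()
 * before patching, roughly
 *
 *	while (p < end)
 *		ip = ftrace_call_adjust(*p++);
 *
 * On x86 the recorded address is already the call instruction itself, so
 * the hook returns it unchanged.
 */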

#ifdef CONFIG_DYNAMIC_FTRACE

struct dyn_arch_ftrace {
	/* No extra data needed for x86 */
};

int ftrace_int3_handler(struct pt_regs *regs);

#define FTRACE_GRAPH_TRAMP_ADDR FTRACE_GRAPH_ADDR

#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_FUNCTION_TRACER */


#if !defined(__ASSEMBLY__) && !defined(COMPILE_OFFSETS)

#if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_IA32_EMULATION)
#include <asm/compat.h>

/*
 * Because ia32 syscalls do not map to x86_64 syscall numbers,
 * this screws up the trace output when tracing an ia32 task.
 * Instead of reporting bogus syscalls, just do not trace them.
 *
 * If the user really wants these, they should use the
 * raw syscall tracepoints with filtering.
 */
#define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS 1
static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs)
{
	if (is_compat_task())
		return true;
	return false;
}
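/*
 * Illustrative sketch only (the helper name and exact shape below are
 * assumptions, not defined by this header): generic syscall-trace code is
 * expected to consult this hook and skip compat tasks, along these lines.
 */
#if 0
static int trace_get_syscall_nr(struct task_struct *task, struct pt_regs *regs)
{
	if (arch_trace_is_compat_syscall(regs))
		return -1;	/* compat syscall: do not trace it */

	return syscall_get_nr(task, regs);
}
#endif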
#endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_IA32_EMULATION */
#endif /* !__ASSEMBLY__ && !COMPILE_OFFSETS */

#endif /* _ASM_X86_FTRACE_H */