/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2008 Tensilica Inc.
 * Copyright (C) 2015 Cadence Design Systems Inc.
 */

#ifndef _XTENSA_PROCESSOR_H
#define _XTENSA_PROCESSOR_H

#include <variant/core.h>
#include <platform/hardware.h>

#include <linux/compiler.h>
#include <asm/ptrace.h>
#include <asm/types.h>
#include <asm/regs.h>

/* Assertions. */

#if (XCHAL_HAVE_WINDOWED != 1)
# error Linux requires the Xtensa Windowed Registers Option.
#endif

#define ARCH_SLAB_MINALIGN	XCHAL_DATA_WIDTH

/*
 * User space process size: 1 GB.
 * Windowed call ABI requires caller and callee to be located within the same
 * 1 GB region. The C compiler places trampoline code on the stack for sources
 * that take the address of a nested C function (a feature used by glibc), so
 * the 1 GB requirement applies to the stack as well.
 */

#ifdef CONFIG_MMU
#define TASK_SIZE	__XTENSA_UL_CONST(0x40000000)
#else
#define TASK_SIZE	(PLATFORM_DEFAULT_MEM_START + PLATFORM_DEFAULT_MEM_SIZE)
#endif

#define STACK_TOP	TASK_SIZE
#define STACK_TOP_MAX	STACK_TOP

/*
 * General exception cause assigned to fake NMI. Fake NMI needs to be handled
 * differently from other interrupts, but it uses common kernel entry/exit
 * code.
 */

#define EXCCAUSE_MAPPED_NMI	62

/*
 * General exception cause assigned to debug exceptions. Debug exceptions go
 * to their own vector, rather than the general exception vectors (user,
 * kernel, double); and their specific causes are reported via DEBUGCAUSE
 * rather than EXCCAUSE. However, it is sometimes convenient to redirect debug
 * exceptions to the general exception mechanism. To do this, an otherwise
 * unused EXCCAUSE value was assigned to debug exceptions for this purpose.
 */

#define EXCCAUSE_MAPPED_DEBUG	63

/*
 * We also use DEPC as a flag to distinguish between double and regular
 * exceptions. For performance reasons, DEPC might contain the value of
 * EXCCAUSE for regular exceptions, so we use this definition to mark a
 * valid double exception address.
 * (Note: We use it in bgeui, so it should be 64, 128, or 256.)
 */

#define VALID_DOUBLE_EXCEPTION_ADDRESS	64
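
/*
 * Illustrative sketch (not the actual low-level entry code): EXCCAUSE values
 * fit in six bits, i.e. are below 64, so a single unsigned comparison is
 * enough to tell the two encodings of DEPC apart:
 *
 *	if (depc >= VALID_DOUBLE_EXCEPTION_ADDRESS)
 *		// DEPC holds a genuine double exception address
 *	else
 *		// DEPC holds the EXCCAUSE of a regular exception
 *
 * The assembly entry code performs this test with bgeui, whose immediate
 * encoding only offers constants such as 64, 128, and 256 in this range,
 * which is why the value above must be one of those.
 */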

#define XTENSA_INT_LEVEL(intno) _XTENSA_INT_LEVEL(intno)
#define _XTENSA_INT_LEVEL(intno) XCHAL_INT##intno##_LEVEL

#define XTENSA_INTLEVEL_MASK(level) _XTENSA_INTLEVEL_MASK(level)
#define _XTENSA_INTLEVEL_MASK(level) (XCHAL_INTLEVEL##level##_MASK)

#define XTENSA_INTLEVEL_ANDBELOW_MASK(l) _XTENSA_INTLEVEL_ANDBELOW_MASK(l)
#define _XTENSA_INTLEVEL_ANDBELOW_MASK(l) (XCHAL_INTLEVEL##l##_ANDBELOW_MASK)

#define PROFILING_INTLEVEL XTENSA_INT_LEVEL(XCHAL_PROFILING_INTERRUPT)
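
/*
 * The two-level macros above are the usual token-pasting indirection: the
 * outer macro forces its argument to be expanded before ## is applied.
 * Hypothetical worked example (the actual interrupt number is configuration
 * dependent and comes from <variant/core.h>): if XCHAL_PROFILING_INTERRUPT
 * were defined as 11, then
 *
 *	XTENSA_INT_LEVEL(XCHAL_PROFILING_INTERRUPT)
 *		-> _XTENSA_INT_LEVEL(11)
 *		-> XCHAL_INT11_LEVEL
 *
 * whereas pasting in a single step would produce the non-existent token
 * XCHAL_INTXCHAL_PROFILING_INTERRUPT_LEVEL.
 */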

/* LOCKLEVEL defines the interrupt level that masks all
 * general-purpose interrupts.
 */
#if defined(CONFIG_XTENSA_FAKE_NMI) && defined(XCHAL_PROFILING_INTERRUPT)
#define LOCKLEVEL (PROFILING_INTLEVEL - 1)
#else
#define LOCKLEVEL XCHAL_EXCM_LEVEL
#endif

#define TOPLEVEL XCHAL_EXCM_LEVEL
#define XTENSA_FAKE_NMI (LOCKLEVEL < TOPLEVEL)

/* WSBITS and WBBITS are the widths of the WINDOWSTART and WINDOWBASE
 * registers.
 */
#define WSBITS  (XCHAL_NUM_AREGS / 4)      /* width of WINDOWSTART in bits */
#define WBBITS  (XCHAL_NUM_AREGS_LOG2 - 2) /* width of WINDOWBASE in bits */
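
/*
 * Worked example (values depend on the core configuration; 64 address
 * registers is just one common choice): with XCHAL_NUM_AREGS = 64 and
 * XCHAL_NUM_AREGS_LOG2 = 6, WSBITS = 64 / 4 = 16 and WBBITS = 6 - 2 = 4,
 * i.e. WINDOWSTART has one bit per 4-register window frame and WINDOWBASE
 * is just wide enough to index one of those 16 frames.
 */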

#ifndef __ASSEMBLY__

/* Build a valid return address for the specified call winsize.
 * winsize must be 1 (call4), 2 (call8), or 3 (call12).
 */
#define MAKE_RA_FOR_CALL(ra,ws)		(((ra) & 0x3fffffff) | (ws) << 30)

/* Convert a return address to a valid pc.
 * Note: We assume that the stack pointer is in the same 1 GB range as the ra.
 */
#define MAKE_PC_FROM_RA(ra,sp)		(((ra) & 0x3fffffff) | ((sp) & 0xc0000000))
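
/*
 * Hedged worked example with made-up addresses: on the windowed ABI the top
 * two bits of a0 hold the caller's window increment and only the low 30 bits
 * of the return PC, so the missing upper bits are recovered from another
 * pointer into the same 1 GB region (here the stack pointer).
 *
 *	MAKE_RA_FOR_CALL(0x00012345, 1)         == 0x40012345  (call4 frame)
 *	MAKE_PC_FROM_RA(0x40012345, 0x3ffff000) == 0x00012345
 */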

typedef struct {
	unsigned long seg;
} mm_segment_t;

struct thread_struct {

	/* kernel's return address and stack pointer for context switching */
	unsigned long ra; /* kernel's a0: return address and window call size */
	unsigned long sp; /* kernel's a1: stack pointer */

	mm_segment_t current_ds;    /* see uaccess.h for example uses */

	/* struct xtensa_cpuinfo info; */

	unsigned long bad_vaddr; /* last user fault */
	unsigned long bad_uaddr; /* last kernel fault accessing user space */
	unsigned long error_code;

	unsigned long ibreak[XCHAL_NUM_IBREAK];
	unsigned long dbreaka[XCHAL_NUM_DBREAK];
	unsigned long dbreakc[XCHAL_NUM_DBREAK];

	/* Make the structure 16 bytes aligned. */
	int align[0] __attribute__ ((aligned(16)));
};


/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
#define current_text_addr()  ({ __label__ _l; _l: &&_l; })
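
/*
 * This relies on two GNU C extensions: __label__ declares a label local to
 * the statement expression, and &&label ("labels as values") yields its
 * address, so the expression evaluates to roughly the current PC.
 * Hypothetical use:
 *
 *	void *here = current_text_addr();
 */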


/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE	(TASK_SIZE / 2)

#define INIT_THREAD \
{ \
	ra:		0, \
	sp:		sizeof(init_stack) + (long) &init_stack, \
	current_ds:	{0}, \
	/*info:		{0}, */ \
	bad_vaddr:	0, \
	bad_uaddr:	0, \
	error_code:	0, \
}


/*
 * Do necessary setup to start up a newly executed thread.
 * Note: We set up ps as if we did a call4 to the new pc.
 * set_thread_state in signal.c depends on it.
 */
#define USER_PS_VALUE ((1 << PS_WOE_BIT) | \
		       (1 << PS_CALLINC_SHIFT) | \
		       (USER_RING << PS_RING_SHIFT) | \
		       (1 << PS_UM_BIT) | \
		       (1 << PS_EXCM_BIT))
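
/*
 * Rough decoding, assuming the usual PS bit positions from <asm/regs.h>
 * (WOE at bit 18, CALLINC at bits 17:16, RING at bits 7:6, UM at bit 5,
 * EXCM at bit 4) and USER_RING == 1: USER_PS_VALUE evaluates to 0x00050070,
 * i.e. window overflow detection enabled, call increment 1 (as after a
 * call4), user ring, user mode, and EXCM set.
 */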

/* Clearing a0 terminates the backtrace. */
#define start_thread(regs, new_pc, new_sp) \
	do { \
		memset(regs, 0, sizeof(*regs)); \
		regs->pc = new_pc; \
		regs->ps = USER_PS_VALUE; \
		regs->areg[1] = new_sp; \
		regs->areg[0] = 0; \
		regs->wmask = 1; \
		regs->depc = 0; \
		regs->windowbase = 0; \
		regs->windowstart = 1; \
	} while (0)

/* Forward declarations */
struct task_struct;
struct mm_struct;

/* Free all resources held by a thread. */
#define release_thread(thread) do { } while(0)

/* Copy and release all segment info associated with a VM */
#define copy_segments(p, mm)	do { } while(0)
#define release_segments(mm)	do { } while(0)
#define forget_segments()	do { } while (0)

#define thread_saved_pc(tsk)	(task_pt_regs(tsk)->pc)

extern unsigned long get_wchan(struct task_struct *p);

#define KSTK_EIP(tsk)		(task_pt_regs(tsk)->pc)
#define KSTK_ESP(tsk)		(task_pt_regs(tsk)->areg[1])

#define cpu_relax()  barrier()
#define cpu_relax_lowlatency() cpu_relax()

/* Special register access. */

#define WSR(v,sr) __asm__ __volatile__ ("wsr %0,"__stringify(sr) :: "a"(v));
#define RSR(v,sr) __asm__ __volatile__ ("rsr %0,"__stringify(sr) : "=a"(v));

#define set_sr(x,sr) ({unsigned int v=(unsigned int)x; WSR(v,sr);})
#define get_sr(sr) ({unsigned int v; RSR(v,sr); v; })
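
/*
 * The sr argument is stringified into the instruction, so it must be a
 * special register name the assembler understands. Illustrative use only
 * (which special registers exist depends on the core configuration):
 *
 *	unsigned int ps_value = get_sr(ps);
 *	unsigned int fault_pc = get_sr(epc1);
 */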

#ifndef XCHAL_HAVE_EXTERN_REGS
#define XCHAL_HAVE_EXTERN_REGS 0
#endif

#if XCHAL_HAVE_EXTERN_REGS

static inline void set_er(unsigned long value, unsigned long addr)
{
	asm volatile ("wer %0, %1" : : "a" (value), "a" (addr) : "memory");
}

static inline unsigned long get_er(unsigned long addr)
{
	register unsigned long value;
	asm volatile ("rer %0, %1" : "=a" (value) : "a" (addr) : "memory");
	return value;
}

#endif /* XCHAL_HAVE_EXTERN_REGS */

#endif	/* __ASSEMBLY__ */
#endif	/* _XTENSA_PROCESSOR_H */