/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 Waldorf GMBH
 * Copyright (C) 1995, 1996, 1997, 1998, 1999, 2001, 2002, 2003 Ralf Baechle
 * Copyright (C) 1996 Paul M. Antoine
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#ifndef _ASM_PROCESSOR_H
#define _ASM_PROCESSOR_H

#include <linux/config.h>
#include <linux/threads.h>

#include <asm/cachectl.h>
#include <asm/cpu.h>
#include <asm/cpu-info.h>
#include <asm/mipsregs.h>
#include <asm/prefetch.h>
#include <asm/system.h>
23
24/*
25 * Return current * instruction pointer ("program counter").
26 */
27#define current_text_addr() ({ __label__ _l; _l: &&_l;})
28
29/*
30 * System setup and hardware flags..
31 */
32extern void (*cpu_wait)(void);
33
34extern unsigned int vced_count, vcei_count;
35
Ralf Baechle875d43e2005-09-03 15:56:16 -070036#ifdef CONFIG_32BIT
Linus Torvalds1da177e2005-04-16 15:20:36 -070037/*
38 * User space process size: 2GB. This is hardcoded into a few places,
39 * so don't change it unless you know what you are doing.
40 */
41#define TASK_SIZE 0x7fff8000UL
42
43/*
44 * This decides where the kernel will search for a free chunk of vm
45 * space during mmap's.
46 */
47#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
48#endif
49
Ralf Baechle875d43e2005-09-03 15:56:16 -070050#ifdef CONFIG_64BIT
Linus Torvalds1da177e2005-04-16 15:20:36 -070051/*
52 * User space process size: 1TB. This is hardcoded into a few places,
53 * so don't change it unless you know what you are doing. TASK_SIZE
54 * is limited to 1TB by the R4000 architecture; R10000 and better can
55 * support 16TB; the architectural reserve for future expansion is
56 * 8192EB ...
57 */
58#define TASK_SIZE32 0x7fff8000UL
59#define TASK_SIZE 0x10000000000UL
60
61/*
62 * This decides where the kernel will search for a free chunk of vm
63 * space during mmap's.
64 */
65#define TASK_UNMAPPED_BASE ((current->thread.mflags & MF_32BIT_ADDR) ? \
66 PAGE_ALIGN(TASK_SIZE32 / 3) : PAGE_ALIGN(TASK_SIZE / 3))
67#endif
68
69#define NUM_FPU_REGS 32
70
71typedef __u64 fpureg_t;
72
73struct mips_fpu_hard_struct {
74 fpureg_t fpr[NUM_FPU_REGS];
75 unsigned int fcr31;
76};
77
78/*
79 * It would be nice to add some more fields for emulator statistics, but there
80 * are a number of fixed offsets in offset.h and elsewhere that would have to
81 * be recalculated by hand. So the additional information will be private to
82 * the FPU emulator for now. See asm-mips/fpu_emulator.h.
83 */
84
85struct mips_fpu_soft_struct {
86 fpureg_t fpr[NUM_FPU_REGS];
87 unsigned int fcr31;
88};
89
90union mips_fpu_union {
91 struct mips_fpu_hard_struct hard;
92 struct mips_fpu_soft_struct soft;
93};
94
95#define INIT_FPU { \
96 {{0,},} \
97}
98
99typedef struct {
100 unsigned long seg;
101} mm_segment_t;
102
103#define ARCH_MIN_TASKALIGN 8
104
105/*
106 * If you change thread_struct remember to change the #defines below too!
107 */
108struct thread_struct {
109 /* Saved main processor registers. */
110 unsigned long reg16;
111 unsigned long reg17, reg18, reg19, reg20, reg21, reg22, reg23;
112 unsigned long reg29, reg30, reg31;
113
114 /* Saved cp0 stuff. */
115 unsigned long cp0_status;
116
117 /* Saved fpu/fpu emulator stuff. */
118 union mips_fpu_union fpu;
119
120 /* Other stuff associated with the thread. */
121 unsigned long cp0_badvaddr; /* Last user fault */
122 unsigned long cp0_baduaddr; /* Last kernel fault accessing USEG */
123 unsigned long error_code;
124 unsigned long trap_no;
125#define MF_FIXADE 1 /* Fix address errors in software */
126#define MF_LOGADE 2 /* Log address errors to syslog */
127#define MF_32BIT_REGS 4 /* also implies 16/32 fprs */
128#define MF_32BIT_ADDR 8 /* 32-bit address space (o32/n32) */
129 unsigned long mflags;
130 unsigned long irix_trampoline; /* Wheee... */
131 unsigned long irix_oldctx;
132};
133
134#define MF_ABI_MASK (MF_32BIT_REGS | MF_32BIT_ADDR)
135#define MF_O32 (MF_32BIT_REGS | MF_32BIT_ADDR)
136#define MF_N32 MF_32BIT_ADDR
137#define MF_N64 0
138
139#define INIT_THREAD { \
140 /* \
141 * saved main processor registers \
142 */ \
143 0, 0, 0, 0, 0, 0, 0, 0, \
144 0, 0, 0, \
145 /* \
146 * saved cp0 stuff \
147 */ \
148 0, \
149 /* \
150 * saved fpu/fpu emulator stuff \
151 */ \
152 INIT_FPU, \
153 /* \
154 * Other stuff associated with the process \
155 */ \
156 0, 0, 0, 0, \
157 /* \
158 * For now the default is to fix address errors \
159 */ \
160 MF_FIXADE, 0, 0 \
161}
162
163struct task_struct;
164
165/* Free all resources held by a thread. */
166#define release_thread(thread) do { } while(0)
167
168/* Prepare to copy thread state - unlazy all lazy status */
169#define prepare_to_copy(tsk) do { } while (0)
170
171extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
172
173extern unsigned long thread_saved_pc(struct task_struct *tsk);
174
175/*
176 * Do necessary setup to start up a newly executed thread.
177 */
178extern void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp);
179
180unsigned long get_wchan(struct task_struct *p);
181
182#define __PT_REG(reg) ((long)&((struct pt_regs *)0)->reg - sizeof(struct pt_regs))
183#define __KSTK_TOS(tsk) ((unsigned long)(tsk->thread_info) + THREAD_SIZE - 32)
184#define KSTK_EIP(tsk) (*(unsigned long *)(__KSTK_TOS(tsk) + __PT_REG(cp0_epc)))
185#define KSTK_ESP(tsk) (*(unsigned long *)(__KSTK_TOS(tsk) + __PT_REG(regs[29])))
186#define KSTK_STATUS(tsk) (*(unsigned long *)(__KSTK_TOS(tsk) + __PT_REG(cp0_status)))
187
188#define cpu_relax() barrier()
189
190/*
191 * Return_address is a replacement for __builtin_return_address(count)
192 * which on certain architectures cannot reasonably be implemented in GCC
193 * (MIPS, Alpha) or is unuseable with -fomit-frame-pointer (i386).
194 * Note that __builtin_return_address(x>=1) is forbidden because GCC
195 * aborts compilation on some CPUs. It's simply not possible to unwind
196 * some CPU's stackframes.
197 *
198 * __builtin_return_address works only for non-leaf functions. We avoid the
199 * overhead of a function call by forcing the compiler to save the return
200 * address register on the stack.
201 */
202#define return_address() ({__asm__ __volatile__("":::"$31");__builtin_return_address(0);})
203
#ifdef CONFIG_CPU_HAS_PREFETCH

#define ARCH_HAS_PREFETCH

/*
 * Hint the CPU to prefetch the cache line containing *addr for a
 * subsequent load (MIPS IV `pref' with the Pref_Load hint).
 *
 * The .set mips4 / .set mips0 pair temporarily raises the assembler's
 * ISA level so `pref' is accepted even when building for an older ISA.
 *
 * Was `extern inline': under GNU89 inline semantics that emits no
 * out-of-line definition, and under C99/unit-at-a-time semantics it
 * causes duplicate/missing definition problems.  `static inline' gives
 * every translation unit its own inlinable copy and is safe in both
 * modes, without changing the call interface.
 */
static inline void prefetch(const void *addr)
{
	__asm__ __volatile__(
	"	.set	mips4		\n"
	"	pref	%0, (%1)	\n"
	"	.set	mips0		\n"
	:
	: "i" (Pref_Load), "r" (addr));
}

#endif

#endif /* _ASM_PROCESSOR_H */