blob: 18e0377f72bbb9ad8f559b00fc2452532d0ad312 [file] [log] [blame]
/*
 * include/asm-sh/processor.h
 *
 * Copyright (C) 1999, 2000 Niibe Yutaka
 * Copyright (C) 2002, 2003 Paul Mundt
 */
7
8#ifndef __ASM_SH_PROCESSOR_32_H
9#define __ASM_SH_PROCESSOR_32_H
10#ifdef __KERNEL__
11
12#include <linux/compiler.h>
Paul Mundtfa439722008-09-04 18:53:58 +090013#include <linux/linkage.h>
Paul Mundtaf3c7df2007-11-09 17:08:54 +090014#include <asm/page.h>
15#include <asm/types.h>
Paul Mundt4352fc12010-01-05 19:06:45 +090016#include <asm/hw_breakpoint.h>
Paul Mundtaf3c7df2007-11-09 17:08:54 +090017
/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 *
 * Uses "mova" to load the address of local label 1 into a register
 * constrained to r0 (the "z" constraint on SH); mova requires a
 * 4-byte aligned target, hence the .align 2 before the label.
 */
#define current_text_addr() ({ void *pc; __asm__("mova 1f, %0\n.align 2\n1:":"=z" (pc)); pc; })
Paul Mundtaf3c7df2007-11-09 17:08:54 +090023
/* Core Processor Version Register block (memory-mapped ID registers) */
#define CCN_PVR		0xff000030	/* Processor Version Register */
#define CCN_CVR		0xff000040	/* NOTE(review): presumably Cache Version Register -- confirm against CPU manual */
#define CCN_PRR		0xff000044	/* NOTE(review): presumably Product Register -- confirm against CPU manual */
28
/*
 * User space process size: 2GB.
 *
 * Since SH7709 and SH7750 have "area 7", we can't use 0x7c000000--0x7fffffff
 */
#define TASK_SIZE	0x7c000000UL

/* User stack grows down from the top of the user address space. */
#define STACK_TOP	TASK_SIZE
#define STACK_TOP_MAX	STACK_TOP

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's. (One third of the way into the user address
 * space, rounded up to a page boundary.)
 */
#define TASK_UNMAPPED_BASE	PAGE_ALIGN(TASK_SIZE / 3)
Paul Mundtaf3c7df2007-11-09 17:08:54 +090043
/*
 * Bits of the SR (status) register
 *
 * FD-bit:
 * When it's set, it means the processor doesn't have the right to use
 * the FPU, and executing a floating point operation raises an
 * exception (this is what makes lazy FPU context switching possible).
 *
 * IMASK-bit:
 * Interrupt level mask
 */
#define SR_DSP		0x00001000	/* DSP mode bit */
#define SR_IMASK	0x000000f0	/* interrupt mask level */
#define SR_FD		0x00008000	/* FPU disable bit */
#define SR_MD		0x40000000	/* NOTE(review): processor mode bit (set = privileged) -- confirm in SH manual */
Paul Mundtaf3c7df2007-11-09 17:08:54 +090058
/*
 * DSP structure and data
 *
 * Per-thread save area for the SH-DSP register file; used only when
 * CONFIG_SH_DSP is enabled (see thread_struct below).
 */
struct sh_dsp_struct {
	unsigned long dsp_regs[14];	/* saved DSP registers */
	long status;			/* software status information */
};
66
/*
 * FPU structure and data
 *
 * Per-thread save area for the hardware FPU context.
 */

struct sh_fpu_hard_struct {
	unsigned long fp_regs[16];	/* FR register bank */
	unsigned long xfp_regs[16];	/* XF (extended) register bank */
	unsigned long fpscr;		/* FPU status/control register */
	unsigned long fpul;		/* FPU communication register */

	long status;	/* software status information */
};
79
/* Dummy fpu emulator: same register image as the hardware layout so
 * the two can share a union (see thread_xstate), plus emulator state. */
struct sh_fpu_soft_struct {
	unsigned long fp_regs[16];
	unsigned long xfp_regs[16];
	unsigned long fpscr;
	unsigned long fpul;

	unsigned char lookahead;	/* emulator lookahead flag */
	unsigned long entry_pc;		/* PC of the emulated FP instruction */
};
90
/*
 * Extended processor state: either the real FPU context or the
 * software-emulated one, never both at once.
 */
union thread_xstate {
	struct sh_fpu_hard_struct hardfpu;
	struct sh_fpu_soft_struct softfpu;
};
95
/*
 * Architecture-specific per-thread state, embedded in task_struct.
 */
struct thread_struct {
	/* Saved registers when thread is descheduled */
	unsigned long sp;
	unsigned long pc;

	/* Various thread flags, see SH_THREAD_xxx */
	unsigned long flags;

	/* Save middle states of ptrace breakpoints */
	struct perf_event *ptrace_bps[HBP_NUM];

#ifdef CONFIG_SH_DSP
	/* Dsp status information */
	struct sh_dsp_struct dsp_status;
#endif

	/* Extended processor state (lazily allocated FPU context) */
	union thread_xstate *xstate;

	/*
	 * fpu_counter contains the number of consecutive context switches
	 * that the FPU is used. If this is over a threshold, the lazy fpu
	 * saving becomes unlazy to save the trap. This is an unsigned char
	 * so that after 256 times the counter wraps and the behavior turns
	 * lazy again; this to deal with bursty apps that only use FPU for
	 * a short time
	 */
	unsigned char fpu_counter;
};
Paul Mundtaf3c7df2007-11-09 17:08:54 +0900125
/*
 * Initial thread_struct for the boot task: kernel stack pointer at the
 * top of init_stack, no thread flags set.
 */
#define INIT_THREAD  {						\
	.sp = sizeof(init_stack) + (long) &init_stack,		\
	.flags = 0,						\
}
130
/* Forward declaration, a strange C thing */
struct task_struct;

/*
 * Set up "regs" so the current task returns to user space executing at
 * new_pc with its user stack pointer at new_sp (used by exec).
 * Defined in the arch process-management code.
 */
extern void start_thread(struct pt_regs *regs, unsigned long new_pc, unsigned long new_sp);

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

/* Copy and release all segment info associated with a VM */
/* No per-mm segment state on this architecture, so these are no-ops. */
#define copy_segments(p, mm)	do { } while(0)
#define release_segments(mm)	do { } while(0)
142
143/*
144 * FPU lazy state save handling.
145 */
146
147static __inline__ void disable_fpu(void)
148{
149 unsigned long __dummy;
150
151 /* Set FD flag in SR */
152 __asm__ __volatile__("stc sr, %0\n\t"
153 "or %1, %0\n\t"
154 "ldc %0, sr"
155 : "=&r" (__dummy)
156 : "r" (SR_FD));
157}
158
159static __inline__ void enable_fpu(void)
160{
161 unsigned long __dummy;
162
163 /* Clear out FD flag in SR */
164 __asm__ __volatile__("stc sr, %0\n\t"
165 "and %1, %0\n\t"
166 "ldc %0, sr"
167 : "=&r" (__dummy)
168 : "r" (~SR_FD));
169}
170
/* Double precision, NANs as NANs, rounding to nearest, no exceptions */
#define FPSCR_INIT  0x00080000

#define	FPSCR_CAUSE_MASK	0x0001f000	/* Cause bits */
#define	FPSCR_FLAG_MASK		0x0000007c	/* Flag bits */
176
/*
 * Return saved PC of a blocked thread.
 */
#define thread_saved_pc(tsk)	(tsk->thread.pc)

/* Dump a stack backtrace for the given task (debug helper). */
void show_trace(struct task_struct *tsk, unsigned long *sp,
		struct pt_regs *regs);

#ifdef CONFIG_DUMP_CODE
void show_code(struct pt_regs *regs);
#else
/* Stub so callers compile unchanged when code dumping is disabled. */
static inline void show_code(struct pt_regs *regs)
{
}
#endif

/* Where in the kernel is a sleeping task blocked? (for /proc wchan) */
extern unsigned long get_wchan(struct task_struct *p);

/* User-visible instruction and stack pointers from the saved regs. */
#define KSTK_EIP(tsk)  (task_pt_regs(tsk)->pc)
#define KSTK_ESP(tsk)  (task_pt_regs(tsk)->regs[15])
197
/*
 * SH-2A and SH-4 have a prefetch instruction, so advertise prefetch
 * support to the generic code and map it to the compiler builtin.
 */
#if defined(CONFIG_CPU_SH2A) || defined(CONFIG_CPU_SH4)

#define PREFETCH_STRIDE		L1_CACHE_BYTES
#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW

static inline void prefetch(const void *x)
{
	/* __builtin_prefetch(addr, rw, locality): rw=0 (read),
	 * locality=3 (keep in all cache levels). */
	__builtin_prefetch(x, 0, 3);
}

static inline void prefetchw(const void *x)
{
	/* rw=1: prefetch in anticipation of a write. */
	__builtin_prefetch(x, 1, 3);
}
#endif
214
Paul Mundtaf3c7df2007-11-09 17:08:54 +0900215#endif /* __KERNEL__ */
216#endif /* __ASM_SH_PROCESSOR_32_H */