/*
 * Port on Texas Instruments TMS320C6x architecture
 *
 * Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated
 * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
 *
 * Updated for 2.6.34: Mark Salter <msalter@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASM_C6X_PROCESSOR_H
#define _ASM_C6X_PROCESSOR_H

#include <asm/ptrace.h>
#include <asm/page.h>
#include <asm/current.h>

/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
#define current_text_addr()			\
({						\
	void *__pc;				\
	asm("mvc .S2 pce1,%0\n" : "=b"(__pc));	\
	__pc;					\
})
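/*
 * Illustrative sketch: one way a caller might use this macro, e.g. for
 * debug output (pr_debug() used purely as an example):
 *
 *	void *pc = current_text_addr();
 *	pr_debug("executing near %p\n", pc);
 */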

/*
 * User space process size. This is mostly meaningless for NOMMU
 * but some C6X processors may have RAM addresses up to 0xFFFFFFFF.
 * Since calls like mmap() can return an address or an error, we
 * have to allow room for error returns when code does something
 * like:
 *
 *	addr = do_mmap(...)
 *	if ((unsigned long)addr >= TASK_SIZE)
 *		... it's an error code, not an address ...
 *
 * Here, we allow for 4096 error codes which means we really can't
 * use the last 4K page on systems with RAM extending all the way
 * to the end of the 32-bit address space.
 */
#define TASK_SIZE	0xFFFFF000
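/*
 * Illustrative arithmetic for the comment above: the excluded range
 * 0xFFFFF000..0xFFFFFFFF spans 0x100000000 - 0xFFFFF000 = 0x1000 =
 * 4096 values, i.e. exactly one 4K page at the top of the 32-bit
 * address space.
 */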

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap(). We won't be using it.
 */
#define TASK_UNMAPPED_BASE	0

struct thread_struct {
	unsigned long long b15_14;
	unsigned long long a15_14;
	unsigned long long b13_12;
	unsigned long long a13_12;
	unsigned long long b11_10;
	unsigned long long a11_10;
	unsigned long long ricl_icl;
	unsigned long usp;		/* user stack pointer */
	unsigned long pc;		/* kernel pc */
	unsigned long wchan;
};

#define INIT_THREAD					\
{							\
	.usp = 0,					\
	.wchan = 0,					\
}

#define INIT_MMAP {							\
	&init_mm, 0, 0, NULL, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, 1, \
	NULL, NULL }

#define task_pt_regs(task) \
	((struct pt_regs *)(THREAD_START_SP + task_stack_page(task)) - 1)
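/*
 * Note on task_pt_regs() above: the saved user register frame sits at
 * the top of the task's kernel stack, so the macro steps
 * THREAD_START_SP bytes into the stack page and then backs up by one
 * struct pt_regs.
 */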

#define alloc_kernel_stack()	__get_free_page(GFP_KERNEL)
#define free_kernel_stack(page) free_page((page))

/* Forward declaration, a strange C thing */
struct task_struct;

extern void start_thread(struct pt_regs *regs, unsigned int pc,
			 unsigned long usp);

/* Free all resources held by a thread. */
static inline void release_thread(struct task_struct *dead_task)
{
}

/* Prepare to copy thread state - unlazy all lazy status */
#define prepare_to_copy(tsk)	do { } while (0)

extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
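/*
 * Illustrative call sequence (sketch only; my_thread_fn is a made-up
 * example name, and the clone flags are only an example):
 *
 *	static int my_thread_fn(void *arg)
 *	{
 *		return 0;
 *	}
 *
 *	kernel_thread(my_thread_fn, NULL, CLONE_FS | CLONE_FILES);
 */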

#define copy_segments(tsk, mm)		do { } while (0)
#define release_segments(mm)		do { } while (0)

/*
 * saved PC of a blocked thread.
 */
#define thread_saved_pc(tsk)	(task_pt_regs(tsk)->pc)

/*
 * saved kernel SP and DP of a blocked thread.
 */
#ifdef _BIG_ENDIAN
#define thread_saved_ksp(tsk) \
	(*(unsigned long *)&(tsk)->thread.b15_14)
#define thread_saved_dp(tsk) \
	(*(((unsigned long *)&(tsk)->thread.b15_14) + 1))
#else
#define thread_saved_ksp(tsk) \
	(*(((unsigned long *)&(tsk)->thread.b15_14) + 1))
#define thread_saved_dp(tsk) \
	(*(unsigned long *)&(tsk)->thread.b15_14)
#endif
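/*
 * Rationale for the #ifdef above: thread.b15_14 stores the B15:B14
 * register pair (B15 is the C6x stack pointer, B14 the data page
 * pointer) as a single 64-bit value, so which 32-bit half holds the
 * SP and which holds the DP depends on the endianness of the build.
 */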

extern unsigned long get_wchan(struct task_struct *p);

#define KSTK_EIP(tsk)	(task_pt_regs(tsk)->pc)
#define KSTK_ESP(tsk)	(task_pt_regs(tsk)->sp)

#define cpu_relax()	do { } while (0)

extern const struct seq_operations cpuinfo_op;

#endif /* _ASM_C6X_PROCESSOR_H */