/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_SYSTEM_H
#define _ASM_TILE_SYSTEM_H

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/irqflags.h>

/* NOTE: we can't include <linux/ptrace.h> due to #include dependencies. */
#include <asm/ptrace.h>

#include <arch/chip.h>
#include <arch/sim_def.h>
#include <arch/spr_def.h>

/*
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier.  All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads.  This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than is
 * rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies.  See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	b = 2;
 *	memory_barrier();
 *	p = &b;				q = p;
 *					read_barrier_depends();
 *					d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends().  However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	a = 2;
 *	memory_barrier();
 *	b = 3;				y = b;
 *					read_barrier_depends();
 *					x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b".  Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
 * in cases like this where there are no data dependencies.
 */

#define read_barrier_depends()	do { } while (0)
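
/*
 * An illustrative sketch (not code from this file; "shared_ptr", "node"
 * and "field" are hypothetical): a consumer that follows a pointer
 * published by another CPU, where the producer issued wmb() between
 * initializing the node and storing the pointer, needs only
 * read_barrier_depends() between loading the pointer and dereferencing
 * it, since the second load data-depends on the first:
 *
 * <programlisting>
 *	struct node *n = shared_ptr;
 *	read_barrier_depends();
 *	val = n->field;
 * </programlisting>
 */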

#define __sync()	__insn_mf()

#if CHIP_HAS_SPLIT_CYCLE()
#define get_cycles_low()	__insn_mfspr(SPR_CYCLE_LOW)
#else
#define get_cycles_low()	__insn_mfspr(SPR_CYCLE)	/* just get all 64 bits */
#endif

/* Fence to guarantee visibility of stores to incoherent memory. */
static inline void
mb_incoherent(void)
{
	__insn_mf();

#if !CHIP_HAS_MF_WAITS_FOR_VICTIMS()
	{
		int __mb_incoherent(void);
#if CHIP_HAS_TILE_WRITE_PENDING()
		const unsigned long WRITE_TIMEOUT_CYCLES = 400;
		unsigned long start = get_cycles_low();
		/* Spin until all pending writes have drained, or we time out. */
		do {
			if (__insn_mfspr(SPR_TILE_WRITE_PENDING) == 0)
				return;
		} while ((get_cycles_low() - start) < WRITE_TIMEOUT_CYCLES);
#endif /* CHIP_HAS_TILE_WRITE_PENDING() */
		/* Fall back to the out-of-line slow path. */
		(void) __mb_incoherent();
	}
#endif /* CHIP_HAS_MF_WAITS_FOR_VICTIMS() */
}
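
/*
 * An illustrative sketch of when mb_incoherent() matters (the buffer,
 * device, and DOORBELL_GO names are hypothetical): stores to a buffer
 * in incoherent memory must be pushed past the write buffers before
 * hardware is told to read that buffer.
 *
 * <programlisting>
 *	buf->len = len;
 *	memcpy(buf->data, src, len);
 *	mb_incoherent();
 *	writel(DOORBELL_GO, dev->mmio_doorbell);
 * </programlisting>
 */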

#define fast_wmb()	__sync()
#define fast_rmb()	__sync()
#define fast_mb()	__sync()
#define fast_iob()	mb_incoherent()

#define wmb()		fast_wmb()
#define rmb()		fast_rmb()
#define mb()		fast_mb()
#define iob()		fast_iob()

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while (0)
#endif
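
/*
 * An illustrative pairing (not code from this file): CPU 0 publishes
 * data and then sets a flag, with smp_wmb() in between; CPU 1 spins on
 * the flag and issues smp_rmb() before reading the data, and is then
 * guaranteed to observe data == 1:
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	data = 1;			while (!flag)
 *	smp_wmb();				cpu_relax();
 *	flag = 1;			smp_rmb();
 *					r = data;
 * </programlisting>
 */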

#define set_mb(var, value) \
	do { var = value; mb(); } while (0)
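
/*
 * For example (an illustrative sketch; "flag", waiter_is_sleeping() and
 * wake_the_waiter() are hypothetical): set_mb() is used where a store
 * must be globally visible before the code goes on to test some other
 * shared condition:
 *
 * <programlisting>
 *	set_mb(flag, 1);
 *	if (waiter_is_sleeping())
 *		wake_the_waiter();
 * </programlisting>
 */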
/*
 * Pause the DMA engine and static network before task switching.
 */
#define prepare_arch_switch(next) _prepare_arch_switch(next)
void _prepare_arch_switch(struct task_struct *next);

/*
 * switch_to(n) should switch tasks to task n, first checking that n
 * isn't already the current task (in which case it does nothing).
 * The number of callee-saved registers saved on the kernel stack
 * is defined here for use in copy_thread() and must agree with __switch_to().
 */
#endif /* !__ASSEMBLY__ */
#define CALLEE_SAVED_FIRST_REG	30
#define CALLEE_SAVED_REGS_COUNT	24	/* r30 to r52 (23 regs), plus one empty slot to align */
#ifndef __ASSEMBLY__
struct task_struct;
#define switch_to(prev, next, last) ((last) = _switch_to((prev), (next)))
extern struct task_struct *_switch_to(struct task_struct *prev,
				      struct task_struct *next);

/* Helper function for _switch_to(). */
extern struct task_struct *__switch_to(struct task_struct *prev,
				       struct task_struct *next,
				       unsigned long new_system_save_1_0);

/* Address (PC) at which tasks that have been switched away from are waiting. */
extern unsigned long get_switch_to_pc(void);

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible:
 *
 * TODO: fill this in!
 */
static inline void sched_cacheflush(void)
{
}

#define arch_align_stack(x) (x)

/*
 * Is the kernel doing fixups of unaligned accesses?  If < 0, no kernel
 * intervention occurs and a SIGBUS is delivered with no data address
 * info.  If 0, the kernel single-steps the instruction to discover
 * the data address so it can be provided with the SIGBUS.  If 1, the
 * kernel performs the fixup itself.
 */
extern int unaligned_fixup;
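
/*
 * An illustrative sketch (not the actual trap handler; all three helper
 * names are hypothetical) of how the three settings above behave:
 *
 * <programlisting>
 *	if (unaligned_fixup < 0)
 *		deliver_sigbus_without_address();
 *	else if (unaligned_fixup == 0)
 *		single_step_to_find_address_then_sigbus();
 *	else
 *		emulate_the_unaligned_access();
 * </programlisting>
 */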

/* Should the kernel print a message on each unaligned fixup? */
extern int unaligned_printk;

/* Number of unaligned fixups performed. */
extern unsigned int unaligned_fixup_count;

/* Init-time routine to do tile-specific per-cpu setup. */
void setup_cpu(int boot);

/* User-level DMA management functions. */
void grant_dma_mpls(void);
void restrict_dma_mpls(void);

#ifdef CONFIG_HARDWALL
/* User-level network management functions. */
void reset_network_state(void);
void grant_network_mpls(void);
void restrict_network_mpls(void);
int hardwall_deactivate(struct task_struct *task);

/* Hook hardwall code into changes in affinity. */
#define arch_set_cpus_allowed(p, new_mask) do { \
	if (p->thread.hardwall && !cpumask_equal(&p->cpus_allowed, new_mask)) \
		hardwall_deactivate(p); \
} while (0)
#endif

/* Invoke the simulator "syscall" mechanism (see arch/tile/kernel/entry.S). */
extern int _sim_syscall(int syscall_num, ...);
#define sim_syscall(syscall_num, ...) \
	_sim_syscall(SIM_CONTROL_SYSCALL + \
		((syscall_num) << _SIM_CONTROL_OPERATOR_BITS), \
		## __VA_ARGS__)
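
/*
 * For example (illustrative only; the syscall number and arguments are
 * made up), a caller passes a simulator syscall number plus arguments,
 * and the macro shifts the number into the operator field of the
 * SIM_CONTROL_SYSCALL word before invoking _sim_syscall():
 *
 * <programlisting>
 *	int err = sim_syscall(42, addr, len);
 * </programlisting>
 */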

/*
 * Kernel threads can check to see if they need to migrate their
 * stack whenever they return from a context switch; for user
 * threads, we defer until they are returning to user-space.
 */
#define finish_arch_switch(prev) do { \
	if (unlikely((prev)->state == TASK_DEAD)) \
		__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_EXIT | \
			((prev)->pid << _SIM_CONTROL_OPERATOR_BITS)); \
	__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_SWITCH | \
		(current->pid << _SIM_CONTROL_OPERATOR_BITS)); \
	if (current->mm == NULL && !kstack_hash && \
	    current_thread_info()->homecache_cpu != smp_processor_id()) \
		homecache_migrate_kthread(); \
} while (0)

/* Support function for forking a new task. */
void ret_from_fork(void);

/* Called from ret_from_fork() when a new process starts up. */
struct task_struct *sim_notify_fork(struct task_struct *prev);

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_SYSTEM_H */