/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003, 04, 05 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2007 Maciej W. Rozycki
 * Copyright (C) 2008 Thiemo Seufer
 * Copyright (C) 2012 MIPS Technologies, Inc.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>

#include <asm/bugs.h>
#include <asm/cacheops.h>
#include <asm/cpu-type.h>
#include <asm/inst.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/prefetch.h>
#include <asm/bootinfo.h>
#include <asm/mipsregs.h>
#include <asm/mmu_context.h>
#include <asm/cpu.h>
#include <asm/war.h>

#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
#include <asm/sibyte/sb1250.h>
#include <asm/sibyte/sb1250_regs.h>
#include <asm/sibyte/sb1250_dma.h>
#endif

#include <asm/uasm.h>

/* Registers used in the assembled routines. */
#define ZERO 0
#define AT 2
#define A0 4
#define A1 5
#define A2 6
#define T0 8
#define T1 9
#define T2 10
#define T3 11
#define T9 25
#define RA 31

/* Handle labels (which must be positive integers). */
enum label_id {
	label_clear_nopref = 1,
	label_clear_pref,
	label_copy_nopref,
	label_copy_pref_both,
	label_copy_pref_store,
};

UASM_L_LA(_clear_nopref)
UASM_L_LA(_clear_pref)
UASM_L_LA(_copy_nopref)
UASM_L_LA(_copy_pref_both)
UASM_L_LA(_copy_pref_store)

/* We need one branch and therefore one relocation per target label. */
static struct uasm_label labels[5];
static struct uasm_reloc relocs[5];

#define cpu_is_r4600_v1_x()	((read_c0_prid() & 0xfffffff0) == 0x00002010)
#define cpu_is_r4600_v2_x()	((read_c0_prid() & 0xfffffff0) == 0x00002020)

/*
 * The pref instruction on MIPS R6 encodes only a 9-bit signed offset.
 * Skip the prefetch if the offset cannot be encoded.
 */
#define _uasm_i_pref(a, b, c, d)		\
do {						\
	if (cpu_has_mips_r6) {			\
		if (c <= 0xff && c >= -0x100)	\
			uasm_i_pref(a, b, c, d);\
	} else {				\
		uasm_i_pref(a, b, c, d);	\
	}					\
} while (0)

static int pref_bias_clear_store;
static int pref_bias_copy_load;
static int pref_bias_copy_store;

static u32 pref_src_mode;
static u32 pref_dst_mode;

static int clear_word_size;
static int copy_word_size;

static int half_clear_loop_size;
static int half_copy_loop_size;

static int cache_line_size;
#define cache_line_mask() (cache_line_size - 1)

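/*
 * Emit code to add a constant offset to a register: a single [D]ADDIU,
 * or LUI/ADDIU/[D]ADDU when the offset does not fit the 16-bit
 * immediate.  On 64-bit CPUs suffering from the R4000 DADDIU erratum
 * the offset is built in T9 first so only a DADDU is needed.
 */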
static inline void
pg_addiu(u32 **buf, unsigned int reg1, unsigned int reg2, unsigned int off)
{
	if (cpu_has_64bit_gp_regs && DADDI_WAR && r4k_daddiu_bug()) {
		if (off > 0x7fff) {
			uasm_i_lui(buf, T9, uasm_rel_hi(off));
			uasm_i_addiu(buf, T9, T9, uasm_rel_lo(off));
		} else
			uasm_i_addiu(buf, T9, ZERO, off);
		uasm_i_daddu(buf, reg1, reg2, T9);
	} else {
		if (off > 0x7fff) {
			uasm_i_lui(buf, T9, uasm_rel_hi(off));
			uasm_i_addiu(buf, T9, T9, uasm_rel_lo(off));
			UASM_i_ADDU(buf, reg1, reg2, T9);
		} else
			UASM_i_ADDIU(buf, reg1, reg2, off);
	}
}

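/*
 * Pick per-CPU tuning for the generated routines: the word size used
 * for loads and stores, the prefetch hint types, and how far ahead of
 * the current pointer to prefetch (the "bias").
 */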
static void set_prefetch_parameters(void)
{
	if (cpu_has_64bit_gp_regs || cpu_has_64bit_zero_reg)
		clear_word_size = 8;
	else
		clear_word_size = 4;

	if (cpu_has_64bit_gp_regs)
		copy_word_size = 8;
	else
		copy_word_size = 4;

	/*
	 * The pref's used here are using "streaming" hints, which cause the
	 * copied data to be kicked out of the cache sooner.  A page copy often
	 * ends up copying a lot more data than is commonly used, so this seems
	 * to make sense in terms of reducing cache pollution, but I've no real
	 * performance data to back this up.
	 */
	if (cpu_has_prefetch) {
		/*
		 * XXX: Most prefetch bias values in here are based on
		 * guesswork.
		 */
		cache_line_size = cpu_dcache_line_size();
		switch (current_cpu_type()) {
		case CPU_R5500:
		case CPU_TX49XX:
			/* These processors only support the Pref_Load hint. */
			pref_bias_copy_load = 256;
			break;

		case CPU_R10000:
		case CPU_R12000:
		case CPU_R14000:
		case CPU_R16000:
			/*
			 * Those values have been experimentally tuned for an
			 * Origin 200.
			 */
			pref_bias_clear_store = 512;
			pref_bias_copy_load = 256;
			pref_bias_copy_store = 256;
			pref_src_mode = Pref_LoadStreamed;
			pref_dst_mode = Pref_StoreStreamed;
			break;

		case CPU_SB1:
		case CPU_SB1A:
			pref_bias_clear_store = 128;
			pref_bias_copy_load = 128;
			pref_bias_copy_store = 128;
			/*
			 * SB1 pass1 Pref_LoadStreamed/Pref_StoreStreamed
			 * hints are broken.
			 */
			if (current_cpu_type() == CPU_SB1 &&
			    (current_cpu_data.processor_id & 0xff) < 0x02) {
				pref_src_mode = Pref_Load;
				pref_dst_mode = Pref_Store;
			} else {
				pref_src_mode = Pref_LoadStreamed;
				pref_dst_mode = Pref_StoreStreamed;
			}
			break;

		case CPU_LOONGSON3:
			/* Loongson-3 only supports the Pref_Load/Pref_Store hints. */
			pref_bias_clear_store = 128;
			pref_bias_copy_load = 128;
			pref_bias_copy_store = 128;
			pref_src_mode = Pref_Load;
			pref_dst_mode = Pref_Store;
			break;

		default:
			pref_bias_clear_store = 128;
			pref_bias_copy_load = 256;
			pref_bias_copy_store = 128;
			pref_src_mode = Pref_LoadStreamed;
			if (cpu_has_mips_r6)
				/*
				 * Bit 30 (Pref_PrepareForStore) has been
				 * removed from MIPS R6.  Use bit 5
				 * (Pref_StoreStreamed) instead.
				 */
				pref_dst_mode = Pref_StoreStreamed;
			else
				pref_dst_mode = Pref_PrepareForStore;
			break;
		}
	} else {
		if (cpu_has_cache_cdex_s)
			cache_line_size = cpu_scache_line_size();
		else if (cpu_has_cache_cdex_p)
			cache_line_size = cpu_dcache_line_size();
	}
	/*
	 * Too much unrolling will overflow the space reserved for the
	 * generated clear_page / copy_page routines.
	 */
	half_clear_loop_size = min(16 * clear_word_size,
				   max(cache_line_size >> 1,
				       4 * clear_word_size));
	half_copy_loop_size = min(16 * copy_word_size,
				  max(cache_line_size >> 1,
				      4 * copy_word_size));
}

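/* Emit a zero store of the natural word size (SD on 64-bit, SW on 32-bit). */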
static void build_clear_store(u32 **buf, int off)
{
	if (cpu_has_64bit_gp_regs || cpu_has_64bit_zero_reg) {
		uasm_i_sd(buf, ZERO, off, A0);
	} else {
		uasm_i_sw(buf, ZERO, off, A0);
	}
}

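/*
 * Once per cache line, emit either a prefetch-for-store or, failing
 * that, a Create-Dirty-Exclusive cacheop when a full loop iteration
 * covers exactly one cache line (with the R4600 workarounds where
 * required).
 */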
static inline void build_clear_pref(u32 **buf, int off)
{
	if (off & cache_line_mask())
		return;

	if (pref_bias_clear_store) {
		_uasm_i_pref(buf, pref_dst_mode, pref_bias_clear_store + off,
			     A0);
	} else if (cache_line_size == (half_clear_loop_size << 1)) {
		if (cpu_has_cache_cdex_s) {
			uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0);
		} else if (cpu_has_cache_cdex_p) {
			if (R4600_V1_HIT_CACHEOP_WAR && cpu_is_r4600_v1_x()) {
				uasm_i_nop(buf);
				uasm_i_nop(buf);
				uasm_i_nop(buf);
				uasm_i_nop(buf);
			}

			if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
				uasm_i_lw(buf, ZERO, ZERO, AT);

			uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0);
		}
	}
}

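/* Start/end markers of the buffers reserved for the generated routines. */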
extern u32 __clear_page_start;
extern u32 __clear_page_end;
extern u32 __copy_page_start;
extern u32 __copy_page_end;

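/*
 * Synthesize the clear_page handler at boot: an unrolled store loop
 * with prefetching ahead of the stores, followed by a no-prefetch loop
 * for the last pref_bias_clear_store bytes of the page.
 */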
void build_clear_page(void)
{
	int off;
	u32 *buf = &__clear_page_start;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	int i;
	static atomic_t run_once = ATOMIC_INIT(0);

	if (atomic_xchg(&run_once, 1)) {
		return;
	}

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	set_prefetch_parameters();

	/*
	 * This algorithm makes the following assumptions:
	 * - The prefetch bias is a multiple of 2 words.
	 * - The prefetch bias is less than one page.
	 */
	BUG_ON(pref_bias_clear_store % (2 * clear_word_size));
	BUG_ON(PAGE_SIZE < pref_bias_clear_store);

	off = PAGE_SIZE - pref_bias_clear_store;
	if (off > 0xffff || !pref_bias_clear_store)
		pg_addiu(&buf, A2, A0, off);
	else
		uasm_i_ori(&buf, A2, A0, off);

	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
		uasm_i_lui(&buf, AT, uasm_rel_hi(0xa0000000));

	off = cache_line_size ? min(8, pref_bias_clear_store / cache_line_size)
				* cache_line_size : 0;
	while (off) {
		build_clear_pref(&buf, -off);
		off -= cache_line_size;
	}
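	/*
	 * The unrolled loop runs in two halves: the first half stores at
	 * positive offsets, then A0 is advanced past both halves so the
	 * second half can store at negative offsets.  The loop branch is
	 * emitted just before the last store, which fills its delay slot.
	 */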
	uasm_l_clear_pref(&l, buf);
	do {
		build_clear_pref(&buf, off);
		build_clear_store(&buf, off);
		off += clear_word_size;
	} while (off < half_clear_loop_size);
	pg_addiu(&buf, A0, A0, 2 * off);
	off = -off;
	do {
		build_clear_pref(&buf, off);
		if (off == -clear_word_size)
			uasm_il_bne(&buf, &r, A0, A2, label_clear_pref);
		build_clear_store(&buf, off);
		off += clear_word_size;
	} while (off < 0);

	if (pref_bias_clear_store) {
		pg_addiu(&buf, A2, A0, pref_bias_clear_store);
		uasm_l_clear_nopref(&l, buf);
		off = 0;
		do {
			build_clear_store(&buf, off);
			off += clear_word_size;
		} while (off < half_clear_loop_size);
		pg_addiu(&buf, A0, A0, 2 * off);
		off = -off;
		do {
			if (off == -clear_word_size)
				uasm_il_bne(&buf, &r, A0, A2,
					    label_clear_nopref);
			build_clear_store(&buf, off);
			off += clear_word_size;
		} while (off < 0);
	}

	uasm_i_jr(&buf, RA);
	uasm_i_nop(&buf);

	BUG_ON(buf > &__clear_page_end);

	uasm_resolve_relocs(relocs, labels);

	pr_debug("Synthesized clear page handler (%u instructions).\n",
		 (u32)(buf - &__clear_page_start));

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");
	for (i = 0; i < (buf - &__clear_page_start); i++)
		pr_debug("\t.word 0x%08x\n", (&__clear_page_start)[i]);
	pr_debug("\t.set pop\n");
}

static void build_copy_load(u32 **buf, int reg, int off)
{
	if (cpu_has_64bit_gp_regs) {
		uasm_i_ld(buf, reg, off, A1);
	} else {
		uasm_i_lw(buf, reg, off, A1);
	}
}

static void build_copy_store(u32 **buf, int reg, int off)
{
	if (cpu_has_64bit_gp_regs) {
		uasm_i_sd(buf, reg, off, A0);
	} else {
		uasm_i_sw(buf, reg, off, A0);
	}
}

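/*
 * Prefetch helpers for the copy loop: a load prefetch on the source
 * (A1) and a store prefetch or Create-Dirty cacheop on the destination
 * (A0), each emitted once per cache line.
 */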
static inline void build_copy_load_pref(u32 **buf, int off)
{
	if (off & cache_line_mask())
		return;

	if (pref_bias_copy_load)
		_uasm_i_pref(buf, pref_src_mode, pref_bias_copy_load + off, A1);
}

static inline void build_copy_store_pref(u32 **buf, int off)
{
	if (off & cache_line_mask())
		return;

	if (pref_bias_copy_store) {
		_uasm_i_pref(buf, pref_dst_mode, pref_bias_copy_store + off,
			     A0);
	} else if (cache_line_size == (half_copy_loop_size << 1)) {
		if (cpu_has_cache_cdex_s) {
			uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0);
		} else if (cpu_has_cache_cdex_p) {
			if (R4600_V1_HIT_CACHEOP_WAR && cpu_is_r4600_v1_x()) {
				uasm_i_nop(buf);
				uasm_i_nop(buf);
				uasm_i_nop(buf);
				uasm_i_nop(buf);
			}

			if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
				uasm_i_lw(buf, ZERO, ZERO, AT);

			uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0);
		}
	}
}

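/*
 * Synthesize the copy_page handler: a main loop that prefetches both
 * source and destination, then a loop that prefetches the destination
 * only, and a final loop with no prefetching, so the tail of the page
 * is copied without prefetching past its end.
 */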
void build_copy_page(void)
{
	int off;
	u32 *buf = &__copy_page_start;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	int i;
	static atomic_t run_once = ATOMIC_INIT(0);

	if (atomic_xchg(&run_once, 1)) {
		return;
	}

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	set_prefetch_parameters();

	/*
	 * This algorithm makes the following assumptions:
	 * - All prefetch biases are multiples of 8 words.
	 * - The prefetch biases are less than one page.
	 * - The store prefetch bias isn't greater than the load
	 *   prefetch bias.
	 */
	BUG_ON(pref_bias_copy_load % (8 * copy_word_size));
	BUG_ON(pref_bias_copy_store % (8 * copy_word_size));
	BUG_ON(PAGE_SIZE < pref_bias_copy_load);
	BUG_ON(pref_bias_copy_store > pref_bias_copy_load);

	off = PAGE_SIZE - pref_bias_copy_load;
	if (off > 0xffff || !pref_bias_copy_load)
		pg_addiu(&buf, A2, A0, off);
	else
		uasm_i_ori(&buf, A2, A0, off);

	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
		uasm_i_lui(&buf, AT, uasm_rel_hi(0xa0000000));

	off = cache_line_size ? min(8, pref_bias_copy_load / cache_line_size) *
				cache_line_size : 0;
	while (off) {
		build_copy_load_pref(&buf, -off);
		off -= cache_line_size;
	}
	off = cache_line_size ? min(8, pref_bias_copy_store / cache_line_size) *
				cache_line_size : 0;
	while (off) {
		build_copy_store_pref(&buf, -off);
		off -= cache_line_size;
	}
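	/*
	 * As in build_clear_page(), each loop is unrolled in two halves
	 * (positive offsets, pointer bump, then negative offsets), with
	 * the loop branch emitted before the last store so that store
	 * fills the branch delay slot.
	 */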
	uasm_l_copy_pref_both(&l, buf);
	do {
		build_copy_load_pref(&buf, off);
		build_copy_load(&buf, T0, off);
		build_copy_load_pref(&buf, off + copy_word_size);
		build_copy_load(&buf, T1, off + copy_word_size);
		build_copy_load_pref(&buf, off + 2 * copy_word_size);
		build_copy_load(&buf, T2, off + 2 * copy_word_size);
		build_copy_load_pref(&buf, off + 3 * copy_word_size);
		build_copy_load(&buf, T3, off + 3 * copy_word_size);
		build_copy_store_pref(&buf, off);
		build_copy_store(&buf, T0, off);
		build_copy_store_pref(&buf, off + copy_word_size);
		build_copy_store(&buf, T1, off + copy_word_size);
		build_copy_store_pref(&buf, off + 2 * copy_word_size);
		build_copy_store(&buf, T2, off + 2 * copy_word_size);
		build_copy_store_pref(&buf, off + 3 * copy_word_size);
		build_copy_store(&buf, T3, off + 3 * copy_word_size);
		off += 4 * copy_word_size;
	} while (off < half_copy_loop_size);
	pg_addiu(&buf, A1, A1, 2 * off);
	pg_addiu(&buf, A0, A0, 2 * off);
	off = -off;
	do {
		build_copy_load_pref(&buf, off);
		build_copy_load(&buf, T0, off);
		build_copy_load_pref(&buf, off + copy_word_size);
		build_copy_load(&buf, T1, off + copy_word_size);
		build_copy_load_pref(&buf, off + 2 * copy_word_size);
		build_copy_load(&buf, T2, off + 2 * copy_word_size);
		build_copy_load_pref(&buf, off + 3 * copy_word_size);
		build_copy_load(&buf, T3, off + 3 * copy_word_size);
		build_copy_store_pref(&buf, off);
		build_copy_store(&buf, T0, off);
		build_copy_store_pref(&buf, off + copy_word_size);
		build_copy_store(&buf, T1, off + copy_word_size);
		build_copy_store_pref(&buf, off + 2 * copy_word_size);
		build_copy_store(&buf, T2, off + 2 * copy_word_size);
		build_copy_store_pref(&buf, off + 3 * copy_word_size);
		if (off == -(4 * copy_word_size))
			uasm_il_bne(&buf, &r, A2, A0, label_copy_pref_both);
		build_copy_store(&buf, T3, off + 3 * copy_word_size);
		off += 4 * copy_word_size;
	} while (off < 0);

	if (pref_bias_copy_load - pref_bias_copy_store) {
		pg_addiu(&buf, A2, A0,
			 pref_bias_copy_load - pref_bias_copy_store);
		uasm_l_copy_pref_store(&l, buf);
		off = 0;
		do {
			build_copy_load(&buf, T0, off);
			build_copy_load(&buf, T1, off + copy_word_size);
			build_copy_load(&buf, T2, off + 2 * copy_word_size);
			build_copy_load(&buf, T3, off + 3 * copy_word_size);
			build_copy_store_pref(&buf, off);
			build_copy_store(&buf, T0, off);
			build_copy_store_pref(&buf, off + copy_word_size);
			build_copy_store(&buf, T1, off + copy_word_size);
			build_copy_store_pref(&buf, off + 2 * copy_word_size);
			build_copy_store(&buf, T2, off + 2 * copy_word_size);
			build_copy_store_pref(&buf, off + 3 * copy_word_size);
			build_copy_store(&buf, T3, off + 3 * copy_word_size);
			off += 4 * copy_word_size;
		} while (off < half_copy_loop_size);
		pg_addiu(&buf, A1, A1, 2 * off);
		pg_addiu(&buf, A0, A0, 2 * off);
		off = -off;
		do {
			build_copy_load(&buf, T0, off);
			build_copy_load(&buf, T1, off + copy_word_size);
			build_copy_load(&buf, T2, off + 2 * copy_word_size);
			build_copy_load(&buf, T3, off + 3 * copy_word_size);
			build_copy_store_pref(&buf, off);
			build_copy_store(&buf, T0, off);
			build_copy_store_pref(&buf, off + copy_word_size);
			build_copy_store(&buf, T1, off + copy_word_size);
			build_copy_store_pref(&buf, off + 2 * copy_word_size);
			build_copy_store(&buf, T2, off + 2 * copy_word_size);
			build_copy_store_pref(&buf, off + 3 * copy_word_size);
			if (off == -(4 * copy_word_size))
				uasm_il_bne(&buf, &r, A2, A0,
					    label_copy_pref_store);
			build_copy_store(&buf, T3, off + 3 * copy_word_size);
			off += 4 * copy_word_size;
		} while (off < 0);
	}

	if (pref_bias_copy_store) {
		pg_addiu(&buf, A2, A0, pref_bias_copy_store);
		uasm_l_copy_nopref(&l, buf);
		off = 0;
		do {
			build_copy_load(&buf, T0, off);
			build_copy_load(&buf, T1, off + copy_word_size);
			build_copy_load(&buf, T2, off + 2 * copy_word_size);
			build_copy_load(&buf, T3, off + 3 * copy_word_size);
			build_copy_store(&buf, T0, off);
			build_copy_store(&buf, T1, off + copy_word_size);
			build_copy_store(&buf, T2, off + 2 * copy_word_size);
			build_copy_store(&buf, T3, off + 3 * copy_word_size);
			off += 4 * copy_word_size;
		} while (off < half_copy_loop_size);
		pg_addiu(&buf, A1, A1, 2 * off);
		pg_addiu(&buf, A0, A0, 2 * off);
		off = -off;
		do {
			build_copy_load(&buf, T0, off);
			build_copy_load(&buf, T1, off + copy_word_size);
			build_copy_load(&buf, T2, off + 2 * copy_word_size);
			build_copy_load(&buf, T3, off + 3 * copy_word_size);
			build_copy_store(&buf, T0, off);
			build_copy_store(&buf, T1, off + copy_word_size);
			build_copy_store(&buf, T2, off + 2 * copy_word_size);
			if (off == -(4 * copy_word_size))
				uasm_il_bne(&buf, &r, A2, A0,
					    label_copy_nopref);
			build_copy_store(&buf, T3, off + 3 * copy_word_size);
			off += 4 * copy_word_size;
		} while (off < 0);
	}

	uasm_i_jr(&buf, RA);
	uasm_i_nop(&buf);

	BUG_ON(buf > &__copy_page_end);

	uasm_resolve_relocs(relocs, labels);

	pr_debug("Synthesized copy page handler (%u instructions).\n",
		 (u32)(buf - &__copy_page_start));

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");
	for (i = 0; i < (buf - &__copy_page_start); i++)
		pr_debug("\t.word 0x%08x\n", (&__copy_page_start)[i]);
	pr_debug("\t.set pop\n");
}

#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
extern void clear_page_cpu(void *page);
extern void copy_page_cpu(void *to, void *from);

/*
 * Pad descriptors to cacheline, since each is exclusively owned by a
 * particular CPU.
 */
struct dmadscr {
	u64 dscr_a;
	u64 dscr_b;
	u64 pad_a;
	u64 pad_b;
} ____cacheline_aligned_in_smp page_descr[DM_NUM_CHANNELS];

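/*
 * Give each data mover channel a single-entry descriptor ring based at
 * its page_descr slot, then reset and enable the channel.
 */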
void sb1_dma_init(void)
{
	int i;

	for (i = 0; i < DM_NUM_CHANNELS; i++) {
		const u64 base_val = CPHYSADDR((unsigned long)&page_descr[i]) |
				     V_DM_DSCR_BASE_RINGSZ(1);
		void *base_reg = IOADDR(A_DM_REGISTER(i, R_DM_DSCR_BASE));

		__raw_writeq(base_val, base_reg);
		__raw_writeq(base_val | M_DM_DSCR_BASE_RESET, base_reg);
		__raw_writeq(base_val | M_DM_DSCR_BASE_ENABL, base_reg);
	}
}

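/*
 * DMA-engine versions of clear_page/copy_page: fill in this CPU's
 * descriptor, kick the data mover, then spin until the engine raises
 * its interrupt status bit.  Pages outside KSEG0 fall back to the
 * generated CPU routines above.
 */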
void clear_page(void *page)
{
	u64 to_phys = CPHYSADDR((unsigned long)page);
	unsigned int cpu = smp_processor_id();

	/* if the page is not in KSEG0, use old way */
	if ((long)KSEGX((unsigned long)page) != (long)CKSEG0)
		return clear_page_cpu(page);

	page_descr[cpu].dscr_a = to_phys | M_DM_DSCRA_ZERO_MEM |
				 M_DM_DSCRA_L2C_DEST | M_DM_DSCRA_INTERRUPT;
	page_descr[cpu].dscr_b = V_DM_DSCRB_SRC_LENGTH(PAGE_SIZE);
	__raw_writeq(1, IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_COUNT)));

	/*
	 * Don't really want to do it this way, but there's no
	 * reliable way to delay completion detection.
	 */
	while (!(__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE_DEBUG)))
		 & M_DM_DSCR_BASE_INTERRUPT))
		;
	__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE)));
}

void copy_page(void *to, void *from)
{
	u64 from_phys = CPHYSADDR((unsigned long)from);
	u64 to_phys = CPHYSADDR((unsigned long)to);
	unsigned int cpu = smp_processor_id();

	/* if any page is not in KSEG0, use old way */
	if ((long)KSEGX((unsigned long)to) != (long)CKSEG0
	    || (long)KSEGX((unsigned long)from) != (long)CKSEG0)
		return copy_page_cpu(to, from);

	page_descr[cpu].dscr_a = to_phys | M_DM_DSCRA_L2C_DEST |
				 M_DM_DSCRA_INTERRUPT;
	page_descr[cpu].dscr_b = from_phys | V_DM_DSCRB_SRC_LENGTH(PAGE_SIZE);
	__raw_writeq(1, IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_COUNT)));

	/*
	 * Don't really want to do it this way, but there's no
	 * reliable way to delay completion detection.
	 */
	while (!(__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE_DEBUG)))
		 & M_DM_DSCR_BASE_INTERRUPT))
		;
	__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE)));
}

#endif /* CONFIG_SIBYTE_DMA_PAGEOPS */