/*
 * Copyright 2011 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#include <linux/linkage.h>
#include <asm/errno.h>
#include <asm/cache.h>
#include <arch/chip.h>

/* Access user memory, but use MMU to avoid propagating kernel exceptions. */

/*
 * clear_user_asm takes the user target address in r0 and the
 * number of bytes to zero in r1.
 * It returns the number of uncopiable bytes (hopefully zero) in r0.
 * Note that we don't use a separate .fixup section here since we fall
 * through into the "fixup" code as the last straight-line bundle anyway.
 */
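/*
 * For reference, a minimal sketch of how a C-level caller might wrap this
 * routine.  The __clear_user name and the might_fault() call are
 * illustrative assumptions about the <asm/uaccess.h> wrapper, not a copy
 * of its exact contents:
 *
 *	extern unsigned long clear_user_asm(void __user *mem, unsigned long n);
 *
 *	static inline unsigned long __must_check __clear_user(
 *		void __user *mem, unsigned long n)
 *	{
 *		might_fault();			// may take a page fault
 *		return clear_user_asm(mem, n);	// returns bytes NOT zeroed
 *	}
 */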
STD_ENTRY(clear_user_asm)
	{ beqz r1, 2f; or r2, r0, r1 }
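	/*
	 * OR the address and the length together: if any of the low three
	 * bits of either are set, 8-byte stores cannot cover the whole
	 * range, so fall through to the byte-at-a-time loop below.
	 */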
	andi r2, r2, 7
	beqzt r2, .Lclear_aligned_user_asm
1:	{ st1 r0, zero; addi r0, r0, 1; addi r1, r1, -1 }
	bnezt r1, 1b
2:	{ move r0, r1; jrp lr }
	.pushsection __ex_table,"a"
	.align 8
	.quad 1b, 2b
	.popsection

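/*
 * Fast path: both the target address and the byte count are multiples
 * of 8, so zero the buffer with full 8-byte stores.
 */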
.Lclear_aligned_user_asm:
1:	{ st r0, zero; addi r0, r0, 8; addi r1, r1, -8 }
	bnezt r1, 1b
2:	{ move r0, r1; jrp lr }
	STD_ENDPROC(clear_user_asm)
	.pushsection __ex_table,"a"
	.align 8
	.quad 1b, 2b
	.popsection

/*
 * flush_user_asm takes the user target address in r0 and the
 * number of bytes to flush in r1.
 * It returns the number of unflushable bytes (hopefully zero) in r0.
 */
STD_ENTRY(flush_user_asm)
	beqz r1, 2f
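	/*
	 * Round the start address down and the end address up to
	 * L2_CACHE_BYTES boundaries, then turn r1 into the byte count of
	 * the rounded region.  The loop below issues one "flush" every
	 * CHIP_FLUSH_STRIDE() bytes until that count reaches zero.
	 */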
	{ movei r2, L2_CACHE_BYTES; add r1, r0, r1 }
	{ sub r2, zero, r2; addi r1, r1, L2_CACHE_BYTES-1 }
	{ and r0, r0, r2; and r1, r1, r2 }
	{ sub r1, r1, r0 }
1:	{ flush r0; addi r1, r1, -CHIP_FLUSH_STRIDE() }
	{ addi r0, r0, CHIP_FLUSH_STRIDE(); bnezt r1, 1b }
2:	{ move r0, r1; jrp lr }
	STD_ENDPROC(flush_user_asm)
	.pushsection __ex_table,"a"
	.align 8
	.quad 1b, 2b
	.popsection

/*
 * finv_user_asm takes the user target address in r0 and the
 * number of bytes to flush-invalidate in r1.
 * It returns the number of not finv'able bytes (hopefully zero) in r0.
 */
STD_ENTRY(finv_user_asm)
	beqz r1, 2f
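	/*
	 * Same cache-line rounding as flush_user_asm above, but issue a
	 * "finv" (flush-and-invalidate) every CHIP_FINV_STRIDE() bytes.
	 */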
	{ movei r2, L2_CACHE_BYTES; add r1, r0, r1 }
	{ sub r2, zero, r2; addi r1, r1, L2_CACHE_BYTES-1 }
	{ and r0, r0, r2; and r1, r1, r2 }
	{ sub r1, r1, r0 }
1:	{ finv r0; addi r1, r1, -CHIP_FINV_STRIDE() }
	{ addi r0, r0, CHIP_FINV_STRIDE(); bnezt r1, 1b }
2:	{ move r0, r1; jrp lr }
	STD_ENDPROC(finv_user_asm)
	.pushsection __ex_table,"a"
	.align 8
	.quad 1b, 2b
	.popsection