/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#include <linux/linkage.h>
#include <asm/errno.h>
#include <asm/cache.h>
#include <arch/chip.h>

/* Access user memory, but use MMU to avoid propagating kernel exceptions. */

/*
 * clear_user_asm takes the user target address in r0 and the
 * number of bytes to zero in r1.
 * It returns the number of uncopiable bytes (hopefully zero) in r0.
 * Note that we don't use a separate .fixup section here since we fall
 * through into the "fixup" code as the last straight-line bundle anyway.
 */
STD_ENTRY(clear_user_asm)
        { bz r1, 2f; or r2, r0, r1 }    /* done immediately if length is zero */
        andi r2, r2, 3
        bzt r2, .Lclear_aligned_user_asm /* address and length both word-aligned? */
1:      { sb r0, zero; addi r0, r0, 1; addi r1, r1, -1 }
        bnzt r1, 1b
2:      { move r0, r1; jrp lr }         /* return bytes not cleared (0 on success) */
        .pushsection __ex_table,"a"
        .align 4
        .word 1b, 2b
        .popsection

.Lclear_aligned_user_asm:
1:      { sw r0, zero; addi r0, r0, 4; addi r1, r1, -4 }
        bnzt r1, 1b
2:      { move r0, r1; jrp lr }
        STD_ENDPROC(clear_user_asm)
        .pushsection __ex_table,"a"
        .align 4
        .word 1b, 2b
        .popsection

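/*
 * Illustrative only: a minimal C-side sketch of how the routine above is
 * declared and wrapped, assuming the standard Tile calling convention
 * (first argument in r0, second in r1, result back in r0).  The wrapper
 * below is a sketch of the usual uaccess-style usage, not necessarily the
 * exact definition used elsewhere in the tree.
 *
 *   extern unsigned long clear_user_asm(void __user *mem, unsigned long n);
 *
 *   static inline unsigned long __must_check __clear_user(
 *           void __user *mem, unsigned long n)
 *   {
 *           might_fault();
 *           return clear_user_asm(mem, n);  // bytes NOT zeroed; 0 on success
 *   }
 */
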
/*
 * flush_user_asm takes the user target address in r0 and the
 * number of bytes to flush in r1.
 * It returns the number of unflushable bytes (hopefully zero) in r0.
 */
STD_ENTRY(flush_user_asm)
        bz r1, 2f                       /* nothing to do for a zero-length flush */
        { movei r2, L2_CACHE_BYTES; add r1, r0, r1 }
        { sub r2, zero, r2; addi r1, r1, L2_CACHE_BYTES-1 }
        { and r0, r0, r2; and r1, r1, r2 } /* align start down, end up, to lines */
        { sub r1, r1, r0 }              /* r1 = number of bytes to flush */
1:      { flush r0; addi r1, r1, -CHIP_FLUSH_STRIDE() }
        { addi r0, r0, CHIP_FLUSH_STRIDE(); bnzt r1, 1b }
2:      { move r0, r1; jrp lr }         /* return bytes remaining (0 on success) */
        STD_ENDPROC(flush_user_asm)
        .pushsection __ex_table,"a"
        .align 4
        .word 1b, 2b
        .popsection

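/*
 * For clarity, the alignment arithmetic above expressed as a C sketch.
 * This is an illustrative pseudo-equivalent of the bundles in
 * flush_user_asm, not code that is built anywhere; flush_line() stands in
 * for the per-line flush instruction and is a hypothetical helper name.
 *
 *   unsigned long start = addr & -L2_CACHE_BYTES;            // round start down
 *   unsigned long end = (addr + len + L2_CACHE_BYTES - 1)
 *                       & -L2_CACHE_BYTES;                   // round end up
 *   unsigned long remaining = end - start;
 *   while (remaining != 0) {
 *           flush_line((void *)start);                       // flush one line
 *           start += CHIP_FLUSH_STRIDE();
 *           remaining -= CHIP_FLUSH_STRIDE();
 *   }
 *   return remaining;   // nonzero only if a fault cut the loop short
 */
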
/*
 * finv_user_asm takes the user target address in r0 and the
 * number of bytes to flush-invalidate in r1.
 * It returns the number of not finv'able bytes (hopefully zero) in r0.
 */
STD_ENTRY(finv_user_asm)
        bz r1, 2f                       /* nothing to do for a zero-length range */
        { movei r2, L2_CACHE_BYTES; add r1, r0, r1 }
        { sub r2, zero, r2; addi r1, r1, L2_CACHE_BYTES-1 }
        { and r0, r0, r2; and r1, r1, r2 } /* align start down, end up, to lines */
        { sub r1, r1, r0 }              /* r1 = number of bytes to flush-invalidate */
1:      { finv r0; addi r1, r1, -CHIP_FINV_STRIDE() }
        { addi r0, r0, CHIP_FINV_STRIDE(); bnzt r1, 1b }
2:      { move r0, r1; jrp lr }         /* return bytes remaining (0 on success) */
        STD_ENDPROC(finv_user_asm)
        .pushsection __ex_table,"a"
        .align 4
        .word 1b, 2b
        .popsection
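
/*
 * Illustrative only: finv_user_asm follows the same pattern as
 * flush_user_asm, but uses the flush-and-invalidate operation and
 * CHIP_FINV_STRIDE().  A hedged sketch of a C-side caller (the wrapper
 * name finv_user_range is hypothetical, not taken from this file):
 *
 *   extern unsigned long finv_user_asm(void __user *mem, unsigned long n);
 *
 *   static inline int finv_user_range(void __user *mem, unsigned long n)
 *   {
 *           // A nonzero result means part of the range faulted before it
 *           // could be flush-invalidated; report that as -EFAULT.
 *           return finv_user_asm(mem, n) ? -EFAULT : 0;
 *   }
 */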