blob: 09d5deea747f2fc37921c1cdb7d692010e165e4e [file] [log] [blame]
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2005 Thiemo Seufer
 * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved.
 * Author: Maciej W. Rozycki <macro@mips.com>
 */


#include <asm/addrspace.h>
#include <asm/bug.h>
#include <asm/cacheflush.h>

/*
 * Fallback definitions for configurations where <asm/addrspace.h>
 * does not provide these symbols.
 */
#ifndef CKSEG2
#define CKSEG2 CKSSEG		/* treat the CKSSEG range as CKSEG2 */
#endif
#ifndef TO_PHYS_MASK
#define TO_PHYS_MASK -1		/* identity mask: no physical-address masking */
#endif
22
/*
 * FUNC is executed in one of the uncached segments, depending on its
 * original address as follows:
 *
 * 1. If the original address is in CKSEG0 or CKSEG1, then the uncached
 *    segment used is CKSEG1.
 * 2. If the original address is in XKPHYS, then the uncached segment
 *    used is XKPHYS(2).
 * 3. Otherwise it's a bug.
 *
 * The same remapping is done with the stack pointer.  Stack handling
 * works because we don't handle stack arguments or more complex return
 * values, so we can avoid sharing the same stack area between a cached
 * and the uncached mode.
 */
unsigned long run_uncached(void *func)
{
	/* Pin locals to fixed registers: $sp reads the live stack pointer;
	 * $2 (v0, the MIPS return-value register) receives func's result
	 * after the inline-asm call below. */
	register long sp __asm__("$sp");
	register long ret __asm__("$2");
	long lfunc = (long)func, ufunc;	/* cached / uncached function address */
	long usp;			/* uncached alias of the stack pointer */

	/* Remap the current stack pointer to its uncached alias. */
	if (sp >= (long)CKSEG0 && sp < (long)CKSEG2)
		usp = CKSEG1ADDR(sp);
#ifdef CONFIG_64BIT
	else if ((long long)sp >= (long long)PHYS_TO_XKPHYS(0, 0) &&
		 (long long)sp < (long long)PHYS_TO_XKPHYS(8, 0))
		/* XKPHYS: rebuild the address with the uncached CCA. */
		usp = PHYS_TO_XKPHYS(K_CALG_UNCACHED,
				     XKPHYS_TO_PHYS((long long)sp));
#endif
	else {
		BUG();		/* address in no remappable segment (case 3) */
		usp = sp;
	}
	/* Remap the function address the same way. */
	if (lfunc >= (long)CKSEG0 && lfunc < (long)CKSEG2)
		ufunc = CKSEG1ADDR(lfunc);
#ifdef CONFIG_64BIT
	else if ((long long)lfunc >= (long long)PHYS_TO_XKPHYS(0, 0) &&
		 (long long)lfunc < (long long)PHYS_TO_XKPHYS(8, 0))
		ufunc = PHYS_TO_XKPHYS(K_CALG_UNCACHED,
				       XKPHYS_TO_PHYS((long long)lfunc));
#endif
	else {
		BUG();		/* address in no remappable segment (case 3) */
		ufunc = lfunc;
	}

	/*
	 * Save the cached $sp in $16 (callee-saved), switch to the
	 * uncached stack, call the uncached function, then restore the
	 * original $sp.  $16 and $31 (the jalr return address) are
	 * listed as clobbers; func's return value comes back in $2 via
	 * the register-pinned 'ret'.
	 */
	__asm__ __volatile__ (
	"	move	$16, $sp\n"
	"	move	$sp, %1\n"
	"	jalr	%2\n"
	"	 move	$sp, $16"
	:	"=r" (ret)
	:	"r" (usp), "r" (ufunc)
	:	"$16", "$31");

	return ret;
}