/*
 * Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <platform_def.h>
#include <psci.h>

	.globl	psci_do_pwrdown_cache_maintenance
	.globl	psci_do_pwrup_cache_maintenance

/* -----------------------------------------------------------------------
 * void psci_do_pwrdown_cache_maintenance(uint32_t affinity_level);
 *
 * This function performs cache maintenance if the specified affinity
 * level is equal to the level of the highest affinity instance which
 * will be/is physically powered off. The levels of cache affected are
 * determined by the affinity level passed as the argument, i.e.
 * level 0 results in a flush of the L1 cache. Both the L1 and L2 caches
 * are flushed for a higher affinity level.
 *
 * Additionally, this function ensures that the used stack memory is
 * correctly flushed out to avoid coherency issues due to a change in its
 * memory attributes after the data cache is disabled.
 *
 * See the illustrative caller sketch after the function body below.
 * -----------------------------------------------------------------------
 */
func psci_do_pwrdown_cache_maintenance
	stp	x29, x30, [sp,#-16]!
	stp	x19, x20, [sp,#-16]!

	/* ---------------------------------------------
	 * Determine the highest affinity level which
	 * will be/is physically powered off. Skip the
	 * cache maintenance if it does not match the
	 * affinity level this function was called for.
	 * ---------------------------------------------
	 */
	mov	x19, x0
	bl	psci_get_max_phys_off_afflvl
#if ASM_ASSERTION
	cmp	x0, #PSCI_INVALID_DATA
	ASM_ASSERT(ne)
#endif
	cmp	x0, x19
	b.ne	1f

	/* ---------------------------------------------
	 * Disable the Data Cache.
	 * ---------------------------------------------
	 */
	mrs	x1, sctlr_el3
	bic	x1, x1, #SCTLR_C_BIT
	msr	sctlr_el3, x1
	isb

	/* ---------------------------------------------
	 * Determine how many levels of cache will be
	 * subject to cache maintenance. Affinity level
	 * 0 implies that only the cpu is being powered
	 * down. Only the L1 data cache needs to be
	 * flushed to the PoU in this case. For a higher
	 * affinity level we are assuming that a flush
	 * of L1 data and L2 unified cache is enough.
	 * This information should be provided by the
	 * platform.
	 * ---------------------------------------------
	 */
	cmp	x0, #MPIDR_AFFLVL0
	mov	x0, #DCCISW
	b.ne	flush_caches_to_poc

	/* ---------------------------------------------
	 * Flush L1 cache to PoU.
	 * ---------------------------------------------
	 */
	bl	dcsw_op_louis
	b	do_stack_maintenance

	/* ---------------------------------------------
	 * Flush L1 and L2 caches to PoC.
	 * ---------------------------------------------
	 */
flush_caches_to_poc:
	bl	dcsw_op_all

	/* ---------------------------------------------
	 * TODO: Intra-cluster coherency should be
	 * turned off here once cpu-specific
	 * abstractions are in place.
	 * ---------------------------------------------
	 */

	/* ---------------------------------------------
	 * Do stack maintenance by flushing the used
	 * stack to the main memory and invalidating the
	 * remainder.
	 * ---------------------------------------------
	 */
do_stack_maintenance:
	mrs	x0, mpidr_el1
	bl	platform_get_stack

	/* ---------------------------------------------
	 * Calculate and store the size of the used
	 * stack memory in x1.
	 * ---------------------------------------------
	 */
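	/* ---------------------------------------------
	 * Illustrative example (addresses are made up,
	 * not taken from any platform): if
	 * platform_get_stack returned 0x80010000 and
	 * the current sp is 0x8000ff80, the used region
	 * [0x8000ff80, 0x80010000), i.e. 0x80 bytes, is
	 * flushed below, and the remaining
	 * PLATFORM_STACK_SIZE - 0x80 bytes beneath sp
	 * are invalidated afterwards.
	 * ---------------------------------------------
	 */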
	mov	x19, x0
	mov	x1, sp
	sub	x1, x0, x1
	mov	x0, sp
	bl	flush_dcache_range

	/* ---------------------------------------------
	 * Calculate and store the size of the unused
	 * stack memory in x1. Calculate and store the
	 * stack base address in x0.
	 * ---------------------------------------------
	 */
	sub	x0, x19, #PLATFORM_STACK_SIZE
	sub	x1, sp, x0
	bl	inv_dcache_range

1:
	ldp	x19, x20, [sp], #16
	ldp	x29, x30, [sp], #16
	ret
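
/* -----------------------------------------------------------------------
 * Illustrative caller sketch for psci_do_pwrdown_cache_maintenance. This
 * is a hedged, C-level example only; the caller name and the way the
 * target affinity level is obtained are assumptions made for
 * illustration, not the actual call site in the PSCI code:
 *
 *	// Hypothetical power-down path (assumed name).
 *	void example_cpu_power_down(uint32_t target_afflvl)
 *	{
 *		// Clean the relevant caches and the used stack while the
 *		// data cache is still usable, then hand over to the
 *		// platform specific power down sequence.
 *		psci_do_pwrdown_cache_maintenance(target_afflvl);
 *
 *		// ... platform specific power down follows ...
 *	}
 * -----------------------------------------------------------------------
 */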


/* -----------------------------------------------------------------------
 * void psci_do_pwrup_cache_maintenance(void);
 *
 * This function performs cache maintenance after this cpu is powered up.
 * Currently, this involves managing the used stack memory before turning
 * on the data cache.
 *
 * See the illustrative caller sketch at the end of this file.
 * -----------------------------------------------------------------------
 */
func psci_do_pwrup_cache_maintenance
	stp	x29, x30, [sp,#-16]!

	/* ---------------------------------------------
	 * Ensure any inflight stack writes have made it
	 * to main memory.
	 * ---------------------------------------------
	 */
	dmb	st

	/* ---------------------------------------------
	 * Calculate and store the size of the used
	 * stack memory in x1. Calculate and store the
	 * stack base address in x0.
	 * ---------------------------------------------
	 */
	mrs	x0, mpidr_el1
	bl	platform_get_stack
	mov	x1, sp
	sub	x1, x0, x1
	mov	x0, sp
	bl	inv_dcache_range

	/* ---------------------------------------------
	 * Enable the data cache.
	 * ---------------------------------------------
	 */
	mrs	x0, sctlr_el3
	orr	x0, x0, #SCTLR_C_BIT
	msr	sctlr_el3, x0
	isb

	ldp	x29, x30, [sp], #16
	ret
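
/* -----------------------------------------------------------------------
 * Illustrative caller sketch for psci_do_pwrup_cache_maintenance. This is
 * a hedged, C-level example only; the caller name is an assumption made
 * for illustration, not the actual warm boot hook:
 *
 *	// Hypothetical warm boot finisher (assumed name).
 *	void example_cpu_power_up_finish(void)
 *	{
 *		// Discard stale stack lines and re-enable the data cache
 *		// before the rest of the warm boot path relies on
 *		// cacheable stack accesses.
 *		psci_do_pwrup_cache_maintenance();
 *	}
 * -----------------------------------------------------------------------
 */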