/*
 * Copyright 2003-2013 Broadcom Corporation.
 * All Rights Reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the Broadcom
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */


#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/cpu.h>
#include <asm/cacheops.h>
#include <asm/regdef.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
#include <asm/asmmacro.h>
#include <asm/addrspace.h>

#include <asm/netlogic/common.h>

#include <asm/netlogic/xlp-hal/iomap.h>
#include <asm/netlogic/xlp-hal/xlp.h>
#include <asm/netlogic/xlp-hal/sys.h>
#include <asm/netlogic/xlp-hal/cpucontrol.h>

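/*
 * Uncached (KSEG1) address of the SYS_CPU_NONCOHERENT_MODE register in
 * the node 0 SYS block; nlm_reset_entry below adds the per-node offset
 * (node * 0x40000) at run time.
 */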
#define SYS_CPU_COHERENT_BASE	CKSEG1ADDR(XLP_DEFAULT_IO_BASE) + \
			XLP_IO_SYS_OFFSET(0) + XLP_IO_PCI_HDRSZ + \
			SYS_CPU_NONCOHERENT_MODE * 4

/* Enable XLP features and workarounds in the LSU */
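/* mfcr/mtcr below access NetLogic processor control registers (see ".set arch=xlr" further down) */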
.macro	xlp_config_lsu
	li	t0, LSU_DEFEATURE
	mfcr	t1, t0

	lui	t2, 0x4080	/* Enable Unaligned Access, L2HPE */
	or	t1, t1, t2
	mtcr	t1, t0

	li	t0, ICU_DEFEATURE
	mfcr	t1, t0
	ori	t1, 0x1000	/* Enable Icache partitioning */
	mtcr	t1, t0

	li	t0, SCHED_DEFEATURE
	lui	t1, 0x0100	/* Disable BRU accepting ALU ops */
	mtcr	t1, t0
.endm

/*
 * Allow access to physical memory above 64GB by enabling ELPA in the
 * PAGEGRAIN register. This is needed before going to C code, since the
 * SP can be in this region. Called from all HW threads.
 */
.macro	xlp_early_mmu_init
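	/* PAGEGRAIN is CP0 register 5, select 1, hence CP0_PAGEMASK with sel 1 */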
	mfc0	t0, CP0_PAGEMASK, 1
	li	t1, (1 << 29)		/* ELPA bit */
	or	t0, t1
	mtc0	t0, CP0_PAGEMASK, 1
.endm

/*
 * L1D cache has to be flushed before enabling threads in XLP.
 * On XLP8xx/XLP3xx, we do a low level flush using processor control
 * registers. On XLPII CPUs, usual cache instructions work.
 */
.macro	xlp_flush_l1_dcache
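	/*
	 * PRID implementations below 0x1200 (XLP8xx/XLP3xx) use the LSU
	 * debug-register flush below; 0x1200 and above (XLPII) branch to
	 * 15f and use the cache instruction instead.
	 */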
	mfc0	t0, CP0_PRID
	andi	t0, t0, PRID_IMP_MASK
	slt	t1, t0, 0x1200
	beqz	t1, 15f
	nop

	/* XLP8xx low level cache flush */
	li	t0, LSU_DEBUG_DATA0
	li	t1, LSU_DEBUG_ADDR
	li	t2, 0		/* index */
	li	t3, 0x1000	/* loop count */
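	/*
	 * For each of the 0x1000 cache indexes, write zeroed LSU_DEBUG_DATA0
	 * into way 0 and then way 1 via LSU_DEBUG_ADDR, waiting for the
	 * write_active bit to clear each time.
	 */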
11:
	sll	v0, t2, 5
	mtcr	zero, t0
	ori	v1, v0, 0x3	/* way0 | write_enable | write_active */
	mtcr	v1, t1
12:
	mfcr	v1, t1
	andi	v1, 0x1		/* wait for write_active == 0 */
	bnez	v1, 12b
	nop
	mtcr	zero, t0
	ori	v1, v0, 0x7	/* way1 | write_enable | write_active */
	mtcr	v1, t1
13:
	mfcr	v1, t1
	andi	v1, 0x1		/* wait for write_active == 0 */
	bnez	v1, 13b
	nop
	addi	t2, 1
	bne	t3, t2, 11b
	nop
	b	17f
	nop

	/* XLPII CPUs, Invalidate all 64k of L1 D-cache */
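	/* index ops walk KSEG0 0x80000000..0x8000ffff in 32-byte lines */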
15:
	li	t0, 0x80000000
	li	t1, 0x80010000
16:	cache	Index_Writeback_Inv_D, 0(t0)
	addiu	t0, t0, 32
	bne	t0, t1, 16b
	nop
17:
.endm

/*
 * nlm_reset_entry will be copied to the reset entry point for
 * XLR and XLP. The XLP cores start here when they are woken up. This
 * is also the NMI entry point.
 *
 * We use scratch regs 6/7 to save k0/k1, and check for an NMI first.
 *
 * The data corresponding to reset/NMI is stored at the RESET_DATA_PHYS
 * location; it holds the thread mask (used when a core is woken up)
 * and the current NMI handler, in case we reached here for an NMI.
 *
 * When a core or thread is newly woken up, it marks itself ready and
 * loops in a 'wait'. When the CPU really needs waking up, we send it an
 * NMI IPI, with the NMI handler set to prom_boot_secondary_cpus.
 */
	.set	noreorder
	.set	noat
	.set	arch=xlr	/* for mfcr/mtcr, XLR is sufficient */

FEXPORT(nlm_reset_entry)
	dmtc0	k0, $22, 6
	dmtc0	k1, $22, 7
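	/* Status.NMI (bit 19, 0x80000) is set when this entry was caused by an NMI */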
	mfc0	k0, CP0_STATUS
	li	k1, 0x80000
	and	k1, k0, k1
	beqz	k1, 1f		/* go to real reset entry */
	nop
	li	k1, CKSEG1ADDR(RESET_DATA_PHYS)	/* NMI */
	ld	k0, BOOT_NMI_HANDLER(k1)
	jr	k0
	nop

1:	/* Entry point on core wakeup */
	mfc0	t0, CP0_PRID		/* processor ID */
	andi	t0, PRID_IMP_MASK
	li	t1, 0x1500		/* XLP 9xx */
	beq	t0, t1, 2f		/* no coherency setup needed */
	nop

	li	t1, 0x1300		/* XLP 5xx */
	beq	t0, t1, 2f		/* no coherency setup needed */
	nop

	/* clear this core's bit in the SYS noncoherent-mode register to make it coherent */
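	/*
	 * The low 10 bits of EBASE hold the CPU number (node*32 + core*4 +
	 * thread on XLP): node is bits 6:5, core is bits 4:2.
	 */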
	mfc0	t0, CP0_EBASE
	mfc0	t1, CP0_EBASE
	srl	t1, 5
	andi	t1, 0x3			/* t1 <- node */
	li	t2, 0x40000
	mul	t3, t2, t1		/* t3 = node * 0x40000 */
	srl	t0, t0, 2
	and	t0, t0, 0x7		/* t0 <- core */
	li	t1, 0x1
	sll	t0, t1, t0
	nor	t0, t0, zero		/* t0 <- ~(1 << core) */
	li	t2, SYS_CPU_COHERENT_BASE
	add	t2, t2, t3		/* t2 <- SYS offset for node */
	lw	t1, 0(t2)
	and	t1, t1, t0
	sw	t1, 0(t2)

	/* read back to ensure complete */
	lw	t1, 0(t2)
	sync

2:
	/* Configure LSU on Non-0 Cores. */
	xlp_config_lsu
	/* FALL THROUGH */

/*
 * Wake up sibling threads from the initial thread in a core.
 */
EXPORT(nlm_boot_siblings)
	/* core L1D flush before enabling threads */
	xlp_flush_l1_dcache
	/* save ra and sp, will be used later (only for the boot cpu) */
	dmtc0	ra, $22, 6
	dmtc0	sp, $22, 7
	/* Enable hw threads by writing to MAP_THREADMODE of the core */
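	/* the thread mask was stored at RESET_DATA_PHYS before this core was woken up */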
	li	t0, CKSEG1ADDR(RESET_DATA_PHYS)
	lw	t1, BOOT_THREAD_MODE(t0)	/* t1 <- thread mode */
	li	t0, ((CPU_BLOCKID_MAP << 8) | MAP_THREADMODE)
	mfcr	t2, t0
	or	t2, t2, t1
	mtcr	t2, t0

	/*
	 * The new hardware thread starts at the next instruction.
	 * For all the cases other than core 0 thread 0, we will
	 * jump to the secondary wait function.
	 *
	 * NOTE: All GPR contents are lost after the mtcr above!
	 */
	mfc0	v0, CP0_EBASE
	andi	v0, 0x3ff		/* v0 <- node/core */

	/*
	 * Errata: to avoid potential live lock, setup IFU_BRUB_RESERVE
	 * when running 4 threads per core
	 */
	andi	v1, v0, 0x3		/* v1 <- thread id */
	bnez	v1, 2f
	nop

	/* thread 0 of each core. */
	li	t0, CKSEG1ADDR(RESET_DATA_PHYS)
	lw	t1, BOOT_THREAD_MODE(t0)	/* t1 <- thread mode */
	subu	t1, 0x3				/* 4-thread per core mode? */
	bnez	t1, 2f
	nop

	li	t0, IFU_BRUB_RESERVE
	li	t1, 0x55
	mtcr	t1, t0
	_ehb
2:
	beqz	v0, 4f		/* boot cpu (cpuid == 0)? */
	nop

	/* setup status reg */
	move	t1, zero
#ifdef CONFIG_64BIT
	ori	t1, ST0_KX
#endif
	mtc0	t1, CP0_STATUS

	xlp_early_mmu_init

	/* mark CPU ready */
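	/* the ready flag for this CPU is the word at RESET_DATA_PHYS + BOOT_CPU_READY + cpunum * 4 */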
	li	t3, CKSEG1ADDR(RESET_DATA_PHYS)
	ADDIU	t1, t3, BOOT_CPU_READY
	sll	v1, v0, 2
	PTR_ADDU t1, v1
	li	t2, 1
	sw	t2, 0(t1)
	/* Wait until NMI hits */
3:	wait
	b	3b
	nop

	/*
	 * For the boot CPU, we have to restore ra and sp and return; the
	 * rest of the registers will be restored by the caller.
	 */
4:
	dmfc0	ra, $22, 6
	dmfc0	sp, $22, 7
	jr	ra
	nop
EXPORT(nlm_reset_entry_end)

LEAF(nlm_init_boot_cpu)
#ifdef CONFIG_CPU_XLP
	xlp_config_lsu
	xlp_early_mmu_init
#endif
	jr	ra
	nop
END(nlm_init_boot_cpu)