2 * Copyright (C) 2013 - ARM Ltd
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
5 * Based on code by Carl van Schaik <carl@ok-labs.com>.
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
23 #include <asm/arch/cpu.h>
28 * SECURE_RAM to text_end :
29 * ._secure_text section
30 * text_end to ALIGN_PAGE(text_end):
32 * ALIGN_PAGE(text_end) to ALIGN_PAGE(text_end) + 0x1000)
33 * 1kB of stack per CPU (4 CPUs max).
@ NOTE(review): this file appears to be an extraction-damaged copy of an
@ ARMv7 secure-monitor (PSCI) implementation; lines are missing throughout
@ and a stray original line number is fused onto the start of every line.
@ Comments below describe only what the visible instructions establish.
36 .pushsection ._secure.text, "ax"
@ Busy-wait durations in generic-timer ticks, assuming the timer runs at
@ CONFIG_SYS_CLK_FREQ Hz -- TODO confirm against the CNTFRQ setup.
40 #define ONE_MS (CONFIG_SYS_CLK_FREQ / 1000)
41 #define TEN_MS (10 * ONE_MS)
@ GIC distributor / CPU-interface physical base addresses (SoC-specific;
@ presumably the Allwinner sun7i memory map -- verify against SoC manual).
42 #define GICD_BASE 0x1c81000
43 #define GICC_BASE 0x1c82000
@ timer_wait \reg, \ticks: busy-wait for \ticks generic-timer ticks using
@ the CNTP (physical) timer through CP15 c14 registers. Clobbers \reg.
@ NOTE(review): several lines of this macro (the CNTP_CTL enable value,
@ the ISTATUS poll branch, and the closing .endm) are missing from this
@ extraction -- do not edit without the complete original.
45 .macro timer_wait reg, ticks
@ Load the 32-bit tick count into \reg, low halfword then high halfword.
47 movw \reg, #(\ticks & 0xffff)
48 movt \reg, #(\ticks >> 16)
@ CNTP_TVAL: program the countdown value.
49 mcr p15, 0, \reg, c14, c2, 0
51 @ Enable physical timer, mask interrupt
@ CNTP_CTL write (the ENABLE/IMASK value is presumably loaded on a line
@ missing from this view).
53 mcr p15, 0, \reg, c14, c2, 1
54 @ Poll physical timer until ISTATUS is on
56 mrc p15, 0, \reg, c14, c2, 1
@ CNTP_CTL write-back -- presumably disabling the timer again; the value
@ computation is on lines missing from this view.
61 mcr p15, 0, \reg, c14, c2, 1
@ Secure FIQ/SGI handler fragment (the routine's entry label is outside
@ this view): acknowledges an interrupt at the GIC CPU interface, then
@ powers down a core through the SoC CPU-configuration block. Many lines
@ (branches, immediate loads, the epilogue) are missing.
@ Save SCR (Secure Configuration Register) so 'out:' can restore it.
70 mrc p15, 0, r7, c1, c1, 0
@ Write a modified SCR (r8 is computed on a missing line -- presumably
@ clearing SCR.NS to enter secure state).
72 mcr p15, 0, r8, c1, c1, 0
75 @ Validate reason based on IAR and acknowledge
76 movw r8, #(GICC_BASE & 0xffff)
77 movt r8, #(GICC_BASE >> 16)
@ Read the Interrupt Acknowledge Register: r9 = interrupt ID (+ source).
78 ldr r9, [r8, #GICC_IAR]
81 cmp r9, r10 @ skip spurious interrupt 1023
83 movw r10, #0x3fe @ ...and 1022
86 str r9, [r8, #GICC_EOIR] @ acknowledge the interrupt
@ r8 = SoC CPU-configuration block (SUN7I_CPUCFG_BASE defined elsewhere).
93 movw r8, #(SUN7I_CPUCFG_BASE & 0xffff)
94 movt r8, #(SUN7I_CPUCFG_BASE >> 16)
96 @ Wait for the core to enter WFI
@ Poll a per-CPU status register (r11 is set up on missing lines),
@ retrying every millisecond via timer_wait.
100 1: ldr r10, [r11, #0x48]
103 timer_wait r10, ONE_MS
@ Store to offset 0x40 of the core's control block -- presumably its
@ reset-control register; verify against the SoC manual.
108 str r10, [r11, #0x40]
112 lsl r9, r10, r9 @ r9 is now CPU mask
@ Read-modify-write two CPUCFG registers with the CPU mask (the bit
@ clear/set instructions are on missing lines) -- presumably core lock
@ and power-gating controls; TODO confirm offsets 0x1e4/0x1b4.
113 ldr r10, [r8, #0x1e4]
115 str r10, [r8, #0x1e4]
118 ldr r10, [r8, #0x1b4]
120 str r10, [r8, #0x1b4]
121 timer_wait r10, ONE_MS
123 @ Activate power clamp
@ Loop writing to the clamp register (loop body partially missing).
125 1: str r10, [r8, #0x1b0]
131 @ Restore security level
@ Restore the SCR value saved in r7 at entry.
132 out: mcr p15, 0, r7, c1, c1, 0
@ PSCI CPU_ON implementation fragment (entry label and roughly half the
@ body are missing from this extraction). Visible flow: point the SoC
@ boot vector at _sunxi_cpu_entry, assert reset on the target core,
@ release its power clamp, then return PSCI_RET_SUCCESS in r0.
@ r0 = CPU-configuration block base.
145 movw r0, #(SUN7I_CPUCFG_BASE & 0xffff)
146 movt r0, #(SUN7I_CPUCFG_BASE >> 16)
@ r1 = target CPU id (presumably derived from the PSCI mpidr argument;
@ the preceding extraction of r1 is on missing lines).
149 and r1, r1, #3 @ only care about first cluster
@ Publish the secondary-entry address so the woken core starts there.
153 adr r6, _sunxi_cpu_entry
154 str r6, [r0, #0x1a4] @ PRIVATE_REG (boot vector)
156 @ Assert reset on target CPU
@ Each core has a 64-byte control block at base + 0x40 + 64*cpu.
158 lsl r5, r1, #6 @ 64 bytes per CPU
159 add r5, r5, #0x40 @ Offset from base
160 add r5, r5, r0 @ CPU control block
@ NOTE(review): r6 still holds the entry address at this point; in the
@ complete source a zero is presumably loaded into r6 on a missing line
@ before this store -- confirm against the full file.
161 str r6, [r5] @ Reset CPU
173 @ Release power clamp
180 timer_wait r1, TEN_MS
187 @ Deassert reset on target CPU
196 mov r0, #ARM_PSCI_RET_SUCCESS @ Return PSCI_RET_SUCCESS
@ Standard ARMv7 set/way D-cache clean+invalidate walk (the Linux
@ kernel's v7_flush_dcache_all): iterate cache levels up to LoC from
@ CLIDR, select each level in CSSELR, read its geometry from CCSIDR, and
@ clean+invalidate every set/way with DCCISW.
@ NOTE(review): the entry label, loop labels (loop1/loop2/loop3/skip/
@ finished targets), and several loop-control lines are missing from
@ this extraction.
202 /* Imported from Linux kernel */
204 dmb @ ensure ordering with previous memory accesses
205 mrc p15, 1, r0, c0, c0, 1 @ read clidr
206 ands r3, r0, #0x7000000 @ extract loc from clidr
207 mov r3, r3, lsr #23 @ left align loc bit field
208 beq finished @ if loc is 0, then no need to clean
209 mov r10, #0 @ start clean at cache level 0
211 add r2, r10, r10, lsr #1 @ work out 3x current cache level
212 mov r1, r0, lsr r2 @ extract cache type bits from clidr
213 and r1, r1, #7 @ mask of the bits for current cache only
214 cmp r1, #2 @ see what cache we have at this level
215 blt skip @ skip if no cache, or just i-cache
216 mrs r9, cpsr @ make cssr&csidr read atomic
217 mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
218 isb @ isb to sync the new cssr&csidr
219 mrc p15, 1, r1, c0, c0, 0 @ read the new csidr
221 and r2, r1, #7 @ extract the length of the cache lines
222 add r2, r2, #4 @ add 4 (line length offset)
224 ands r4, r4, r1, lsr #3 @ find maximum number on the way size
225 clz r5, r4 @ find bit position of way size increment
227 ands r7, r7, r1, lsr #13 @ extract max number of the index size
229 mov r9, r7 @ create working copy of max index
231 orr r11, r10, r4, lsl r5 @ factor way and cache number into r11
232 orr r11, r11, r9, lsl r2 @ factor index number into r11
233 mcr p15, 0, r11, c7, c14, 2 @ clean & invalidate by set/way
234 subs r9, r9, #1 @ decrement the index
236 subs r4, r4, #1 @ decrement the way
239 add r10, r10, #2 @ increment cache number
243 mov r10, #0 @ switch back to cache level 0
244 mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
@ Fragment: ACTLR (CP15 c1,c0,1) read-modify-write -- the modifying
@ instruction between read and write is missing from this extraction;
@ presumably it sets the SMP/coherency bit on the secondary-entry path.
251 mrc p15, 0, r0, c1, c0, 1
253 mcr p15, 0, r0, c1, c0, 1
@ PSCI CPU_OFF path fragment (entry label missing): disable the D-cache,
@ flush it by set/way, drop out of SMP coherency, then raise an SGI so
@ CPU0 can power this core down. Control never returns to the caller
@ (the terminal wfi loop is on lines missing from this view).
265 mrc p15, 0, r0, c1, c0, 0 @ SCTLR
266 bic r0, r0, #(1 << 2) @ Clear C bit
267 mcr p15, 0, r0, c1, c0, 0 @ SCTLR
@ Clean+invalidate the whole D-cache before leaving coherency.
271 bl v7_flush_dcache_all
275 mrc p15, 0, r0, c1, c0, 1 @ ACTLR
276 bic r0, r0, #(1 << 6) @ Clear SMP bit
277 mcr p15, 0, r0, c1, c0, 1 @ ACTLR
281 @ Ask CPU0 to pull the rug...
282 movw r0, #(GICD_BASE & 0xffff)
283 movt r0, #(GICD_BASE >> 16)
@ The SGI number goes in r1's low bits (set on a missing line); bit 16
@ of GICD_SGIR's CPUTargetList field selects CPU0.
285 movt r1, #1 @ Target is CPU0
286 str r1, [r0, #GICD_SGIR]
292 .globl psci_arch_init
@ psci_arch_init: per-CPU secure-side setup (the label line itself is
@ missing from this extraction). Configures SGI15 as a secure Group-0
@ interrupt in the GIC, enables FIQ signalling and routes FIQs to
@ monitor mode via SCR, then carves out a per-CPU secure stack just
@ past the end of the secure text section.
294 movw r4, #(GICD_BASE & 0xffff)
295 movt r4, #(GICD_BASE >> 16)
@ Make SGI15 a Group-0 (secure) interrupt so it is delivered as FIQ.
297 ldr r5, [r4, #GICD_IGROUPRn]
298 bic r5, r5, #(1 << 15) @ SGI15 as Group-0
299 str r5, [r4, #GICD_IGROUPRn]
301 mov r5, #0 @ Set SGI15 priority to 0
302 strb r5, [r4, #(GICD_IPRIORITYRn + 15)]
@ NOTE(review): assumes the CPU interface sits one 4 kB page above the
@ distributor -- consistent with the GICD/GICC base defines above.
304 add r4, r4, #0x1000 @ GICC address
@ The PMR value is loaded on a missing line (presumably 0xff to unmask
@ all priorities for the non-secure world -- confirm in the full file).
307 str r5, [r4, #GICC_PMR] @ Be cool with non-secure
309 ldr r5, [r4, #GICC_CTLR]
310 orr r5, r5, #(1 << 3) @ Switch FIQEn on
311 str r5, [r4, #GICC_CTLR]
313 mrc p15, 0, r5, c1, c1, 0 @ Read SCR
314 orr r5, r5, #4 @ Enable FIQ in monitor mode
315 bic r5, r5, #1 @ Secure mode
316 mcr p15, 0, r5, c1, c1, 0 @ Write SCR
@ Compute this CPU's secure stack: 1 kB per core, growing down from a
@ page-aligned region two pages past text_end (the cpu*0x400 multiply
@ and the re-aligning left shift are on missing lines).
319 mrc p15, 0, r4, c0, c0, 5 @ MPIDR
320 and r4, r4, #3 @ cpu number in cluster
321 mov r5, #0x400 @ 1kB of stack per CPU
324 adr r5, text_end @ end of text
325 add r5, r5, #0x2000 @ Skip two pages
326 lsr r5, r5, #12 @ Align to start of page
328 sub sp, r5, r4 @ here's our stack!