/*
 * (C) Copyright 2014-2015 Freescale Semiconductor
 *
 * SPDX-License-Identifier: GPL-2.0+
 *
 * Extracted from armv8/start.S
 */

#include <config.h>
#include <linux/linkage.h>
#include <asm/gic.h>
#include <asm/macro.h>
#include <asm/arch/mp.h>
ENTRY(lowlevel_init)
        mov     x29, lr                 /* Save LR */

#ifdef CONFIG_FSL_LSCH3
        /* Set Wuo bit for RN-I 20 */
        ldr     x0, =CCI_AUX_CONTROL_BASE(20)

        /* Add fully-coherent masters to DVM domain */
        ldr     x0, =CCI_MN_BASE
        ldr     x1, =CCI_MN_RNF_NODEID_LIST
        ldr     x2, =CCI_MN_DVM_DOMAIN_CTL_SET
        bl      ccn504_add_masters_to_dvm
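        /*
         * The helper above is expected to write CCI_MN_RNF_NODEID_LIST into
         * the MN DVM domain-control 'set' register, so that all
         * fully-coherent (RN-F) masters take part in broadcast DVM (TLB and
         * branch-predictor maintenance) messages.
         */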
        /* Set all RN-I ports to QoS of 15 */
        ldr     x0, =CCI_S0_QOS_CONTROL_BASE(0)
        ldr     x0, =CCI_S1_QOS_CONTROL_BASE(0)
        ldr     x0, =CCI_S2_QOS_CONTROL_BASE(0)

        ldr     x0, =CCI_S0_QOS_CONTROL_BASE(2)
        ldr     x0, =CCI_S1_QOS_CONTROL_BASE(2)
        ldr     x0, =CCI_S2_QOS_CONTROL_BASE(2)

        ldr     x0, =CCI_S0_QOS_CONTROL_BASE(6)
        ldr     x0, =CCI_S1_QOS_CONTROL_BASE(6)
        ldr     x0, =CCI_S2_QOS_CONTROL_BASE(6)

        ldr     x0, =CCI_S0_QOS_CONTROL_BASE(12)
        ldr     x0, =CCI_S1_QOS_CONTROL_BASE(12)
        ldr     x0, =CCI_S2_QOS_CONTROL_BASE(12)

        ldr     x0, =CCI_S0_QOS_CONTROL_BASE(16)
        ldr     x0, =CCI_S1_QOS_CONTROL_BASE(16)
        ldr     x0, =CCI_S2_QOS_CONTROL_BASE(16)

        ldr     x0, =CCI_S0_QOS_CONTROL_BASE(20)
        ldr     x0, =CCI_S1_QOS_CONTROL_BASE(20)
        ldr     x0, =CCI_S2_QOS_CONTROL_BASE(20)
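        /*
         * QoS is set per slave port: each RN-I node listed above (0, 2, 6,
         * 12, 16 and 20) has three slave ports, S0-S2, and all of them are
         * programmed with the same QoS value of 15.
         */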
        /* Set the SMMU page size in the sACR register */
        ldr     x1, =SMMU_BASE
        ldr     w0, [x1, #0x10]
        orr     w0, w0, #1 << 16        /* set sACR.pagesize to indicate 64K page */
        str     w0, [x1, #0x10]
#endif

        /* Initialize GIC Secure Bank Status */
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
        branch_if_slave x0, 1f
        ldr     x0, =GICD_BASE
        bl      gic_init_secure
1:
#ifdef CONFIG_GICV3
        ldr     x0, =GICR_BASE
        bl      gic_init_secure_percpu
#elif defined(CONFIG_GICV2)
        ldr     x0, =GICD_BASE
        ldr     x1, =GICC_BASE
        bl      gic_init_secure_percpu
#endif
#endif
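        /*
         * GIC bring-up above: only the master core initialises the shared
         * secure distributor state (gic_init_secure), while every core,
         * master and slave alike, initialises its own banked per-CPU
         * interface state (gic_init_secure_percpu).
         */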
        branch_if_master x0, x1, 2f

#if defined(CONFIG_MP) && defined(CONFIG_ARMV8_MULTIENTRY)
        ldr     x0, =secondary_boot_func
        blr     x0
#endif
2:
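        /*
         * Cores other than the master enter secondary_boot_func above and do
         * not return; only the master reaches label 2 and continues with the
         * rest of lowlevel_init.
         */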
#ifdef CONFIG_FSL_TZPC_BP147
        /* Set Non Secure access for all devices protected via TZPC */
        ldr     x1, =TZPCDECPROT_0_SET_BASE     /* Decode Protection-0 Set Reg */
        orr     w0, w0, #1 << 3         /* DCFG_RESET is accessible from NS world */
        str     w0, [x1]
#endif
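        /*
         * The BP-147 decode-protection 'set' register is write-one-to-set:
         * bit 3, set above, marks the DCFG_RESET slave as accessible from
         * the non-secure world.
         */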
#ifdef CONFIG_FSL_TZASC_400
        /* Set TZASC so that:
         * a. We use only Region0 whose global secure write/read is EN
         * b. We use only Region0 whose NSAID write/read is EN
         *
         * NOTE: As per the CCSR map doc, TZASC 3 and TZASC 4 are just
         *       placeholders.
         */
        ldr     x1, =TZASC_GATE_KEEPER(0)
        ldr     x0, [x1]                /* Filter 0 Gate Keeper Register */
        orr     x0, x0, #1 << 0         /* Set open_request for Filter 0 */
        str     x0, [x1]

        ldr     x1, =TZASC_GATE_KEEPER(1)
        ldr     x0, [x1]                /* Filter 0 Gate Keeper Register */
        orr     x0, x0, #1 << 0         /* Set open_request for Filter 0 */
        str     x0, [x1]

        ldr     x1, =TZASC_REGION_ATTRIBUTES_0(0)
        ldr     x0, [x1]                /* Region-0 Attributes Register */
        orr     x0, x0, #1 << 31        /* Set Sec global write en, Bit[31] */
        orr     x0, x0, #1 << 30        /* Set Sec global read en, Bit[30] */
        str     x0, [x1]

        ldr     x1, =TZASC_REGION_ATTRIBUTES_0(1)
        ldr     x0, [x1]                /* Region-0 Attributes Register, second TZASC */
        orr     x0, x0, #1 << 31        /* Set Sec global write en, Bit[31] */
        orr     x0, x0, #1 << 30        /* Set Sec global read en, Bit[30] */
        str     x0, [x1]
        ldr     x1, =TZASC_REGION_ID_ACCESS_0(0)
        ldr     w0, [x1]                /* Region-0 Access Register */
        mov     w0, #0xFFFFFFFF         /* Set nsaid_wr_en and nsaid_rd_en */
        str     w0, [x1]

        ldr     x1, =TZASC_REGION_ID_ACCESS_0(1)
        ldr     w0, [x1]                /* Region-0 Access Register, second TZASC */
        mov     w0, #0xFFFFFFFF         /* Set nsaid_wr_en and nsaid_rd_en */
        str     w0, [x1]
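        /*
         * Writing all-ones to the Region-ID access registers turns on
         * nsaid_wr_en and nsaid_rd_en for every NSAID, so any non-secure
         * master can read and write Region 0 of both TZASC instances.
         */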
#endif

        mov     lr, x29                 /* Restore LR */
        ret
ENDPROC(lowlevel_init)
hnf_pstate_poll:
        /* x0 has the desired status, return 0 for success, 1 for timeout
         * clobber x1, x2, x3, x4, x6, x7
         */
        mov     x7, #0                  /* flag for timeout */
        mrs     x3, cntpct_el0          /* read timer */
        add     x3, x3, #1200           /* timeout after 100 microseconds */
        movk    x0, #0x420, lsl #16     /* HNF0_PSTATE_STATUS */
        mov     w6, #8                  /* HN-F node count */
        cmp     x2, x1                  /* check status */
        mov     x7, #1                  /* timeout */
        add     x0, x0, #0x10000        /* move to next node */
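        /*
         * The poll compares HNF_PSTATE_STATUS of each of the eight HN-F
         * nodes (their register pages are 0x10000 apart) against the
         * requested status until the generic-timer deadline set above
         * expires. Note the 1200-tick budget only matches the "100
         * microseconds" comment if the system counter runs at roughly
         * 12 MHz.
         */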
hnf_set_pstate:
        /* x0 has the desired state, clobber x1, x2, x6 */
        /* power state to SFONLY */
        mov     w6, #8                  /* HN-F node count */
        movk    x0, #0x420, lsl #16     /* HNF0_PSTATE_REQ */
1:      /* set pstate to sfonly */
        and     x2, x2, #0xfffffffffffffffc     /* & HNFPSTAT_MASK */
        add     x0, x0, #0x10000        /* move to next node */
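        /*
         * Only the low-order bits of HNF_PSTATE_REQ encode the requested
         * power state, so the previous request is masked off (HNFPSTAT_MASK)
         * before the new state is written; the same update is repeated for
         * each of the eight HN-F nodes.
         */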
ENTRY(__asm_flush_l3_cache)
        /*
         * Return status in x0:
         *    0 on success
         *    timeout: 1 for setting SFONLY, 2 for FAM, 3 for both
         */
        mov     x0, #0x1                /* HNFPSTAT_SFONLY */
        mov     x0, #0x4                /* SFONLY status */
        mov     x8, #1                  /* timeout */
        mov     x0, #0x3                /* HNFPSTAT_FAM */
        mov     x0, #0xc                /* FAM status */
ENDPROC(__asm_flush_l3_cache)
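/*
 * The sequence above flushes the CCN-504 L3 cache by cycling the HN-F power
 * state: requesting SFONLY (snoop-filter only) is what causes the HN-Fs to
 * clean and invalidate their cache data, and requesting FAM afterwards is
 * expected to return them to the fully-operational state.
 */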
        /* Keep literals not used by the secondary boot code outside it */
        .ltorg

        /* Using 64 bit alignment since the spin table is accessed as data */
        .align 3
        .global secondary_boot_code
        /* Secondary Boot Code starts here */
secondary_boot_code:
        .global __spin_table
__spin_table:
        .space CONFIG_MAX_CPUS*SPIN_TABLE_ELEM_SIZE
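        /*
         * Each spin-table element is 64 bytes (SPIN_TABLE_ELEM_SIZE); the
         * secondary boot code below stores its boot status at offset 8 and
         * its LPID at offset 16, and the jump address written by the boot
         * CPU is assumed to live at offset 0, following the usual
         * spin-table convention.
         */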
ENTRY(secondary_boot_func)
        /*
         * Get the hardware ID of this CPU from MPIDR_EL1:
         * MPIDR[1:0] = AFF0_CPUID <- Core ID (0,1)
         * MPIDR[7:2] = AFF0_RES
         * MPIDR[15:8] = AFF1_CLUSTERID <- Cluster ID (0,1,2,3)
         * MPIDR[23:16] = AFF2_CLUSTERID
         * MPIDR[29:25] = RES0
         * MPIDR[39:32] = AFF3
         *
         * Linear Processor ID (LPID) calculation from MPIDR_EL1:
         * (We only use AFF0_CPUID and AFF1_CLUSTERID for now
         * until AFF2_CLUSTERID and AFF3 have non-zero values)
         *
         * LPID = (AFF1_CLUSTERID << 2) | AFF0_CPUID
         */
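        /*
         * Example: core 1 of cluster 2 has AFF0_CPUID = 1 and
         * AFF1_CLUSTERID = 2, so LPID = (2 << 2) | 1 = 9.
         */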
        mrs     x0, mpidr_el1
        ubfm    x1, x0, #8, #15         /* x1 = AFF1_CLUSTERID */
        ubfm    x2, x0, #0, #1          /* x2 = AFF0_CPUID */
        orr     x10, x2, x1, lsl #2     /* x10 has LPID */
        ubfm    x9, x0, #0, #15         /* x9 contains MPIDR[15:0] */
        /*
         * offset of the spin table element for this core from start of spin
         * table (each elem is padded to 64 bytes)
         */
        lsl     x1, x10, #6
        ldr     x0, =__spin_table
        /* physical address of this cpu's spin table element */
        add     x11, x1, x0
        ldr     x0, =__real_cntfrq
        ldr     x0, [x0]
        msr     cntfrq_el0, x0          /* set with real frequency */
        str     x9, [x11, #16]          /* LPID */
        mov     x4, #1
        str     x4, [x11, #8]           /* STATUS */
#if defined(CONFIG_GICV3)
        gic_wait_for_interrupt_m x0
#elif defined(CONFIG_GICV2)
        ldr     x0, =GICC_BASE
        gic_wait_for_interrupt_m x0, w1
#endif
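        /*
         * The macro above parks the core in WFI until the boot CPU signals
         * it (for example with an SGI); the core then drops to EL2 (and
         * optionally EL1) and waits for a jump address in its spin-table
         * entry before branching to it.
         */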
        bl      secondary_switch_to_el2
#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
        bl      secondary_switch_to_el1
#endif
slave_cpu:
        wfe
        ldr     x0, [x11]               /* x0 = spin-table jump address */
        cbz     x0, slave_cpu
#ifndef CONFIG_ARMV8_SWITCH_TO_EL1
        mrs     x1, sctlr_el2
#else
        mrs     x1, sctlr_el1
#endif
        tbz     x1, #25, cpu_is_le
        rev     x0, x0                  /* BE to LE conversion */
cpu_is_le:
        br      x0                      /* branch to the given address */
ENDPROC(secondary_boot_func)
ENTRY(secondary_switch_to_el2)
        switch_el x0, 1f, 0f, 0f
0:      ret
1:      armv8_switch_to_el2_m x0
ENDPROC(secondary_switch_to_el2)

ENTRY(secondary_switch_to_el1)
        switch_el x0, 0f, 1f, 0f
0:      ret
1:      armv8_switch_to_el1_m x0, x1
ENDPROC(secondary_switch_to_el1)
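/*
 * switch_el dispatches on the current exception level (EL3, EL2 and EL1
 * targets, in that order), so each helper above only performs the switch
 * when the core is one level above its target and simply returns otherwise.
 */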
        /* Ensure that the literals used by the secondary boot code are
         * assembled within it (this is required so that we can protect
         * this area with a single memreserve region)
         */
        .ltorg

        /* 64 bit alignment for elements accessed as data */
        .align 3
        .global __real_cntfrq
__real_cntfrq:
        .quad COUNTER_FREQUENCY
        .globl __secondary_boot_code_size
        .type __secondary_boot_code_size, %object
        /* Secondary Boot Code ends here */
__secondary_boot_code_size:
        .quad .-secondary_boot_code