#ifndef __ASM_ARM_SYSTEM_H
#define __ASM_ARM_SYSTEM_H

#include <linux/compiler.h>

#ifdef CONFIG_ARM64

/*
 * SCTLR_EL1/SCTLR_EL2/SCTLR_EL3 bits definitions
 */
#define CR_M		(1 << 0)	/* MMU enable			*/
#define CR_A		(1 << 1)	/* Alignment abort enable	*/
#define CR_C		(1 << 2)	/* Dcache enable		*/
#define CR_SA		(1 << 3)	/* Stack Alignment Check Enable	*/
#define CR_I		(1 << 12)	/* Icache enable		*/
#define CR_WXN		(1 << 19)	/* Write Permission Imply XN	*/
#define CR_EE		(1 << 25)	/* Exception (Big) Endian	*/

#ifndef CONFIG_SYS_FULL_VA
#define PGTABLE_SIZE	(0x10000)
#else
#define PGTABLE_SIZE	CONFIG_SYS_PGTABLE_SIZE
#endif

/* 2MB granularity */
#define MMU_SECTION_SHIFT	21
#define MMU_SECTION_SIZE	(1 << MMU_SECTION_SHIFT)

#ifndef __ASSEMBLY__

#define isb()				\
	({asm volatile(			\
	"isb" : : : "memory");		\
	})

#define wfi()				\
	({asm volatile(			\
	"wfi" : : : "memory");		\
	})

static inline unsigned int current_el(void)
{
	unsigned int el;

	asm volatile("mrs %0, CurrentEL" : "=r" (el) : : "cc");
	return el >> 2;
}

static inline unsigned int get_sctlr(void)
{
	unsigned int el, val;

	el = current_el();
	if (el == 1)
		asm volatile("mrs %0, sctlr_el1" : "=r" (val) : : "cc");
	else if (el == 2)
		asm volatile("mrs %0, sctlr_el2" : "=r" (val) : : "cc");
	else
		asm volatile("mrs %0, sctlr_el3" : "=r" (val) : : "cc");

	return val;
}

static inline void set_sctlr(unsigned int val)
{
	unsigned int el;

	el = current_el();
	if (el == 1)
		asm volatile("msr sctlr_el1, %0" : : "r" (val) : "cc");
	else if (el == 2)
		asm volatile("msr sctlr_el2, %0" : : "r" (val) : "cc");
	else
		asm volatile("msr sctlr_el3, %0" : : "r" (val) : "cc");

	asm volatile("isb");
}

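/*
 * Example (illustrative sketch, not part of this header): the usual
 * read-modify-write pattern these accessors support, here enabling the
 * data and instruction caches at the current exception level:
 *
 *	set_sctlr(get_sctlr() | CR_C | CR_I);
 *
 * U-Boot's armv8 cache code performs the equivalent sequence once the
 * page tables have been set up.
 */
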
static inline unsigned long read_mpidr(void)
{
	unsigned long val;

	asm volatile("mrs %0, mpidr_el1" : "=r" (val));

	return val;
}

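/*
 * Example (illustrative sketch): deriving a linear core number from
 * MPIDR_EL1 on a hypothetical SoC where only affinity level 0 (the low
 * eight bits) varies between cores. Real systems may also need Aff1 or
 * Aff2, so treat the mask as an assumption:
 *
 *	unsigned int core = read_mpidr() & 0xff;
 */
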
void __asm_flush_dcache_all(void);
void __asm_invalidate_dcache_all(void);
void __asm_flush_dcache_range(u64 start, u64 end);
void __asm_invalidate_tlb_all(void);
void __asm_invalidate_icache_all(void);
int __asm_flush_l3_cache(void);

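/*
 * Example (illustrative sketch): cleaning the cache lines that cover a
 * hypothetical DMA descriptor before handing it to a device. The helpers
 * above take start and end virtual addresses; most drivers go through the
 * flush_dcache_range() wrapper rather than the asm entry point directly:
 *
 *	u64 start = (u64)(uintptr_t)desc;
 *
 *	__asm_flush_dcache_range(start, start + sizeof(*desc));
 */
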
void armv8_switch_to_el2(void);
void armv8_switch_to_el1(void);

void gic_send_sgi(unsigned long sgino);
void wait_for_wakeup(void);
void protect_secure_region(void);
void smp_kick_all_cpus(void);

void flush_l3_cache(void);

/*
 * Issue a hypervisor call in accordance with the ARM "SMC Calling
 * convention" (DEN0028A).
 *
 * @args: input and output arguments
 */
void hvc_call(struct pt_regs *args);

/*
 * Issue a secure monitor call in accordance with the ARM "SMC Calling
 * convention" (DEN0028A).
 *
 * @args: input and output arguments
 */
void smc_call(struct pt_regs *args);

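/*
 * Example (illustrative sketch): querying the PSCI version through the
 * secure monitor. This assumes the arm64 struct pt_regs layout, where
 * regs[0]..regs[7] carry x0-x7 into and out of the call; 0x84000000 is
 * the standard PSCI_VERSION function ID:
 *
 *	struct pt_regs regs = { 0 };
 *
 *	regs.regs[0] = 0x84000000;
 *	smc_call(&regs);
 *	printf("PSCI %lu.%lu\n", (regs.regs[0] >> 16) & 0xffff,
 *	       regs.regs[0] & 0xffff);
 */
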
#endif	/* __ASSEMBLY__ */

#else	/* CONFIG_ARM64 */

#ifdef __KERNEL__

#define CPU_ARCH_UNKNOWN	0
#define CPU_ARCH_ARMv3		1
#define CPU_ARCH_ARMv4		2
#define CPU_ARCH_ARMv4T		3
#define CPU_ARCH_ARMv5		4
#define CPU_ARCH_ARMv5T		5
#define CPU_ARCH_ARMv5TE	6
#define CPU_ARCH_ARMv5TEJ	7
#define CPU_ARCH_ARMv6		8
#define CPU_ARCH_ARMv7		9

/*
 * CR1 bits (CP#15 CR1)
 */
#define CR_M	(1 << 0)	/* MMU enable				*/
#define CR_A	(1 << 1)	/* Alignment abort enable		*/
#define CR_C	(1 << 2)	/* Dcache enable			*/
#define CR_W	(1 << 3)	/* Write buffer enable			*/
#define CR_P	(1 << 4)	/* 32-bit exception handler		*/
#define CR_D	(1 << 5)	/* 32-bit data address range		*/
#define CR_L	(1 << 6)	/* Implementation defined		*/
#define CR_B	(1 << 7)	/* Big endian				*/
#define CR_S	(1 << 8)	/* System MMU protection		*/
#define CR_R	(1 << 9)	/* ROM MMU protection			*/
#define CR_F	(1 << 10)	/* Implementation defined		*/
#define CR_Z	(1 << 11)	/* Implementation defined		*/
#define CR_I	(1 << 12)	/* Icache enable			*/
#define CR_V	(1 << 13)	/* Vectors relocated to 0xffff0000	*/
#define CR_RR	(1 << 14)	/* Round Robin cache replacement	*/
#define CR_L4	(1 << 15)	/* LDR pc can set T bit			*/
#define CR_DT	(1 << 16)
#define CR_IT	(1 << 18)
#define CR_ST	(1 << 19)
#define CR_FI	(1 << 21)	/* Fast interrupt (lower latency mode)	*/
#define CR_U	(1 << 22)	/* Unaligned access operation		*/
#define CR_XP	(1 << 23)	/* Extended page tables			*/
#define CR_VE	(1 << 24)	/* Vectored interrupts			*/
#define CR_EE	(1 << 25)	/* Exception (Big) Endian		*/
#define CR_TRE	(1 << 28)	/* TEX remap enable			*/
#define CR_AFE	(1 << 29)	/* Access flag enable			*/
#define CR_TE	(1 << 30)	/* Thumb exception enable		*/

#define PGTABLE_SIZE		(4096 * 4)

/*
 * This is used to ensure the compiler did actually allocate the register we
 * asked it for in some inline assembly sequences. Apparently we can't trust
 * the compiler from one version to another, so a bit of paranoia won't hurt.
 * This string is meant to be concatenated with the inline asm string and
 * will cause compilation to stop on mismatch.
 * (For details, see gcc PR 15089.)
 */
#define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t"

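/*
 * Example (illustrative sketch): __asmeq() is pasted in front of an inline
 * asm template to make the build fail if a variable constrained to a named
 * register did not actually land there. The wrapper below is hypothetical,
 * not part of this header:
 *
 *	static inline void example_hvc0(unsigned long function_id)
 *	{
 *		register unsigned long r0 asm("r0") = function_id;
 *
 *		asm volatile(__asmeq("%0", "r0")
 *			     "hvc #0"
 *			     : "+r" (r0) : : "memory");
 *	}
 */
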
#ifndef __ASSEMBLY__

/**
 * save_boot_params() - Save boot parameters before starting reset sequence
 *
 * If you provide this function it will be called immediately when U-Boot
 * starts, both for SPL and U-Boot proper.
 *
 * All registers are unchanged from U-Boot entry. No registers need be
 * preserved.
 *
 * This is not a normal C function. There is no stack. Return by branching to
 * save_boot_params_ret.
 *
 * void save_boot_params(u32 r0, u32 r1, u32 r2, u32 r3);
 */

/**
 * save_boot_params_ret() - Return from save_boot_params()
 *
 * If you provide save_boot_params(), then you should jump back to this
 * function when done. Try to preserve all registers.
 *
 * If your implementation of save_boot_params() is in C then it is acceptable
 * to simply call save_boot_params_ret() at the end of your function. Since
 * there is no link register set up, you cannot just exit the function: U-Boot
 * would return to the (uninitialised) value of lr and likely crash/hang.
 *
 * If your implementation of save_boot_params() is in assembler then you
 * should use 'b' or 'bx' to return to save_boot_params_ret.
 */
void save_boot_params_ret(void);

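/*
 * Example (illustrative sketch): a minimal C implementation for a
 * hypothetical board that only wants to remember the bootrom argument
 * passed in r0. The variable name and its forced .data placement are
 * assumptions, not part of this header:
 *
 *	static unsigned long boot_r0 __attribute__((section(".data")));
 *
 *	void save_boot_params(u32 r0, u32 r1, u32 r2, u32 r3)
 *	{
 *		boot_r0 = r0;
 *		save_boot_params_ret();
 *	}
 */
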
#define isb() __asm__ __volatile__ ("" : : : "memory")

#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");

#ifdef __ARM_ARCH_7A__
#define wfi() __asm__ __volatile__ ("wfi" : : : "memory")
#else
#define wfi()
#endif

static inline unsigned int get_cr(void)
{
	unsigned int val;

	asm volatile("mrc p15, 0, %0, c1, c0, 0	@ get CR" : "=r" (val) : : "cc");
	return val;
}

static inline void set_cr(unsigned int val)
{
	asm volatile("mcr p15, 0, %0, c1, c0, 0	@ set CR"
	  : : "r" (val) : "cc");
	isb();
}

static inline unsigned int get_dacr(void)
{
	unsigned int val;

	asm("mrc p15, 0, %0, c3, c0, 0	@ get DACR" : "=r" (val) : : "cc");
	return val;
}

static inline void set_dacr(unsigned int val)
{
	asm volatile("mcr p15, 0, %0, c3, c0, 0	@ set DACR"
	  : : "r" (val) : "cc");
	isb();
}

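/*
 * Example (illustrative sketch, not part of this header): the
 * read-modify-write idiom these helpers exist for, here setting the
 * icache enable bit in the control register:
 *
 *	set_cr(get_cr() | CR_I);
 */
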
#ifdef CONFIG_CPU_V7
/* Short-Descriptor Translation Table Level 1 Bits */
#define TTB_SECT_NS_MASK	(1 << 19)
#define TTB_SECT_NG_MASK	(1 << 17)
#define TTB_SECT_S_MASK		(1 << 16)
/* Note: TTB AP bits are set elsewhere */
#define TTB_SECT_TEX(x)		((x & 0x7) << 12)
#define TTB_SECT_DOMAIN(x)	((x & 0xf) << 5)
#define TTB_SECT_XN_MASK	(1 << 4)
#define TTB_SECT_C_MASK		(1 << 3)
#define TTB_SECT_B_MASK		(1 << 2)
#define TTB_SECT		(2 << 0)

/* options available for data cache on each page */
enum dcache_option {
	DCACHE_OFF = TTB_SECT_S_MASK | TTB_SECT_DOMAIN(0) |
		     TTB_SECT_XN_MASK | TTB_SECT,
	DCACHE_WRITETHROUGH = DCACHE_OFF | TTB_SECT_C_MASK,
	DCACHE_WRITEBACK = DCACHE_WRITETHROUGH | TTB_SECT_B_MASK,
	DCACHE_WRITEALLOC = DCACHE_WRITEBACK | TTB_SECT_TEX(1),
};
#else
/* options available for data cache on each page */
enum dcache_option {
	DCACHE_OFF = 0x12,
	DCACHE_WRITETHROUGH = 0x1a,
	DCACHE_WRITEBACK = 0x1e,
	DCACHE_WRITEALLOC = 0x16,
};
#endif

/* Size of an MMU section */
enum {
	MMU_SECTION_SHIFT	= 20,
	MMU_SECTION_SIZE	= 1 << MMU_SECTION_SHIFT,
};

/* TTBR0 bits */
#define TTBR0_BASE_ADDR_MASK	0xFFFFC000
#define TTBR0_RGN_NC		(0 << 3)
#define TTBR0_RGN_WBWA		(1 << 3)
#define TTBR0_RGN_WT		(2 << 3)
#define TTBR0_RGN_WB		(3 << 3)
/* TTBR0[6] is IRGN[0] and TTBR0[0] is IRGN[1] */
#define TTBR0_IRGN_NC		(0 << 0 | 0 << 6)
#define TTBR0_IRGN_WBWA		(0 << 0 | 1 << 6)
#define TTBR0_IRGN_WT		(1 << 0 | 0 << 6)
#define TTBR0_IRGN_WB		(1 << 0 | 1 << 6)

/**
 * Register an update to the page tables, and flush the TLB
 *
 * \param start		start address of update in page table
 * \param stop		stop address of update in page table
 */
void mmu_page_table_flush(unsigned long start, unsigned long stop);

#endif /* __ASSEMBLY__ */

#define arch_align_stack(x) (x)

#endif /* __KERNEL__ */

#endif /* CONFIG_ARM64 */

#ifndef __ASSEMBLY__

/**
 * Change the cache settings for a region.
 *
 * \param start		start address of memory region to change
 * \param size		size of memory region to change
 * \param option	dcache option to select
 */
void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option);

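/*
 * Example (illustrative sketch): mapping a hypothetical 8MiB framebuffer
 * as write-through so CPU writes reach the display controller without
 * explicit flushes. The base address is made up; callers are expected to
 * pass section-aligned start/size values:
 *
 *	mmu_set_region_dcache_behaviour(0x9f000000, 8 << 20,
 *					DCACHE_WRITETHROUGH);
 */
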
#ifdef CONFIG_SYS_NONCACHED_MEMORY
void noncached_init(void);
phys_addr_t noncached_alloc(size_t size, size_t align);
#endif /* CONFIG_SYS_NONCACHED_MEMORY */

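/*
 * Example (illustrative sketch): carving a DMA descriptor ring for a
 * hypothetical network driver out of the non-cached pool, so the device
 * and CPU can share it without cache maintenance. Requires
 * CONFIG_SYS_NONCACHED_MEMORY and an earlier noncached_init() call:
 *
 *	phys_addr_t ring = noncached_alloc(4096, ARCH_DMA_MINALIGN);
 *
 *	if (!ring)
 *		return -ENOMEM;
 */
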
#endif /* __ASSEMBLY__ */

#endif