git.sur5r.net Git - u-boot/commitdiff
armv8: aarch64: Fix the warning about x1-x3 nonzero issue
author     Alison Wang <b18965@freescale.com>
Tue, 17 Jan 2017 01:39:17 +0000 (09:39 +0800)
committer  York Sun <york.sun@nxp.com>
Wed, 18 Jan 2017 17:29:33 +0000 (09:29 -0800)
When booting a 64-bit kernel, Linux warns that x1-x3 are nonzero in
violation of the boot protocol. To fix this, a fourth input argument is
added to armv8_switch_to_el2 and armv8_switch_to_el1, and callers set it
to the required value (zero).

Signed-off-by: Alison Wang <alison.wang@nxp.com>
Reviewed-by: Alexander Graf <agraf@suse.de>
Tested-by: Ryan Harkin <ryan.harkin@linaro.org>
Tested-by: Michal Simek <michal.simek@xilinx.com>
Reviewed-by: York Sun <york.sun@nxp.com>
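
For reference, a minimal sketch of how a 64-bit OS is now entered with the
extra argument in place, modelled on the prototype in
arch/arm/include/asm/system.h and the call sites in arch/arm/lib/bootm.c
below; the wrapper function name is illustrative and not part of the patch.
Under the AAPCS64 calling convention the new parameter lands in x3, pushing
entry_point and es_flag from x3/x4 to x4/x5, which is why every working
register in the assembly below is bumped by one.

    #include <linux/types.h>
    #include <asm/system.h>	/* prototypes; ES_TO_AARCH64 assumed here */

    /*
     * Illustrative only: jump to a 64-bit kernel. The new fourth argument
     * (arg4) is passed as zero so that x1-x3 are all zero when the kernel
     * entry point is reached, as the arm64 boot protocol requires.
     */
    static void example_jump_to_arm64_kernel(u64 fdt_addr, u64 kernel_entry)
    {
    	armv8_switch_to_el2(fdt_addr,		/* x0: device tree address */
    			    0,			/* x1: zero for a 64-bit OS */
    			    0,			/* x2: zero for a 64-bit OS */
    			    0,			/* x3: new arg4, cleared */
    			    kernel_entry,	/* x4: entry point */
    			    ES_TO_AARCH64);	/* x5: execution state flag */
    }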
arch/arm/cpu/armv8/fsl-layerscape/lowlevel.S
arch/arm/cpu/armv8/sec_firmware_asm.S
arch/arm/cpu/armv8/start.S
arch/arm/cpu/armv8/transition.S
arch/arm/include/asm/system.h
arch/arm/lib/bootm.c
arch/arm/mach-rmobile/lowlevel_init_gen3.S
cmd/bootefi.c

arch/arm/cpu/armv8/fsl-layerscape/lowlevel.S
index 62efa9097e5a29650473ec71e5b47fd5b46b5b28..a2185f2def23dce8fa508baf33c010aefdc85549 100644 (file)
@@ -486,29 +486,29 @@ cpu_is_le:
        b.eq    1f
 
 #ifdef CONFIG_ARMV8_SWITCH_TO_EL1
-       adr     x3, secondary_switch_to_el1
-       ldr     x4, =ES_TO_AARCH64
+       adr     x4, secondary_switch_to_el1
+       ldr     x5, =ES_TO_AARCH64
 #else
-       ldr     x3, [x11]
-       ldr     x4, =ES_TO_AARCH32
+       ldr     x4, [x11]
+       ldr     x5, =ES_TO_AARCH32
 #endif
        bl      secondary_switch_to_el2
 
 1:
 #ifdef CONFIG_ARMV8_SWITCH_TO_EL1
-       adr     x3, secondary_switch_to_el1
+       adr     x4, secondary_switch_to_el1
 #else
-       ldr     x3, [x11]
+       ldr     x4, [x11]
 #endif
-       ldr     x4, =ES_TO_AARCH64
+       ldr     x5, =ES_TO_AARCH64
        bl      secondary_switch_to_el2
 
 ENDPROC(secondary_boot_func)
 
 ENTRY(secondary_switch_to_el2)
-       switch_el x5, 1f, 0f, 0f
+       switch_el x6, 1f, 0f, 0f
 0:     ret
-1:     armv8_switch_to_el2_m x3, x4, x5
+1:     armv8_switch_to_el2_m x4, x5, x6
 ENDPROC(secondary_switch_to_el2)
 
 ENTRY(secondary_switch_to_el1)
@@ -522,22 +522,22 @@ ENTRY(secondary_switch_to_el1)
        /* physical address of this cpus spin table element */
        add     x11, x1, x0
 
-       ldr     x3, [x11]
+       ldr     x4, [x11]
 
        ldr     x5, [x11, #24]
        ldr     x6, =IH_ARCH_DEFAULT
        cmp     x6, x5
        b.eq    2f
 
-       ldr     x4, =ES_TO_AARCH32
+       ldr     x5, =ES_TO_AARCH32
        bl      switch_to_el1
 
-2:     ldr     x4, =ES_TO_AARCH64
+2:     ldr     x5, =ES_TO_AARCH64
 
 switch_to_el1:
-       switch_el x5, 0f, 1f, 0f
+       switch_el x6, 0f, 1f, 0f
 0:     ret
-1:     armv8_switch_to_el1_m x3, x4, x5
+1:     armv8_switch_to_el1_m x4, x5, x6
 ENDPROC(secondary_switch_to_el1)
 
        /* Ensure that the literals used by the secondary boot code are
arch/arm/cpu/armv8/sec_firmware_asm.S
index 903195dbce3f9599fa6c01db9c1773dbbade92c9..5ed3677f5554abb8f09953d0af09540e78f7ef79 100644 (file)
@@ -57,7 +57,8 @@ ENDPROC(_sec_firmware_support_psci_version)
  * x0: argument, zero
  * x1: machine nr
  * x2: fdt address
- * x3: kernel entry point
+ * x3: input argument
+ * x4: kernel entry point
  * @param outputs for secure firmware:
  * x0: function id
  * x1: kernel entry point
@@ -65,10 +66,9 @@ ENDPROC(_sec_firmware_support_psci_version)
  * x3: fdt address
 */
 ENTRY(armv8_el2_to_aarch32)
-       mov     x0, x3
        mov     x3, x2
        mov     x2, x1
-       mov     x1, x0
+       mov     x1, x4
        ldr     x0, =0xc000ff04
        smc     #0
        ret
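
A hedged C-level sketch of what the reworked armv8_el2_to_aarch32 hands to
the secure firmware: per AAPCS64 the C arguments (args, mach_nr, fdt_addr,
arg4, entry_point) arrive in x0-x4, and the code above reshuffles them into
the SMC layout. The struct is purely illustrative and not part of the patch;
the function id and register roles are taken from the comment and code above.

    #include <linux/types.h>

    /* Illustrative layout of the SMC arguments after the reshuffle. */
    struct sec_fw_aarch32_smc {
    	u64 function_id;	/* x0: 0xc000ff04, as loaded above */
    	u64 entry_point;	/* x1: copied from x4, the new 5th C argument */
    	u64 mach_nr;		/* x2: copied from x1 */
    	u64 fdt_addr;		/* x3: copied from x2 */
    };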
arch/arm/cpu/armv8/start.S
index 530870278c33c5eaac01dfb5332e527eb4736224..368e3dc02882bedeea6cfbd934a3b39bcae5eecc 100644 (file)
@@ -262,14 +262,14 @@ WEAK(lowlevel_init)
        /*
         * All slaves will enter EL2 and optionally EL1.
         */
-       adr     x3, lowlevel_in_el2
-       ldr     x4, =ES_TO_AARCH64
+       adr     x4, lowlevel_in_el2
+       ldr     x5, =ES_TO_AARCH64
        bl      armv8_switch_to_el2
 
 lowlevel_in_el2:
 #ifdef CONFIG_ARMV8_SWITCH_TO_EL1
-       adr     x3, lowlevel_in_el1
-       ldr     x4, =ES_TO_AARCH64
+       adr     x4, lowlevel_in_el1
+       ldr     x5, =ES_TO_AARCH64
        bl      armv8_switch_to_el1
 
 lowlevel_in_el1:
arch/arm/cpu/armv8/transition.S
index adb9f3566bfc3f3fba690b4692400997a2b14468..ca074653769eb2e634d74a70e93a3a76f93624c0 100644 (file)
@@ -11,9 +11,9 @@
 #include <asm/macro.h>
 
 ENTRY(armv8_switch_to_el2)
-       switch_el x5, 1f, 0f, 0f
+       switch_el x6, 1f, 0f, 0f
 0:
-       cmp x4, #ES_TO_AARCH64
+       cmp x5, #ES_TO_AARCH64
        b.eq 2f
        /*
         * When loading 32-bit kernel, it will jump
@@ -22,23 +22,23 @@ ENTRY(armv8_switch_to_el2)
        bl armv8_el2_to_aarch32
 2:
        /*
-        * x3 is kernel entry point or switch_to_el1
+        * x4 is kernel entry point or switch_to_el1
         * if CONFIG_ARMV8_SWITCH_TO_EL1 is defined.
          * When running in EL2 now, jump to the
-        * address saved in x3.
+        * address saved in x4.
         */
-       br x3
-1:     armv8_switch_to_el2_m x3, x4, x5
+       br x4
+1:     armv8_switch_to_el2_m x4, x5, x6
 ENDPROC(armv8_switch_to_el2)
 
 ENTRY(armv8_switch_to_el1)
-       switch_el x5, 0f, 1f, 0f
+       switch_el x6, 0f, 1f, 0f
 0:
-       /* x3 is kernel entry point. When running in EL1
-        * now, jump to the address saved in x3.
+       /* x4 is kernel entry point. When running in EL1
+        * now, jump to the address saved in x4.
         */
-       br x3
-1:     armv8_switch_to_el1_m x3, x4, x5
+       br x4
+1:     armv8_switch_to_el1_m x4, x5, x6
 ENDPROC(armv8_switch_to_el1)
 
 WEAK(armv8_el2_to_aarch32)
arch/arm/include/asm/system.h
index dc4c9914d7b77cbcce1d44b5b235425d88624920..766e929d462c9b6102896618882fdd5957f71ae9 100644 (file)
@@ -196,11 +196,12 @@ void __asm_switch_ttbr(u64 new_ttbr);
  *               For loading 32-bit OS, machine nr
  * @fdt_addr:    For loading 64-bit OS, zero.
  *               For loading 32-bit OS, fdt address.
+ * @arg4:       Input argument.
  * @entry_point: kernel entry point
  * @es_flag:     execution state flag, ES_TO_AARCH64 or ES_TO_AARCH32
  */
 void armv8_switch_to_el2(u64 args, u64 mach_nr, u64 fdt_addr,
-                        u64 entry_point, u64 es_flag);
+                        u64 arg4, u64 entry_point, u64 es_flag);
 /*
  * Switch from EL2 to EL1 for ARMv8
  *
@@ -210,13 +211,14 @@ void armv8_switch_to_el2(u64 args, u64 mach_nr, u64 fdt_addr,
  *               For loading 32-bit OS, machine nr
  * @fdt_addr:    For loading 64-bit OS, zero.
  *               For loading 32-bit OS, fdt address.
+ * @arg4:       Input argument.
  * @entry_point: kernel entry point
  * @es_flag:     execution state flag, ES_TO_AARCH64 or ES_TO_AARCH32
  */
 void armv8_switch_to_el1(u64 args, u64 mach_nr, u64 fdt_addr,
-                        u64 entry_point, u64 es_flag);
+                        u64 arg4, u64 entry_point, u64 es_flag);
 void armv8_el2_to_aarch32(u64 args, u64 mach_nr, u64 fdt_addr,
-                         u64 entry_point);
+                         u64 arg4, u64 entry_point);
 void gic_init(void);
 void gic_send_sgi(unsigned long sgino);
 void wait_for_wakeup(void);
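
To make the new arg4 slot concrete for the EL1 variant as well, a minimal
sketch of entering a 32-bit (AArch32) OS, mirroring the AArch32 path of the
bootm.c call sites in the next diff; the wrapper function name is
illustrative and not part of the patch.

    #include <linux/types.h>
    #include <asm/system.h>

    /*
     * Illustrative only: drop to EL1 and enter a 32-bit kernel. arg4 is
     * again zero; mach_nr and fdt_addr are passed through rather than
     * zeroed, matching the AArch32 case in bootm.c.
     */
    static void example_jump_to_arm32_kernel(u64 mach_nr, u64 fdt_addr,
    					     u64 kernel_entry)
    {
    	armv8_switch_to_el1(0,			/* args: zero for a 32-bit OS */
    			    mach_nr,		/* machine number */
    			    fdt_addr,		/* device tree address */
    			    0,			/* arg4: cleared */
    			    kernel_entry,	/* entry point */
    			    ES_TO_AARCH32);	/* execution state flag */
    }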
arch/arm/lib/bootm.c
index 43cc83ec95b6f845ca76d7bcd422f4e5e297de41..8125cf023f5ee96ded00a2e6c65a2e988348d0be 100644 (file)
@@ -287,11 +287,11 @@ static void switch_to_el1(void)
        if ((IH_ARCH_DEFAULT == IH_ARCH_ARM64) &&
            (images.os.arch == IH_ARCH_ARM))
                armv8_switch_to_el1(0, (u64)gd->bd->bi_arch_number,
-                                   (u64)images.ft_addr,
+                                   (u64)images.ft_addr, 0,
                                    (u64)images.ep,
                                    ES_TO_AARCH32);
        else
-               armv8_switch_to_el1((u64)images.ft_addr, 0, 0,
+               armv8_switch_to_el1((u64)images.ft_addr, 0, 0, 0,
                                    images.ep,
                                    ES_TO_AARCH64);
 }
@@ -324,17 +324,17 @@ static void boot_jump_linux(bootm_headers_t *images, int flag)
                update_os_arch_secondary_cores(images->os.arch);
 
 #ifdef CONFIG_ARMV8_SWITCH_TO_EL1
-               armv8_switch_to_el2((u64)images->ft_addr, 0, 0,
+               armv8_switch_to_el2((u64)images->ft_addr, 0, 0, 0,
                                    (u64)switch_to_el1, ES_TO_AARCH64);
 #else
                if ((IH_ARCH_DEFAULT == IH_ARCH_ARM64) &&
                    (images->os.arch == IH_ARCH_ARM))
                        armv8_switch_to_el2(0, (u64)gd->bd->bi_arch_number,
-                                           (u64)images->ft_addr,
+                                           (u64)images->ft_addr, 0,
                                            (u64)images->ep,
                                            ES_TO_AARCH32);
                else
-                       armv8_switch_to_el2((u64)images->ft_addr, 0, 0,
+                       armv8_switch_to_el2((u64)images->ft_addr, 0, 0, 0,
                                            images->ep,
                                            ES_TO_AARCH64);
 #endif
arch/arm/mach-rmobile/lowlevel_init_gen3.S
index 11acce0395707156abb4277f1cafbb1e09f990fa..ce3d4f5c5211f9417ca3835d151479a01ad7dee1 100644 (file)
@@ -61,14 +61,14 @@ ENTRY(lowlevel_init)
        /*
         * All slaves will enter EL2 and optionally EL1.
         */
-       adr     x3, lowlevel_in_el2
-       ldr     x4, =ES_TO_AARCH64
+       adr     x4, lowlevel_in_el2
+       ldr     x5, =ES_TO_AARCH64
        bl      armv8_switch_to_el2
 
 lowlevel_in_el2:
 #ifdef CONFIG_ARMV8_SWITCH_TO_EL1
-       adr     x3, lowlevel_in_el1
-       ldr     x4, =ES_TO_AARCH64
+       adr     x4, lowlevel_in_el1
+       ldr     x5, =ES_TO_AARCH64
        bl      armv8_switch_to_el1
 
 lowlevel_in_el1:
cmd/bootefi.c
index 97a0fc9c7ca3c638c8ed85674d153930235e8856..06943a97026c7754e8402f85d4bc404af9c80d7f 100644 (file)
@@ -246,7 +246,7 @@ static unsigned long do_bootefi_exec(void *efi, void *fdt)
 
                /* Move into EL2 and keep running there */
                armv8_switch_to_el2((ulong)entry, (ulong)&loaded_image_info,
-                                   (ulong)&systab, (ulong)efi_run_in_el2,
+                                   (ulong)&systab, 0, (ulong)efi_run_in_el2,
                                    ES_TO_AARCH64);
 
                /* Should never reach here, efi exits with longjmp */