config ARMV8_MULTIENTRY
         boolean "Enable multiple CPUs to enter into U-Boot"
 
+config ARMV8_SPIN_TABLE
+       bool "Support spin-table enable method"
+       depends on ARMV8_MULTIENTRY && OF_LIBFDT
+       help
+         Say Y here to support the "spin-table" enable method for booting
+         Linux.
+
+         To use this feature, you must:
+           - Specify enable-method = "spin-table" in each CPU node in the
+             Device Tree you are using to boot the kernel.
+           - Let secondary CPUs enter U-Boot (in a board-specific manner)
+             before the master CPU jumps to the kernel.
+
+         U-Boot automatically:
+           - Sets the "cpu-release-addr" property of each CPU node
+             (overwriting it if it already exists).
+           - Reserves the code for the spin-table and the release address
+             via a /memreserve/ region in the Device Tree.
+
 endif
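
How the secondary CPUs are let into U-Boot is deliberately left board specific.
Purely as a sketch, assuming a hypothetical SoC whose secondary cores sit in
boot ROM until a start address is written to a vendor-specific register (the
register address and the function name below are invented for illustration,
not part of this patch), the board side could look roughly like this:

    #include <common.h>

    DECLARE_GLOBAL_DATA_PTR;

    /* Hypothetical register holding the secondary cores' start address */
    #define SEC_CORE_START_ADDR	0x11000000UL

    void board_kick_secondary_cpus(void)
    {
    	/*
    	 * Point the parked secondaries at relocated U-Boot.  They will
    	 * execute start.S, fail branch_if_master and end up waiting in
    	 * spin_table_secondary_jump until the OS releases them.
    	 */
    	*(volatile u64 *)SEC_CORE_START_ADDR = gd->relocaddr;
    	asm volatile("dsb sy\n\tsev" : : : "memory");	/* wake the cores */
    }

The Device Tree side needs nothing beyond enable-method = "spin-table" in each
CPU node; the cpu-release-addr value can be omitted or stale, since
spin_table_update_dt() below rewrites it.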
 
 obj-y  += tlb.o
 obj-y  += transition.o
 obj-y  += fwcall.o
+ifndef CONFIG_SPL_BUILD
+obj-$(CONFIG_ARMV8_SPIN_TABLE) += spin_table.o spin_table_v8.o
+endif
 
 obj-$(CONFIG_FSL_LAYERSCAPE) += fsl-layerscape/
 obj-$(CONFIG_S32V234) += s32v234/
 
--- /dev/null
+/*
+ * Copyright (C) 2016 Socionext Inc.
+ *   Author: Masahiro Yamada <yamada.masahiro@socionext.com>
+ *
+ * SPDX-License-Identifier:    GPL-2.0+
+ */
+
+#include <common.h>
+#include <libfdt.h>
+#include <asm/spin_table.h>
+
+int spin_table_update_dt(void *fdt)
+{
+       int cpus_offset, offset;
+       const char *prop;
+       int ret;
+       unsigned long rsv_addr = (unsigned long)&spin_table_reserve_begin;
+       unsigned long rsv_size = &spin_table_reserve_end -
+                                               &spin_table_reserve_begin;
+
+       cpus_offset = fdt_path_offset(fdt, "/cpus");
+       if (cpus_offset < 0)
+               return -ENODEV;
+
+       for (offset = fdt_first_subnode(fdt, cpus_offset);
+            offset >= 0;
+            offset = fdt_next_subnode(fdt, offset)) {
+               prop = fdt_getprop(fdt, offset, "device_type", NULL);
+               if (!prop || strcmp(prop, "cpu"))
+                       continue;
+
+               /*
+                * First pass: check that every CPU node specifies
+                * spin-table.  If any does not, return successfully so as
+                * not to disturb other enable methods, such as PSCI.
+                */
+               prop = fdt_getprop(fdt, offset, "enable-method", NULL);
+               if (!prop || strcmp(prop, "spin-table"))
+                       return 0;
+       }
+
+       /* Second pass: set the "cpu-release-addr" property on each CPU node */
+       for (offset = fdt_first_subnode(fdt, cpus_offset);
+            offset >= 0;
+            offset = fdt_next_subnode(fdt, offset)) {
+               prop = fdt_getprop(fdt, offset, "device_type", NULL);
+               if (!prop || strcmp(prop, "cpu"))
+                       continue;
+
+               ret = fdt_setprop_u64(fdt, offset, "cpu-release-addr",
+                               (unsigned long)&spin_table_cpu_release_addr);
+               if (ret)
+                       return -ENOSPC;
+       }
+
+       ret = fdt_add_mem_rsv(fdt, rsv_addr, rsv_size);
+       if (ret)
+               return -ENOSPC;
+
+       printf("   Reserved memory region for spin-table: addr=%lx size=%lx\n",
+              rsv_addr, rsv_size);
+
+       return 0;
+}
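
For context (not part of this patch): the consumer of "cpu-release-addr" is the
OS or whatever payload is booted next.  Under the arm64 spin-table protocol it
writes the secondary CPU's entry point into that 64-bit location and wakes the
core, which is exactly what the wfe loop in spin_table_v8.S below waits for.
A minimal sketch of that consumer side, with an invented function name:

    #include <linux/types.h>

    /*
     * Release one secondary CPU that is parked on the spin-table.
     * @release_addr: value read from the CPU node's "cpu-release-addr"
     * @entry:        physical address the secondary CPU should jump to
     */
    static void spin_table_release_cpu(u64 release_addr, u64 entry)
    {
    	*(volatile u64 *)release_addr = entry;	/* single 64-bit write */

    	/* make the write observable, then wake CPUs sleeping in wfe */
    	asm volatile("dsb sy\n\tsev" : : : "memory");
    }

A real OS also performs cache maintenance on the release address so the store
is visible to a core whose D-cache may still be off; Linux's smp_spin_table.c,
for example, flushes that location before issuing sev.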
 
--- /dev/null
+/*
+ * Copyright (C) 2016 Socionext Inc.
+ *   Author: Masahiro Yamada <yamada.masahiro@socionext.com>
+ *
+ * SPDX-License-Identifier:    GPL-2.0+
+ */
+
+#include <linux/linkage.h>
+
+/*
+ * Secondary CPUs park here: sleep in wfe until the 64-bit word at
+ * spin_table_cpu_release_addr becomes non-zero, then branch to it.
+ * Everything between the _begin and _end labels is covered by the
+ * /memreserve/ region added in spin_table_update_dt().
+ */
+ENTRY(spin_table_secondary_jump)
+.globl spin_table_reserve_begin
+spin_table_reserve_begin:
+0:     wfe                     /* sleep until an event (sev) arrives */
+       ldr     x0, spin_table_cpu_release_addr
+       cbz     x0, 0b          /* not released yet, keep spinning */
+       br      x0              /* jump to the address the OS wrote */
+.globl spin_table_cpu_release_addr
+       .align  3               /* the release address must be 8-byte aligned */
+spin_table_cpu_release_addr:
+       .quad   0
+.globl spin_table_reserve_end
+spin_table_reserve_end:
+ENDPROC(spin_table_secondary_jump)
 
        /* Processor specific initialization */
        bl      lowlevel_init
 
-#ifdef CONFIG_ARMV8_MULTIENTRY
+#if CONFIG_IS_ENABLED(ARMV8_SPIN_TABLE)
+       branch_if_master x0, x1, master_cpu
+       b       spin_table_secondary_jump
+       /* never return */
+#elif defined(CONFIG_ARMV8_MULTIENTRY)
        branch_if_master x0, x1, master_cpu
 
        /*
        ldr     x0, [x1]
        cbz     x0, slave_cpu
        br      x0                      /* branch to the given address */
-master_cpu:
-       /* On the master CPU */
 #endif /* CONFIG_ARMV8_MULTIENTRY */
-
+master_cpu:
        bl      _main
 
 #ifdef CONFIG_SYS_RESET_SCTRL
 
--- /dev/null
+/*
+ * SPDX-License-Identifier:    GPL-2.0+
+ */
+
+#ifndef __ASM_SPIN_TABLE_H__
+#define __ASM_SPIN_TABLE_H__
+
+#include <linux/types.h>
+
+extern u64 spin_table_cpu_release_addr;
+extern char spin_table_reserve_begin;
+extern char spin_table_reserve_end;
+
+int spin_table_update_dt(void *fdt);
+
+#endif /* __ASM_SPIN_TABLE_H__ */
 
 #include <asm/armv7.h>
 #endif
 #include <asm/psci.h>
+#include <asm/spin_table.h>
 
 DECLARE_GLOBAL_DATA_PTR;
 
        if (ret)
                return ret;
 
+#ifdef CONFIG_ARMV8_SPIN_TABLE
+       ret = spin_table_update_dt(blob);
+       if (ret)
+               return ret;
+#endif
+
 #ifdef CONFIG_ARMV7_NONSEC
        ret = psci_update_dt(blob);
        if (ret)