X-Git-Url: https://git.sur5r.net/?a=blobdiff_plain;f=arch%2Fx86%2Fcpu%2Fcpu.c;h=1707993409875e3ea8c9496c7e4c23b8b47cd53c;hb=80af39842e64a44258ab5eb913659e29fc319903;hp=2e252532d61363b4501c76f327193e8f877aeb6f;hpb=2db8c2d61a05bb4a94bb341329c6de811757c111;p=u-boot

diff --git a/arch/x86/cpu/cpu.c b/arch/x86/cpu/cpu.c
index 2e252532d6..1707993409 100644
--- a/arch/x86/cpu/cpu.c
+++ b/arch/x86/cpu/cpu.c
@@ -13,20 +13,32 @@
  * Sysgo Real-Time Solutions, GmbH <www.elinos.com>
  * Alex Zuepke <azu@sysgo.de>
  *
+ * Part of this file is adapted from coreboot
+ * src/arch/x86/lib/cpu.c
+ *
  * SPDX-License-Identifier:	GPL-2.0+
  */
 
 #include
 #include
+#include
 #include
 #include
 #include
 #include
+#include
+#include
+#include
+#include
+#include
 #include
 #include
 #include
+#include
 #include
 
+DECLARE_GLOBAL_DATA_PTR;
+
 /*
  * Constructor for a conventional segment GDT (or LDT) entry
  * This is a macro so it can be used in initialisers
@@ -43,6 +55,52 @@ struct gdt_ptr {
 	u32 ptr;
 } __packed;
 
+struct cpu_device_id {
+	unsigned vendor;
+	unsigned device;
+};
+
+struct cpuinfo_x86 {
+	uint8_t x86;		/* CPU family */
+	uint8_t x86_vendor;	/* CPU vendor */
+	uint8_t x86_model;
+	uint8_t x86_mask;
+};
+
+/*
+ * List of cpu vendor strings along with their normalized
+ * id values.
+ */
+static struct {
+	int vendor;
+	const char *name;
+} x86_vendors[] = {
+	{ X86_VENDOR_INTEL,     "GenuineIntel", },
+	{ X86_VENDOR_CYRIX,     "CyrixInstead", },
+	{ X86_VENDOR_AMD,       "AuthenticAMD", },
+	{ X86_VENDOR_UMC,       "UMC UMC UMC ", },
+	{ X86_VENDOR_NEXGEN,    "NexGenDriven", },
+	{ X86_VENDOR_CENTAUR,   "CentaurHauls", },
+	{ X86_VENDOR_RISE,      "RiseRiseRise", },
+	{ X86_VENDOR_TRANSMETA, "GenuineTMx86", },
+	{ X86_VENDOR_TRANSMETA, "TransmetaCPU", },
+	{ X86_VENDOR_NSC,       "Geode by NSC", },
+	{ X86_VENDOR_SIS,       "SiS SiS SiS ", },
+};
+
+static const char *const x86_vendor_name[] = {
+	[X86_VENDOR_INTEL]     = "Intel",
+	[X86_VENDOR_CYRIX]     = "Cyrix",
+	[X86_VENDOR_AMD]       = "AMD",
+	[X86_VENDOR_UMC]       = "UMC",
+	[X86_VENDOR_NEXGEN]    = "NexGen",
+	[X86_VENDOR_CENTAUR]   = "Centaur",
+	[X86_VENDOR_RISE]      = "Rise",
+	[X86_VENDOR_TRANSMETA] = "Transmeta",
+	[X86_VENDOR_NSC]       = "NSC",
+	[X86_VENDOR_SIS]       = "SiS",
+};
+
 static void load_ds(u32 segment)
 {
 	asm volatile("movl %0, %%ds" : : "r" (segment * X86_GDT_ENTRY_SIZE));
 }
 
@@ -72,30 +130,42 @@ static void load_gdt(const u64 *boot_gdt, u16 num_entries)
 {
 	struct gdt_ptr gdt;
 
-	gdt.len = (num_entries * 8) - 1;
+	gdt.len = (num_entries * X86_GDT_ENTRY_SIZE) - 1;
 	gdt.ptr = (u32)boot_gdt;
 
 	asm volatile("lgdtl %0\n" : : "m" (gdt));
 }
 
-void setup_gdt(gd_t *id, u64 *gdt_addr)
+void arch_setup_gd(gd_t *new_gd)
 {
-	/* CS: code, read/execute, 4 GB, base 0 */
+	u64 *gdt_addr;
+
+	gdt_addr = new_gd->arch.gdt;
+
+	/*
+	 * CS: code, read/execute, 4 GB, base 0
+	 *
+	 * Some OSes (like VxWorks) require GDT entry 1 to be the 32-bit CS
+	 */
+	gdt_addr[X86_GDT_ENTRY_UNUSED] = GDT_ENTRY(0xc09b, 0, 0xfffff);
 	gdt_addr[X86_GDT_ENTRY_32BIT_CS] = GDT_ENTRY(0xc09b, 0, 0xfffff);
 
 	/* DS: data, read/write, 4 GB, base 0 */
 	gdt_addr[X86_GDT_ENTRY_32BIT_DS] = GDT_ENTRY(0xc093, 0, 0xfffff);
 
 	/* FS: data, read/write, 4 GB, base (Global Data Pointer) */
-	id->arch.gd_addr = id;
+	new_gd->arch.gd_addr = new_gd;
 	gdt_addr[X86_GDT_ENTRY_32BIT_FS] = GDT_ENTRY(0xc093,
-		     (ulong)&id->arch.gd_addr, 0xfffff);
+		     (ulong)&new_gd->arch.gd_addr, 0xfffff);
 
 	/* 16-bit CS: code, read/execute, 64 kB, base 0 */
-	gdt_addr[X86_GDT_ENTRY_16BIT_CS] = GDT_ENTRY(0x109b, 0, 0x0ffff);
+	gdt_addr[X86_GDT_ENTRY_16BIT_CS] = GDT_ENTRY(0x009b, 0, 0x0ffff);
 
 	/* 16-bit DS: data, read/write, 64 kB, base 0 */
-	gdt_addr[X86_GDT_ENTRY_16BIT_DS] = GDT_ENTRY(0x1093, 0, 0x0ffff);
+	gdt_addr[X86_GDT_ENTRY_16BIT_DS] = GDT_ENTRY(0x0093, 0, 0x0ffff);
+
+	gdt_addr[X86_GDT_ENTRY_16BIT_FLAT_CS] = GDT_ENTRY(0x809b, 0, 0xfffff);
+	gdt_addr[X86_GDT_ENTRY_16BIT_FLAT_DS] = GDT_ENTRY(0x8093, 0, 0xfffff);
 
 	load_gdt(gdt_addr, X86_GDT_NUM_ENTRIES);
 	load_ds(X86_GDT_ENTRY_32BIT_DS);
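
For reference, the GDT_ENTRY(flags, base, limit) values used above pack the
descriptor's access byte into the low 8 bits of "flags" and the G/D-B/L/AVL
nibble into bits 12-15 (see the macro earlier in this file). That explains
the constants: 0xc09b is a 32-bit flat code segment (G=1, D/B=1, access
0x9b), 0x809b is the new "16-bit flat" code segment (G=1 but D/B=0, i.e. a
16-bit segment that still spans 4GB), and 0x009b/0x0093 drop the stray AVL
bit that the old 0x109b/0x1093 values set. A stand-alone decoder sketch,
illustrative only and not a helper that exists in U-Boot:

	#include <stdio.h>

	static void gdt_flags_decode(unsigned int flags)
	{
		unsigned int access = flags & 0xff;	   /* P, DPL, S, type */
		unsigned int nibble = (flags >> 12) & 0xf; /* G, D/B, L, AVL */

		printf("P=%u DPL=%u type=0x%x G=%u D/B=%u\n",
		       (access >> 7) & 1, (access >> 5) & 3, access & 0xf,
		       (nibble >> 3) & 1, (nibble >> 2) & 1);
	}

	int main(void)
	{
		gdt_flags_decode(0xc09b);	/* 32-bit flat CS */
		gdt_flags_decode(0x809b);	/* 16-bit flat CS */
		gdt_flags_decode(0x009b);	/* 16-bit 64KB CS */
		return 0;
	}
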
@@ -105,40 +175,232 @@ void setup_gdt(gd_t *id, u64 *gdt_addr)
 	load_fs(X86_GDT_ENTRY_32BIT_FS);
 }
 
+#ifdef CONFIG_HAVE_FSP
+/*
+ * Setup FSP execution environment GDT
+ *
+ * Per the Intel FSP external architecture specification, before calling
+ * any FSP APIs we need to make sure the system is in flat 32-bit mode and
+ * both the code and data selectors have full 4GB access range. Here we
+ * reuse the one we used in arch/x86/cpu/start16.S, and reload the segment
+ * registers.
+ */
+void setup_fsp_gdt(void)
+{
+	load_gdt((const u64 *)(gdt_rom + CONFIG_RESET_SEG_START), 4);
+	load_ds(X86_GDT_ENTRY_32BIT_DS);
+	load_ss(X86_GDT_ENTRY_32BIT_DS);
+	load_es(X86_GDT_ENTRY_32BIT_DS);
+	load_fs(X86_GDT_ENTRY_32BIT_DS);
+	load_gs(X86_GDT_ENTRY_32BIT_DS);
+}
+#endif
+
 int __weak x86_cleanup_before_linux(void)
 {
 #ifdef CONFIG_BOOTSTAGE_STASH
-	bootstage_stash((void *)CONFIG_BOOTSTAGE_STASH,
+	bootstage_stash((void *)CONFIG_BOOTSTAGE_STASH_ADDR,
 			CONFIG_BOOTSTAGE_STASH_SIZE);
 #endif
 
 	return 0;
 }
 
+/*
+ * Cyrix CPUs without cpuid or with cpuid not yet enabled can be detected
+ * by the fact that they preserve the flags across the division of 5/2.
+ * PII and PPro exhibit this behavior too, but they have cpuid available.
+ */
+
+/*
+ * Perform the Cyrix 5/2 test. A Cyrix won't change
+ * the flags, while other 486 chips will.
+ */
+static inline int test_cyrix_52div(void)
+{
+	unsigned int test;
+
+	__asm__ __volatile__(
+	     "sahf\n\t"		/* clear flags (%eax = 0x0005) */
+	     "div %b2\n\t"	/* divide 5 by 2 */
+	     "lahf"		/* store flags into %ah */
+	     : "=a" (test)
+	     : "0" (5), "q" (2)
+	     : "cc");
+
+	/* AH is 0x02 on a Cyrix after the divide */
+	return (unsigned char)(test >> 8) == 0x02;
+}
+
+/*
+ * Detect a NexGen CPU running without BIOS hypercode new enough
+ * to have CPUID. (Thanks to Herbert Oppmann)
+ */
+static int deep_magic_nexgen_probe(void)
+{
+	int ret;
+
+	__asm__ __volatile__ (
+		"	movw	$0x5555, %%ax\n"
+		"	xorw	%%dx, %%dx\n"
+		"	movw	$2, %%cx\n"
+		"	divw	%%cx\n"
+		"	movl	$0, %%eax\n"
+		"	jnz	1f\n"
+		"	movl	$1, %%eax\n"
+		"1:\n"
+		: "=a" (ret) : : "cx", "dx");
+
+	return ret;
+}
+
+static bool has_cpuid(void)
+{
+	return flag_is_changeable_p(X86_EFLAGS_ID);
+}
+
+static bool has_mtrr(void)
+{
+	return cpuid_edx(0x00000001) & (1 << 12) ? true : false;
+}
+
+static int build_vendor_name(char *vendor_name)
+{
+	struct cpuid_result result;
+	result = cpuid(0x00000000);
+	unsigned int *name_as_ints = (unsigned int *)vendor_name;
+
+	name_as_ints[0] = result.ebx;
+	name_as_ints[1] = result.edx;
+	name_as_ints[2] = result.ecx;
+
+	return result.eax;
+}
+
+static void identify_cpu(struct cpu_device_id *cpu)
+{
+	char vendor_name[16];
+	int i;
+
+	vendor_name[0] = '\0'; /* Unset */
+	cpu->device = 0; /* fix gcc 4.4.4 warning */
+
+	/* Find the id and vendor_name */
+	if (!has_cpuid()) {
+		/* It's a 486 if we can modify the AC flag */
+		if (flag_is_changeable_p(X86_EFLAGS_AC))
+			cpu->device = 0x00000400; /* 486 */
+		else
+			cpu->device = 0x00000300; /* 386 */
+		if ((cpu->device == 0x00000400) && test_cyrix_52div()) {
+			memcpy(vendor_name, "CyrixInstead", 13);
+			/* If we ever care we can enable cpuid here */
+		}
+		/* Detect NexGen with old hypercode */
+		else if (deep_magic_nexgen_probe())
+			memcpy(vendor_name, "NexGenDriven", 13);
+	}
+	if (has_cpuid()) {
+		int cpuid_level;
+
+		cpuid_level = build_vendor_name(vendor_name);
+		vendor_name[12] = '\0';
+
+		/* Intel-defined flags: level 0x00000001 */
+		if (cpuid_level >= 0x00000001) {
+			cpu->device = cpuid_eax(0x00000001);
+		} else {
+			/* Have CPUID level 0 only - unheard of */
+			cpu->device = 0x00000400;
+		}
+	}
+	cpu->vendor = X86_VENDOR_UNKNOWN;
+	for (i = 0; i < ARRAY_SIZE(x86_vendors); i++) {
+		if (memcmp(vendor_name, x86_vendors[i].name, 12) == 0) {
+			cpu->vendor = x86_vendors[i].vendor;
+			break;
+		}
+	}
+}
+
+static inline void get_fms(struct cpuinfo_x86 *c, uint32_t tfms)
+{
+	c->x86 = (tfms >> 8) & 0xf;
+	c->x86_model = (tfms >> 4) & 0xf;
+	c->x86_mask = tfms & 0xf;
+	if (c->x86 == 0xf)
+		c->x86 += (tfms >> 20) & 0xff;
+	if (c->x86 >= 0x6)
+		c->x86_model += ((tfms >> 16) & 0xf) << 4;
+}
+
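To make the field packing in get_fms() concrete, here is a worked example
using a real CPUID.1 EAX signature (0x000306a9, an Ivy Bridge part); the
snippet is a stand-alone copy of the decoding above, not new U-Boot code:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t tfms = 0x000306a9;	/* sample CPUID.1 EAX value */
		unsigned int family = (tfms >> 8) & 0xf;
		unsigned int model = (tfms >> 4) & 0xf;
		unsigned int stepping = tfms & 0xf;

		/* extended family only kicks in for family 0xf parts */
		if (family == 0xf)
			family += (tfms >> 20) & 0xff;
		/* extended model is merged in for family 6 and above */
		if (family >= 0x6)
			model += ((tfms >> 16) & 0xf) << 4;

		/* prints: family 0x6 model 0x3a stepping 0x9 */
		printf("family %#x model %#x stepping %#x\n",
		       family, model, stepping);
		return 0;
	}
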
 int x86_cpu_init_f(void)
 {
 	const u32 em_rst = ~X86_CR0_EM;
 	const u32 mp_ne_set = X86_CR0_MP | X86_CR0_NE;
 
-	/* initialize FPU, reset EM, set MP and NE */
-	asm ("fninit\n" \
-	     "movl %%cr0, %%eax\n" \
-	     "andl %0, %%eax\n" \
-	     "orl %1, %%eax\n" \
-	     "movl %%eax, %%cr0\n" \
-	     : : "i" (em_rst), "i" (mp_ne_set) : "eax");
+	if (ll_boot_init()) {
+		/* initialize FPU, reset EM, set MP and NE */
+		asm ("fninit\n" \
+		     "movl %%cr0, %%eax\n" \
+		     "andl %0, %%eax\n" \
+		     "orl %1, %%eax\n" \
+		     "movl %%eax, %%cr0\n" \
+		     : : "i" (em_rst), "i" (mp_ne_set) : "eax");
+	}
 
-	return 0;
-}
-int cpu_init_f(void) __attribute__((weak, alias("x86_cpu_init_f")));
+	/* identify CPU via cpuid and store the decoded info into gd->arch */
+	if (has_cpuid()) {
+		struct cpu_device_id cpu;
+		struct cpuinfo_x86 c;
+
+		identify_cpu(&cpu);
+		get_fms(&c, cpu.device);
+		gd->arch.x86 = c.x86;
+		gd->arch.x86_vendor = cpu.vendor;
+		gd->arch.x86_model = c.x86_model;
+		gd->arch.x86_mask = c.x86_mask;
+		gd->arch.x86_device = cpu.device;
+
+		gd->arch.has_mtrr = has_mtrr();
+	}
+
+	/* Don't allow PCI region 3 to use memory in the 2-4GB memory hole */
+	gd->pci_ram_top = 0x80000000U;
+
+	/* Configure fixed range MTRRs for some legacy regions */
+	if (gd->arch.has_mtrr) {
+		u64 mtrr_cap;
+
+		mtrr_cap = native_read_msr(MTRR_CAP_MSR);
+		if (mtrr_cap & MTRR_CAP_FIX) {
+			/* Mark the VGA RAM area as uncacheable */
+			native_write_msr(MTRR_FIX_16K_A0000_MSR,
+					 MTRR_FIX_TYPE(MTRR_TYPE_UNCACHEABLE),
+					 MTRR_FIX_TYPE(MTRR_TYPE_UNCACHEABLE));
+
+			/*
+			 * Mark the PCI ROM area as cacheable to improve ROM
+			 * execution performance.
+			 */
+			native_write_msr(MTRR_FIX_4K_C0000_MSR,
+					 MTRR_FIX_TYPE(MTRR_TYPE_WRBACK),
+					 MTRR_FIX_TYPE(MTRR_TYPE_WRBACK));
+			native_write_msr(MTRR_FIX_4K_C8000_MSR,
+					 MTRR_FIX_TYPE(MTRR_TYPE_WRBACK),
+					 MTRR_FIX_TYPE(MTRR_TYPE_WRBACK));
+			native_write_msr(MTRR_FIX_4K_D0000_MSR,
+					 MTRR_FIX_TYPE(MTRR_TYPE_WRBACK),
+					 MTRR_FIX_TYPE(MTRR_TYPE_WRBACK));
+			native_write_msr(MTRR_FIX_4K_D8000_MSR,
+					 MTRR_FIX_TYPE(MTRR_TYPE_WRBACK),
+					 MTRR_FIX_TYPE(MTRR_TYPE_WRBACK));
+
+			/* Enable the fixed range MTRRs */
+			msr_setbits_64(MTRR_DEF_TYPE_MSR, MTRR_DEF_TYPE_FIX_EN);
+		}
+	}
 
-int x86_cpu_init_r(void)
-{
-	/* Initialize core interrupt and exception functionality of CPU */
-	cpu_init_interrupts();
 	return 0;
 }
-int cpu_init_r(void) __attribute__((weak, alias("x86_cpu_init_r")));
 
 void x86_enable_caches(void)
 {
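A note on the fixed-range MTRR writes in x86_cpu_init_f() above: each
fixed-range MSR packs eight byte-wide memory types, one per sub-range, and
native_write_msr() takes the low and high 32-bit halves as separate
arguments, so passing the same replicated pattern twice programs all eight
sub-ranges at once. A sketch of the idea, assuming MTRR_FIX_TYPE()
replicates its argument into every byte of a 32-bit word (check asm/mtrr.h
for the real definition):

	/* hypothetical stand-in for MTRR_FIX_TYPE() */
	#define FIX_TYPE(t)	(((t) << 24) | ((t) << 16) | ((t) << 8) | (t))

	/* write-back (type 6) for all eight 4KB sub-ranges of one MSR: */
	/* low half = high half = FIX_TYPE(6) = 0x06060606 */
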
@@ -189,27 +451,22 @@ void flush_cache(unsigned long dummy1, unsigned long dummy2)
 	asm("wbinvd\n");
 }
 
-void __attribute__ ((regparm(0))) generate_gpf(void);
-
-/* segment 0x70 is an arbitrary segment which does not exist */
-asm(".globl generate_gpf\n"
-    ".hidden generate_gpf\n"
-    ".type generate_gpf, @function\n"
-    "generate_gpf:\n"
-    "ljmp $0x70, $0x47114711\n");
+__weak void reset_cpu(ulong addr)
+{
+	/* Do a hard reset through the chipset's reset control register */
+	outb(SYS_RST | RST_CPU, PORT_RESET);
+	for (;;)
+		cpu_hlt();
+}
 
-void __reset_cpu(ulong addr)
+void x86_full_reset(void)
 {
-	printf("Resetting using x86 Triple Fault\n");
-	set_vector(13, generate_gpf);	/* general protection fault handler */
-	set_vector(8, generate_gpf);	/* double fault handler */
-	generate_gpf();			/* start the show */
+	outb(FULL_RST | SYS_RST | RST_CPU, PORT_RESET);
 }
-void reset_cpu(ulong addr) __attribute__((weak, alias("__reset_cpu")));
 
 int dcache_status(void)
 {
-	return !(read_cr0() & 0x40000000);
+	return !(read_cr0() & X86_CR0_CD);
 }
 
 /* Define these functions to allow ehci-hcd to function */
@@ -279,66 +536,63 @@ void cpu_disable_paging_pae(void)
 	: "eax");
 }
 
-static bool has_cpuid(void)
+static bool can_detect_long_mode(void)
 {
-	unsigned long flag;
-
-	asm volatile("pushf\n" \
-		"pop %%eax\n"
-		"mov %%eax, %%ecx\n"	/* ecx = flags */
-		"xor %1, %%eax\n"
-		"push %%eax\n"
-		"popf\n"		/* flags ^= $2 */
-		"pushf\n"
-		"pop %%eax\n"		/* eax = flags */
-		"push %%ecx\n"
-		"popf\n"		/* flags = ecx */
-		"xor %%ecx, %%eax\n"
-		"mov %%eax, %0"
-		: "=r" (flag)
-		: "i" (1 << 21)
-		: "eax", "ecx", "memory");
+	return cpuid_eax(0x80000000) > 0x80000000UL;
+}
 
-	return flag != 0;
+static bool has_long_mode(void)
+{
+	return cpuid_edx(0x80000001) & (1 << 29) ? true : false;
 }
 
-static bool can_detect_long_mode(void)
+int cpu_has_64bit(void)
 {
-	unsigned long flag;
+	return has_cpuid() && can_detect_long_mode() &&
+		has_long_mode();
+}
 
-	asm volatile("mov $0x80000000, %%eax\n"
-		"cpuid\n"
-		"mov %%eax, %0"
-		: "=r" (flag)
-		:
-		: "eax", "ebx", "ecx", "edx", "memory");
+const char *cpu_vendor_name(int vendor)
+{
+	const char *name;
+	name = "<invalid cpu vendor>";
+	if ((vendor < (ARRAY_SIZE(x86_vendor_name))) &&
+	    (x86_vendor_name[vendor] != 0))
+		name = x86_vendor_name[vendor];
 
-	return flag > 0x80000000UL;
+	return name;
 }
 
-static bool has_long_mode(void)
+char *cpu_get_name(char *name)
 {
-	unsigned long flag;
+	unsigned int *name_as_ints = (unsigned int *)name;
+	struct cpuid_result regs;
+	char *ptr;
+	int i;
 
-	asm volatile("mov $0x80000001, %%eax\n"
-		"cpuid\n"
-		"mov %%edx, %0"
-		: "=r" (flag)
-		:
-		: "eax", "ebx", "ecx", "edx", "memory");
+	/* This bit adds up to 48 bytes */
+	for (i = 0; i < 3; i++) {
+		regs = cpuid(0x80000002 + i);
+		name_as_ints[i * 4 + 0] = regs.eax;
+		name_as_ints[i * 4 + 1] = regs.ebx;
+		name_as_ints[i * 4 + 2] = regs.ecx;
+		name_as_ints[i * 4 + 3] = regs.edx;
+	}
+	name[CPU_MAX_NAME_LEN - 1] = '\0';
 
-	return flag & (1 << 29) ? true : false;
-}
+	/* Skip leading spaces. */
+	ptr = name;
+	while (*ptr == ' ')
+		ptr++;
 
-int cpu_has_64bit(void)
-{
-	return has_cpuid() && can_detect_long_mode() &&
-		has_long_mode();
+	return ptr;
 }
 
-int print_cpuinfo(void)
+int default_print_cpuinfo(void)
 {
-	printf("CPU: %s\n", cpu_has_64bit() ? "x86_64" : "x86");
+	printf("CPU: %s, vendor %s, device %xh\n",
+	       cpu_has_64bit() ? "x86_64" : "x86",
+	       cpu_vendor_name(gd->arch.x86_vendor), gd->arch.x86_device);
 
 	return 0;
 }
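Since cpu_get_name() fills the caller's buffer from CPUID leaves
0x80000002-0x80000004 (16 bytes each, 48 in total) and then NUL-terminates
at CPU_MAX_NAME_LEN - 1, callers must pass a buffer of at least
CPU_MAX_NAME_LEN bytes. A minimal usage sketch:

	char buf[CPU_MAX_NAME_LEN];
	const char *name = cpu_get_name(buf);	/* leading spaces skipped */

	printf("CPU brand: %s\n", name);

Note that the returned pointer may point into the middle of buf, so buf
itself (not the returned pointer) owns the storage.
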
@@ -384,3 +638,74 @@ int cpu_jump_to_64bit(ulong setup_base, ulong target)
 
 	return -EFAULT;
 }
+
+void show_boot_progress(int val)
+{
+	outb(val, POST_PORT);
+}
+
+#ifndef CONFIG_SYS_COREBOOT
+int last_stage_init(void)
+{
+	write_tables();
+
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_SMP
+static int enable_smis(struct udevice *cpu, void *unused)
+{
+	return 0;
+}
+
+static struct mp_flight_record mp_steps[] = {
+	MP_FR_BLOCK_APS(mp_init_cpu, NULL, mp_init_cpu, NULL),
+	/* Wait for APs to finish initialization before proceeding */
+	MP_FR_BLOCK_APS(NULL, NULL, enable_smis, NULL),
+};
+
+static int x86_mp_init(void)
+{
+	struct mp_params mp_params;
+
+	mp_params.parallel_microcode_load = 0;
+	mp_params.flight_plan = &mp_steps[0];
+	mp_params.num_records = ARRAY_SIZE(mp_steps);
+	mp_params.microcode_pointer = 0;
+
+	if (mp_init(&mp_params)) {
+		printf("Warning: MP init failure\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+#endif
+
+__weak int x86_init_cpus(void)
+{
+#ifdef CONFIG_SMP
+	debug("Init additional CPUs\n");
+	x86_mp_init();
+#else
+	struct udevice *dev;
+
+	/*
+	 * This causes the cpu-x86 driver to be probed.
+	 * We don't check the return value here as we want to allow boards
+	 * which have not been converted to use the CPU uclass driver to
+	 * boot.
+	 */
+	uclass_first_device(UCLASS_CPU, &dev);
+#endif
+
+	return 0;
+}
+
+int cpu_init_r(void)
+{
+	if (ll_boot_init())
+		return x86_init_cpus();
+
+	return 0;
+}
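
For context on the SMP hunk: mp_steps[] is a "flight plan" that the BSP and
the APs walk through in lockstep; each MP_FR_BLOCK_APS() record names an AP
callback and a BSP callback, and the BSP waits for all APs to complete a
record before advancing to the next one. A hedged sketch of how a board
might add its own per-AP step, reusing the same callback shape as
enable_smis() above (board_ap_setup() is hypothetical, not part of this
patch):

	static int board_ap_setup(struct udevice *cpu, void *unused)
	{
		/* e.g. program per-core MSRs here */
		return 0;
	}

	static struct mp_flight_record mp_steps[] = {
		MP_FR_BLOCK_APS(mp_init_cpu, NULL, mp_init_cpu, NULL),
		MP_FR_BLOCK_APS(board_ap_setup, NULL, NULL, NULL),
		MP_FR_BLOCK_APS(NULL, NULL, enable_smis, NULL),
	};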