X-Git-Url: https://git.sur5r.net/?a=blobdiff_plain;f=arch%2Fx86%2Fcpu%2Finterrupts.c;h=e733bcb30272fa0da847cac08ca318eac4ee61ee;hb=31bf0f57b2c322500ceccdc6a38b4a8edb7035f0;hp=89f39d2e363ca064f5db340aff4d4c40d124513e;hpb=6d7404c4c18ba2be711504909a67b2717ef55ac8;p=u-boot

diff --git a/arch/x86/cpu/interrupts.c b/arch/x86/cpu/interrupts.c
index 89f39d2e36..e733bcb302 100644
--- a/arch/x86/cpu/interrupts.c
+++ b/arch/x86/cpu/interrupts.c
@@ -28,9 +28,16 @@
 */
 #include
+#include
+#include
 #include
 #include
 #include
+#include
+#include
+#include
+
+DECLARE_GLOBAL_DATA_PTR;
 
 #define DECLARE_INTERRUPT(x) \
 ".globl irq_"#x"\n" \
@@ -40,72 +47,6 @@
 "pushl $"#x"\n" \
 "jmp irq_common_entry\n"
 
-/*
- * Volatile isn't enough to prevent the compiler from reordering the
- * read/write functions for the control registers and messing everything up.
- * A memory clobber would solve the problem, but would prevent reordering of
- * all loads stores around it, which can hurt performance. Solution is to
- * use a variable and mimic reads and writes to it to enforce serialisation
- */
-static unsigned long __force_order;
-
-static inline unsigned long read_cr0(void)
-{
- unsigned long val;
- asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order));
- return val;
-}
-
-static inline unsigned long read_cr2(void)
-{
- unsigned long val;
- asm volatile("mov %%cr2,%0\n\t" : "=r" (val), "=m" (__force_order));
- return val;
-}
-
-static inline unsigned long read_cr3(void)
-{
- unsigned long val;
- asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order));
- return val;
-}
-
-static inline unsigned long read_cr4(void)
-{
- unsigned long val;
- asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order));
- return val;
-}
-
-static inline unsigned long get_debugreg(int regno)
-{
- unsigned long val = 0; /* Damn you, gcc! */
-
- switch (regno) {
- case 0:
- asm("mov %%db0, %0" :"=r" (val));
- break;
- case 1:
- asm("mov %%db1, %0" :"=r" (val));
- break;
- case 2:
- asm("mov %%db2, %0" :"=r" (val));
- break;
- case 3:
- asm("mov %%db3, %0" :"=r" (val));
- break;
- case 6:
- asm("mov %%db6, %0" :"=r" (val));
- break;
- case 7:
- asm("mov %%db7, %0" :"=r" (val));
- break;
- default:
- val = 0;
- }
- return val;
-}
-
 void dump_regs(struct irq_regs *regs)
 {
 unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
@@ -120,7 +61,8 @@ void dump_regs(struct irq_regs *regs)
 printf("ESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n",
 regs->esi, regs->edi, regs->ebp, regs->esp);
 printf(" DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x\n",
- (u16)regs->xds, (u16)regs->xes, (u16)regs->xfs, (u16)regs->xgs, (u16)regs->xss);
+ (u16)regs->xds, (u16)regs->xes, (u16)regs->xfs,
+ (u16)regs->xgs, (u16)regs->xss);
 
 cr0 = read_cr0();
 cr2 = read_cr2();
@@ -164,21 +106,21 @@ struct idt_entry {
 u8 res;
 u8 access;
 u16 base_high;
-} __attribute__ ((packed));
+} __packed;
 
 struct desc_ptr {
 unsigned short size;
 unsigned long address;
 unsigned short segment;
-} __attribute__((packed));
+} __packed;
 
-struct idt_entry idt[256];
+struct idt_entry idt[256] __aligned(16);
 
 struct desc_ptr idt_ptr;
 
 static inline void load_idt(const struct desc_ptr *dtr)
 {
- asm volatile("cs lidt %0"::"m" (*dtr));
+ asm volatile("cs lidt %0" : : "m" (*dtr));
 }
 
 void set_vector(u8 intnum, void *routine)
@@ -187,6 +129,11 @@ void set_vector(u8 intnum, void *routine)
 idt[intnum].base_low = (u16)((u32)(routine) & 0xffff);
 }
 
+/*
+ * Ideally these would be defined static to avoid a checkpatch warning, but
+ * the compiler cannot see them in the inline asm and complains that they
+ * aren't defined
+ */
 void irq_0(void);
 void irq_1(void);
@@ -201,7 +148,7 @@ int cpu_init_interrupts(void)
 disable_interrupts();
 
 /* Setup the IDT */
- for (i=0;i<256;i++) {
+ for (i = 0; i < 256; i++) {
 idt[i].access = 0x8e;
 idt[i].res = 0;
 idt[i].selector = 0x10;
@@ -238,7 +185,7 @@ int disable_interrupts(void)
 
 asm volatile ("pushfl ; popl %0 ; cli\n"
 : "=g" (flags) : );
 
- return flags & X86_EFLAGS_IF; /* IE flags is bit 9 */
+ return flags & X86_EFLAGS_IF;
 }
 
 /* IRQ Low-Level Service Routine */
@@ -672,3 +619,31 @@ asm(".globl irq_common_entry\n" \
 DECLARE_INTERRUPT(253) \
 DECLARE_INTERRUPT(254) \
 DECLARE_INTERRUPT(255));
+
+#if defined(CONFIG_INTEL_CORE_ARCH)
+/*
+ * Get the number of CPU time counter ticks since it was first read after
+ * restart. This yields a free-running counter guaranteed to take almost 6
+ * years to wrap around, even at a 100GHz clock rate.
+ */
+u64 get_ticks(void)
+{
+ u64 now_tick = rdtsc();
+
+ if (!gd->arch.tsc_base)
+ gd->arch.tsc_base = now_tick;
+
+ return now_tick - gd->arch.tsc_base;
+}
+
+#define PLATFORM_INFO_MSR 0xce
+
+unsigned long get_tbclk(void)
+{
+ u32 ratio;
+ u64 platform_info = native_read_msr(PLATFORM_INFO_MSR);
+
+ ratio = (platform_info >> 8) & 0xff;
+ return 100 * 1000 * 1000 * ratio; /* 100MHz times Max Non Turbo ratio */
+}
+#endif
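
A quick check of the wrap-around claim in the get_ticks() comment: a 64-bit
counter consumed at 100GHz (1e11 ticks per second) lasts 2^64 / 1e11, which is
about 1.84e8 seconds, or roughly 5.85 years. The standalone sketch below is
not part of the patch; it only reproduces that arithmetic:

#include <stdio.h>

int main(void)
{
	/* 2^64 ticks consumed at 100 GHz, i.e. 1e11 ticks per second */
	double seconds = 18446744073709551616.0 / 100e9;
	double years = seconds / (365.25 * 24 * 3600);

	printf("wraps after %.0f s (about %.2f years)\n", seconds, years);
	return 0;
}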
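
For context on how the two new accessors combine: get_tbclk() derives the tick
rate (100MHz times the maximum non-turbo ratio from bits 15:8 of the
PLATFORM_INFO MSR), so dividing a get_ticks() delta by that rate converts
ticks to wall-clock time. The helper below is a hypothetical sketch, not code
from this patch; it assumes the usual U-Boot prototypes are visible via
common.h and uses do_div() because 64-bit division on 32-bit x86 needs a
library helper:

#include <common.h>	/* assumed to declare get_ticks() and get_tbclk() */
#include <div64.h>	/* do_div(): 64-by-32 division for 32-bit x86 */

/* Hypothetical helper: microseconds elapsed since 'start' was sampled */
static unsigned long ticks_to_us(u64 start)
{
	u64 delta = get_ticks() - start;
	u32 ticks_per_us = get_tbclk() / 1000000;

	do_div(delta, ticks_per_us);	/* delta now holds microseconds */
	return (unsigned long)delta;
}

/* Usage sketch: time a stretch of code */
void example_timing(void)
{
	u64 start = get_ticks();

	/* ... code being measured ... */

	printf("elapsed: %lu us\n", ticks_to_us(start));
}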