#include <asm/cache.h>
#include <asm/cpu.h>
#include <asm/io.h>
+#include <asm/mtrr.h>
#include <asm/arch/tables.h>
#include <asm/arch/sysinfo.h>
#include <asm/arch/timestamp.h>
return pci_eth_init(bis);
}
-#define MTRR_TYPE_WP 5
-#define MTRRcap_MSR 0xfe
-#define MTRRphysBase_MSR(reg) (0x200 + 2 * (reg))
-#define MTRRphysMask_MSR(reg) (0x200 + 2 * (reg) + 1)
-
void board_final_cleanup(void)
{
/* Un-cache the ROM so the kernel has one
 * more MTRR available.
 *
 * Coreboot should have assigned this to the
 * top available variable MTRR.
 */
- u8 top_mtrr = (native_read_msr(MTRRcap_MSR) & 0xff) - 1;
- u8 top_type = native_read_msr(MTRRphysBase_MSR(top_mtrr)) & 0xff;
+ u8 top_mtrr = (native_read_msr(MTRR_CAP_MSR) & 0xff) - 1;
+ u8 top_type = native_read_msr(MTRR_PHYS_BASE_MSR(top_mtrr)) & 0xff;
/* Make sure this MTRR is the correct Write-Protected type */
- if (top_type == MTRR_TYPE_WP) {
- disable_caches();
- wrmsrl(MTRRphysBase_MSR(top_mtrr), 0);
- wrmsrl(MTRRphysMask_MSR(top_mtrr), 0);
- enable_caches();
+ if (top_type == MTRR_TYPE_WRPROT) {
+ struct mtrr_state state;
+
+ mtrr_open(&state);
+ wrmsrl(MTRR_PHYS_BASE_MSR(top_mtrr), 0);
+ wrmsrl(MTRR_PHYS_MASK_MSR(top_mtrr), 0);
+ mtrr_close(&state);
}
/* Issue SMI to Coreboot to lock down ME and registers */
post_code(POST_CAR_MTRR)
/* Configure the default memory type to uncacheable */
- movl $MTRRdefType_MSR, %ecx
+ movl $MTRR_DEF_TYPE_MSR, %ecx
rdmsr
andl $(~0x00000cff), %eax
wrmsr
post_code(POST_CAR_BASE_ADDRESS)
/* Set Cache-as-RAM mask */
movl $(MTRR_PHYS_MASK_MSR(0)), %ecx
- movl $(~(CACHE_AS_RAM_SIZE - 1) | MTRRphysMaskValid), %eax
+ movl $(~(CACHE_AS_RAM_SIZE - 1) | MTRR_PHYS_MASK_VALID), %eax
movl $CPU_PHYSMASK_HI, %edx
wrmsr
post_code(POST_CAR_MASK)
/* Enable MTRR */
- movl $MTRRdefType_MSR, %ecx
+ movl $MTRR_DEF_TYPE_MSR, %ecx
rdmsr
- orl $MTRRdefTypeEn, %eax
+ orl $MTRR_DEF_TYPE_EN, %eax
wrmsr
/* Enable cache (CR0.CD = 0, CR0.NW = 0) */
movl $MTRR_PHYS_MASK_MSR(1), %ecx
movl $CPU_PHYSMASK_HI, %edx
- movl $(~(CONFIG_XIP_ROM_SIZE - 1) | MTRRphysMaskValid), %eax
+ movl $(~(CONFIG_XIP_ROM_SIZE - 1) | MTRR_PHYS_MASK_VALID), %eax
wrmsr
post_code(POST_CAR_ROM_CACHE)
xorl %edx, %edx
wrmsr
movl $MTRR_PHYS_MASK_MSR(2), %ecx
- movl $(CACHE_MRC_MASK | MTRRphysMaskValid), %eax
+ movl $(CACHE_MRC_MASK | MTRR_PHYS_MASK_VALID), %eax
movl $CPU_PHYSMASK_HI, %edx
wrmsr
#endif
--- /dev/null
+/*
+ * (C) Copyright 2014 Google, Inc
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ *
+ * Memory Type Range Registers - these are used to tell the CPU whether
+ * memory is cacheable and if so the cache write mode to use.
+ *
+ * These can speed up booting. See the mtrr command.
+ *
+ * Reference: Intel Architecture Software Developer's Manual, Volume 3:
+ * System Programming
+ */
+
+#include <common.h>
+#include <asm/io.h>
+#include <asm/msr.h>
+#include <asm/mtrr.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+/* Prepare to adjust MTRRs */
+void mtrr_open(struct mtrr_state *state)
+{
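+ /*
+  * Remember whether the cache was on, turn it off, then clear the
+  * global MTRR enable bit before the caller rewrites the registers.
+  */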
+ state->enable_cache = dcache_status();
+
+ if (state->enable_cache)
+ disable_caches();
+ state->deftype = native_read_msr(MTRR_DEF_TYPE_MSR);
+ wrmsrl(MTRR_DEF_TYPE_MSR, state->deftype & ~MTRR_DEF_TYPE_EN);
+}
+
+/* Clean up after adjusting MTRRs, and enable them */
+void mtrr_close(struct mtrr_state *state)
+{
+ wrmsrl(MTRR_DEF_TYPE_MSR, state->deftype | MTRR_DEF_TYPE_EN);
+ if (state->enable_cache)
+ enable_caches();
+}
+
+int mtrr_commit(bool do_caches)
+{
+ struct mtrr_request *req = gd->arch.mtrr_req;
+ struct mtrr_state state;
+ uint64_t mask;
+ int i;
+
+ mtrr_open(&state);
+ for (i = 0; i < gd->arch.mtrr_req_count; i++, req++) {
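+ /*
+  * The PHYSMASK value is ~(size - 1) clipped to the CPU's physical
+  * address width; this assumes req->size is a power of two, as the
+  * MTRR architecture requires.
+  */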
+ mask = ~(req->size - 1);
+ mask &= (1ULL << CONFIG_CPU_ADDR_BITS) - 1;
+ wrmsrl(MTRR_PHYS_BASE_MSR(i), req->start | req->type);
+ wrmsrl(MTRR_PHYS_MASK_MSR(i), mask | MTRR_PHYS_MASK_VALID);
+ }
+
+ /* Clear the ones that are unused */
+ for (; i < MTRR_COUNT; i++)
+ wrmsrl(MTRR_PHYS_MASK_MSR(i), 0);
+ mtrr_close(&state);
+
+ return 0;
+}
+
+int mtrr_add_request(int type, uint64_t start, uint64_t size)
+{
+ struct mtrr_request *req;
+ uint64_t mask;
+
+ if (gd->arch.mtrr_req_count == MAX_MTRR_REQUESTS)
+ return -ENOSPC;
+ req = &gd->arch.mtrr_req[gd->arch.mtrr_req_count++];
+ req->type = type;
+ req->start = start;
+ req->size = size;
+ debug("%d: type=%d, %08llx %08llx\n", gd->arch.mtrr_req_count - 1,
+ req->type, req->start, req->size);
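+ /*
+  * Recompute the mask only for the debug trace below; the registers
+  * themselves are programmed later by mtrr_commit().
+  */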
+ mask = ~(req->size - 1);
+ mask &= (1ULL << CONFIG_CPU_ADDR_BITS) - 1;
+ mask |= MTRR_PHYS_MASK_VALID;
+ debug(" %016llx %016llx\n", req->start | req->type, mask);
+
+ return 0;
+}
#ifndef _ASM_MTRR_H
#define _ASM_MTRR_H
-/* These are the region types */
-#define MTRR_TYPE_UNCACHEABLE 0
-#define MTRR_TYPE_WRCOMB 1
-/*#define MTRR_TYPE_ 2*/
-/*#define MTRR_TYPE_ 3*/
-#define MTRR_TYPE_WRTHROUGH 4
-#define MTRR_TYPE_WRPROT 5
-#define MTRR_TYPE_WRBACK 6
-#define MTRR_NUM_TYPES 7
-
-#define MTRRcap_MSR 0x0fe
-#define MTRRdefType_MSR 0x2ff
-
-#define MTRRdefTypeEn (1 << 11)
-#define MTRRdefTypeFixEn (1 << 10)
-
-#define SMRRphysBase_MSR 0x1f2
-#define SMRRphysMask_MSR 0x1f3
-
-#define MTRRphysBase_MSR(reg) (0x200 + 2 * (reg))
-#define MTRRphysMask_MSR(reg) (0x200 + 2 * (reg) + 1)
-
-#define MTRRphysMaskValid (1 << 11)
-
-#define NUM_FIXED_RANGES 88
-#define RANGES_PER_FIXED_MTRR 8
-#define MTRRfix64K_00000_MSR 0x250
-#define MTRRfix16K_80000_MSR 0x258
-#define MTRRfix16K_A0000_MSR 0x259
-#define MTRRfix4K_C0000_MSR 0x268
-#define MTRRfix4K_C8000_MSR 0x269
-#define MTRRfix4K_D0000_MSR 0x26a
-#define MTRRfix4K_D8000_MSR 0x26b
-#define MTRRfix4K_E0000_MSR 0x26c
-#define MTRRfix4K_E8000_MSR 0x26d
-#define MTRRfix4K_F0000_MSR 0x26e
-#define MTRRfix4K_F8000_MSR 0x26f
+/* MTRR region types */
+#define MTRR_TYPE_UNCACHEABLE 0
+#define MTRR_TYPE_WRCOMB 1
+#define MTRR_TYPE_WRTHROUGH 4
+#define MTRR_TYPE_WRPROT 5
+#define MTRR_TYPE_WRBACK 6
+
+#define MTRR_TYPE_COUNT 7
+
+#define MTRR_CAP_MSR 0x0fe
+#define MTRR_DEF_TYPE_MSR 0x2ff
+
+#define MTRR_DEF_TYPE_EN (1 << 11)
+#define MTRR_DEF_TYPE_FIX_EN (1 << 10)
+
+#define MTRR_PHYS_BASE_MSR(reg) (0x200 + 2 * (reg))
+#define MTRR_PHYS_MASK_MSR(reg) (0x200 + 2 * (reg) + 1)
+
+#define MTRR_PHYS_MASK_VALID (1 << 11)
+
+#define MTRR_BASE_TYPE_MASK 0x7
+
+/* Number of MTRRs supported */
+#define MTRR_COUNT 8
#if !defined(__ASSEMBLER__)
-/*
- * The MTRR code has some side effects that the callers should be aware for.
- * 1. The call sequence matters. x86_setup_mtrrs() calls
- * x86_setup_fixed_mtrrs_no_enable() then enable_fixed_mtrrs() (equivalent
- * of x86_setup_fixed_mtrrs()) then x86_setup_var_mtrrs(). If the callers
- * want to call the components of x86_setup_mtrrs() because of other
- * rquirements the ordering should still preserved.
- * 2. enable_fixed_mtrr() will enable both variable and fixed MTRRs because
- * of the nature of the global MTRR enable flag. Therefore, all direct
- * or indirect callers of enable_fixed_mtrr() should ensure that the
- * variable MTRR MSRs do not contain bad ranges.
- * 3. If CONFIG_CACHE_ROM is selected an MTRR is allocated for enabling
- * the caching of the ROM. However, it is set to uncacheable (UC). It
- * is the responsiblity of the caller to enable it by calling
- * x86_mtrr_enable_rom_caching().
+/**
+ * Information about the previous MTRR state, set up by mtrr_open()
+ *
+ * @deftype: Previous value of MTRR_DEF_TYPE_MSR
+ * @enable_cache: true if cache was enabled
*/
-void x86_setup_mtrrs(void);
-/*
- * x86_setup_var_mtrrs() parameters:
- * address_bits - number of physical address bits supported by cpu
- * above4gb - 2 means dynamically detect number of variable MTRRs available.
- * non-zero means handle memory ranges above 4GiB.
- * 0 means ignore memory ranges above 4GiB
+struct mtrr_state {
+ uint64_t deftype;
+ bool enable_cache;
+};
+
+/**
+ * mtrr_open() - Prepare to adjust MTRRs
+ *
+ * Use mtrr_open() passing in a structure - this function will init it. Then
+ * when done, pass the same structure to mtrr_close() to re-enable MTRRs and
+ * possibly the cache.
+ *
+ * @state: Empty structure to pass in to hold settings
+ */
+void mtrr_open(struct mtrr_state *state);
+
+/**
+ * mtrr_close() - Clean up after adjusting MTRRs, and enable them
+ *
+ * This uses the structure containing information returned from mtrr_open().
+ *
+ * @state: Structure from mtrr_open()
+ */
+void mtrr_close(struct mtrr_state *state);
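+
+/*
+ * A minimal usage sketch (illustrative only, not part of this API): clear
+ * variable MTRR number 'reg' (a hypothetical index chosen by the caller)
+ * with the open/close pair, much as board_final_cleanup() does:
+ *
+ *     struct mtrr_state state;
+ *
+ *     mtrr_open(&state);
+ *     wrmsrl(MTRR_PHYS_BASE_MSR(reg), 0);
+ *     wrmsrl(MTRR_PHYS_MASK_MSR(reg), 0);
+ *     mtrr_close(&state);
+ */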
+
+/**
+ * mtrr_add_request() - Add a new MTRR request
+ *
+ * This adds a request for a memory region to be set up in a particular way.
+ *
+ * @type: Requested type (MTRR_TYPE_)
+ * @start: Start address
+ * @size: Size
+ */
+int mtrr_add_request(int type, uint64_t start, uint64_t size);
+
+/**
+ * mtrr_commit() - set up the MTRR registers based on current requests
+ *
+ * This sets up MTRRs for the available DRAM and the requests received so far.
+ * It must be called with caches disabled.
+ *
+ * @do_caches: true if caches are currently on
*/
-void x86_setup_var_mtrrs(unsigned int address_bits, unsigned int above4gb);
-void enable_fixed_mtrr(void);
-void x86_setup_fixed_mtrrs(void);
-/* Set up fixed MTRRs but do not enable them. */
-void x86_setup_fixed_mtrrs_no_enable(void);
-int x86_mtrr_check(void);
-/* ROM caching can be used after variable MTRRs are set up. Beware that
- * enabling CONFIG_CACHE_ROM will eat through quite a few MTRRs based on
- * one's IO hole size and WRCOMB resources. Be sure to check the console
- * log when enabling CONFIG_CACHE_ROM or adding WRCOMB resources. Beware that
- * on CPUs with core-scoped MTRR registers such as hyperthreaded CPUs the
- * rom caching will be disabled if all threads run the MTRR code. Therefore,
- * one needs to call x86_mtrr_enable_rom_caching() after all threads of the
- * same core have run the MTRR code. */
-#if CONFIG_CACHE_ROM
-void x86_mtrr_enable_rom_caching(void);
-void x86_mtrr_disable_rom_caching(void);
-/* Return the variable range MTRR index of the ROM cache. */
-long x86_mtrr_rom_cache_var_index(void);
-#else
-static inline void x86_mtrr_enable_rom_caching(void) {}
-static inline void x86_mtrr_disable_rom_caching(void) {}
-static inline long x86_mtrr_rom_cache_var_index(void) { return -1; }
-#endif /* CONFIG_CACHE_ROM */
+int mtrr_commit(bool do_caches);
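+
+/*
+ * A minimal usage sketch (illustrative only): board DRAM init code could
+ * queue a write-back mapping for its memory and, while the cache is still
+ * off, program the registers. 'ram_size' is a stand-in for whatever size
+ * the board reports:
+ *
+ *     mtrr_add_request(MTRR_TYPE_WRBACK, 0, ram_size);
+ *     mtrr_commit(false);
+ */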
#endif