/*
 * (C) Copyright 2004, Psyent Corporation <www.psyent.com>
 * Scott McNutt <smcnutt@psyent.com>
 *
 * See file CREDITS for list of people who contributed to this
 * project.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 * MA 02111-1307 USA
 */
#include <timestamp.h>

/*************************************************************************
 ************************************************************************/

/* NOTE(review): the .text/.global/_start: lines are not visible in
 * this chunk -- the code below is the reset-entry sequence; confirm
 * against the full source.
 */

/* ICACHE INIT -- only the icache line at the reset address
 * is invalidated at reset. So the init must stay within
 * the cache line size (8 words). If GERMS is used, we'll
 * just be invalidating the cache a second time. If cache
 * is not implemented initi behaves as nop.
 */
	ori	r4, r0, %lo(CONFIG_SYS_ICACHELINE_SIZE)	/* r4 <- line size */
	movhi	r5, %hi(CONFIG_SYS_ICACHE_SIZE)
	ori	r5, r5, %lo(CONFIG_SYS_ICACHE_SIZE)	/* r5 <- total cache size */
	/* NOTE(review): the initi invalidate loop that consumes r4/r5
	 * is not visible in this chunk -- confirm against full source.
	 */
	br	_except_end	/* Skip the tramp */
/* EXCEPTION TRAMPOLINE -- the following gets copied
 * to the exception address (below), but is otherwise at the
 * default exception vector offset (0x0020).
 */
	movhi	et, %hi(_exception)	/* et <- &_exception (high half) */
	ori	et, et, %lo(_exception)	/* et <- &_exception (complete) */
	/* NOTE(review): the trampoline's final indirect jump through et,
	 * and the _except_start/_except_end labels referenced elsewhere,
	 * are not visible in this chunk -- confirm against full source.
	 */
/* INTERRUPTS -- for now, all interrupts masked and globally
 * disabled.
 */
	wrctl	status, r0	/* Disable interrupts */
	wrctl	ienable, r0	/* All disabled */
/* DCACHE INIT -- if dcache not implemented, initd behaves as
 * nop.
 */
	movhi	r4, %hi(CONFIG_SYS_DCACHELINE_SIZE)
	ori	r4, r4, %lo(CONFIG_SYS_DCACHELINE_SIZE)	/* r4 <- line size */
	movhi	r5, %hi(CONFIG_SYS_DCACHE_SIZE)
	ori	r5, r5, %lo(CONFIG_SYS_DCACHE_SIZE)	/* r5 <- total cache size */
	/* NOTE(review): the initd flush loop that consumes r4/r5 is not
	 * visible in this chunk -- confirm against full source.
	 */
/* RELOCATE CODE, DATA & COMMAND TABLE -- the following code
 * assumes code, data and the command table are all
 * contiguous. This lets us relocate everything as a single
 * block. Make sure the linker script matches this ;-)
 */
	/* NOTE(review): an instruction that loads the current pc into r4
	 * (e.g. nextpc) appears to precede _cur in the full source --
	 * r4 is consumed below but never set here.
	 */
_cur:	movhi	r5, %hi(_cur - _start)
	ori	r5, r5, %lo(_cur - _start)	/* r5 <- offset of _cur within image */
	sub	r4, r4, r5	/* r4 <- cur _start */
	ori	r5, r5, %lo(_start)	/* r5 <- linked _start */
	/* NOTE(review): the matching movhi for r5/_start and r6/_edata,
	 * plus the copy loop, are not visible in this chunk.
	 */
	ori	r6, r6, %lo(_edata)	/* r6 <- copy end bound */
/* ZERO BSS/SBSS -- bss and sbss are assumed to be adjacent
 * and between __bss_start and _end.
 */
	movhi	r5, %hi(__bss_start)
	ori	r5, r5, %lo(__bss_start)	/* r5 <- start of bss */
	/* NOTE(review): the matching movhi for r6/_end and the
	 * store-zero loop are not visible in this chunk.
	 */
	ori	r6, r6, %lo(_end)	/* r6 <- end of bss */
/* GLOBAL POINTER -- the global pointer is used to reference
 * "small data" (see -G switch). The linker script must
 * provide the gp address.
 * NOTE(review): the instructions loading gp are not visible in
 * this chunk -- confirm against full source.
 */

/* JUMP TO RELOC ADDR */
	movhi	r4, %hi(_reloc)
	ori	r4, r4, %lo(_reloc)	/* r4 <- relocated continuation addr */
	/* NOTE(review): the indirect jump through r4 is not visible in
	 * this chunk -- confirm against full source.
	 */
/* COPY EXCEPTION TRAMPOLINE -- copy the tramp to the
 * exception address. Define CONFIG_ROM_STUBS to prevent
 * the copy (e.g. exception in flash or in other
 * software/firmware component).
 */
#if !defined(CONFIG_ROM_STUBS)
	movhi	r4, %hi(_except_start)
	ori	r4, r4, %lo(_except_start)	/* r4 <- tramp source start */
	movhi	r5, %hi(_except_end)
	ori	r5, r5, %lo(_except_end)	/* r5 <- tramp source end */
	movhi	r6, %hi(CONFIG_SYS_EXCEPTION_ADDR)
	ori	r6, r6, %lo(CONFIG_SYS_EXCEPTION_ADDR)	/* r6 <- dest addr */
	beq	r4, r6, 7f	/* Skip if at proper addr */
	/* NOTE(review): the copy loop, the local label 7:, and the
	 * closing #endif are not visible in this chunk -- confirm
	 * against full source before assembling.
	 */
/* STACK INIT -- zero top two words for call back chain.
 * NOTE(review): the stores zeroing the two words are not visible
 * in this chunk -- only the stack pointer setup is.
 */
	movhi	sp, %hi(CONFIG_SYS_INIT_SP)
	ori	sp, sp, %lo(CONFIG_SYS_INIT_SP)	/* sp <- initial stack top */
/*
 * Call board_init -- never returns
 */
	movhi	r4, %hi(board_init@h)
	ori	r4, r4, %lo(board_init@h)	/* r4 <- &board_init */
	/* NOTE(review): the callr through r4 is not visible in this
	 * chunk -- confirm against full source.
	 */

/* NEVER RETURNS -- but branch to the _start just
 * in case ;-)
 */
/*
 * dly_clks -- Nios2 (like Nios1) doesn't have a timebase in
 * the core. For simple delay loops, we do our best by counting
 * instruction cycles.
 *
 * Instruction performance varies based on the core. For cores
 * with icache and static/dynamic branch prediction (II/f, II/s):
 *
 *	Normal ALU (e.g. add, cmp, etc):	1 cycle
 *	Branch (correctly predicted, taken):	2 cycles
 *	Negative offset is predicted (II/s).
 *
 * For cores without icache and no branch prediction (II/e):
 *
 *	Normal ALU (e.g. add, cmp, etc):	6 cycles
 *	Branch (no prediction):			6 cycles
 *
 * For simplicity, if an instruction cache is implemented we
 * assume II/f or II/s. Otherwise, we use the II/e.
 *
 * NOTE(review): the dly_clks label and the loop/branch around the
 * subi below are not visible in this chunk -- confirm against
 * full source. The #else/#endif have been restored: the two subi
 * variants are mutually exclusive per-core cycle counts.
 */
#if (CONFIG_SYS_ICACHE_SIZE > 0)
	subi	r4, r4, 3	/* 3 clocks/loop */
#else
	subi	r4, r4, 12	/* 12 clocks/loop */
#endif
/* Fallback identity string when the board config provides none. */
#if !defined(CONFIG_IDENT_STRING)
#define CONFIG_IDENT_STRING ""
#endif

	/* NOTE(review): the version_string: label has been restored --
	 * .globl exports it and the .ascii data must be addressable;
	 * confirm placement against full source.
	 */
	.globl version_string
version_string:
	.ascii U_BOOT_VERSION
	.ascii " (", U_BOOT_DATE, " - ", U_BOOT_TIME, ")"
	.ascii CONFIG_IDENT_STRING, "\0"	/* NUL-terminate for C consumers */