/*
 * Copyright (C) 2005 by Dominic Rath
 *
 * Copyright (C) 2006 by Magnus Lundin
 *
 * Copyright (C) 2008 by Spencer Oliver
 *
 * Copyright (C) 2009 by Øyvind Harboe
 * oyvind.harboe@zylin.com
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the
 * Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#ifndef __ARM_OPCODES_H
#define __ARM_OPCODES_H

/*
 * Macros used to generate various ARM or Thumb opcodes.
 */

/* ARM mode instructions */

/* Store multiple increment after
 * Rn: base register
 * List: for each bit in list: store register
 * S: in privileged mode: store user-mode registers
 * W = 1: update the base register. W = 0: leave the base register untouched
 */
#define ARMV4_5_STMIA(Rn, List, S, W) \
	(0xe8800000 | ((S) << 22) | ((W) << 21) | ((Rn) << 16) | (List))

/* Load multiple increment after
 * Rn: base register
 * List: for each bit in list: load register
 * S: in privileged mode: load user-mode registers
 * W = 1: update the base register. W = 0: leave the base register untouched
 */
#define ARMV4_5_LDMIA(Rn, List, S, W) \
	(0xe8900000 | ((S) << 22) | ((W) << 21) | ((Rn) << 16) | (List))

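/* Worked example (illustrative only): ARMV4_5_STMIA(0, 0xffff, 0, 0) expands
 * to 0xe880ffff, i.e. "stmia r0, {r0-r15}", and ARMV4_5_LDMIA(0, 0xffff, 0, 0)
 * to 0xe890ffff, i.e. "ldmia r0, {r0-r15}": the register list occupies bits
 * [15:0], Rn bits [19:16], W bit 21 and S bit 22 of the load/store-multiple
 * encoding.
 */
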
/* No operation (MOV r8, r8) */
#define ARMV4_5_NOP (0xe1a08008)

/* Move PSR to general purpose register
 * R = 1: SPSR R = 0: CPSR
 */
#define ARMV4_5_MRS(Rn, R) (0xe10f0000 | ((R) << 22) | ((Rn) << 12))

/* Store register
 * Rd: register to store
 * Rn: base register
 */
#define ARMV4_5_STR(Rd, Rn) (0xe5800000 | ((Rd) << 12) | ((Rn) << 16))

/* Load register
 * Rd: register to load
 * Rn: base register
 */
#define ARMV4_5_LDR(Rd, Rn) (0xe5900000 | ((Rd) << 12) | ((Rn) << 16))

/* Move general purpose register to PSR
 * R = 1: SPSR R = 0: CPSR
 * Field: mask of PSR fields to write:
 * 1: control field 2: extension field 4: status field 8: flags field
 */
#define ARMV4_5_MSR_GP(Rm, Field, R) \
	(0xe120f000 | (Rm) | ((Field) << 16) | ((R) << 22))
#define ARMV4_5_MSR_IM(Im, Rotate, Field, R) \
	(0xe320f000 | (Im) | ((Rotate) << 8) | ((Field) << 16) | ((R) << 22))

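/* Example (illustrative): ARMV4_5_MSR_GP(0, 8, 0) expands to 0xe128f000,
 * i.e. "msr CPSR_f, r0" -- field mask 8 selects only the flags field.
 */
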
/* Load Register Word Immediate Post-Index
 * Rd: register to load
 * Rn: base register
 */
#define ARMV4_5_LDRW_IP(Rd, Rn) (0xe4900004 | ((Rd) << 12) | ((Rn) << 16))

/* Load Register Halfword Immediate Post-Index
 * Rd: register to load
 * Rn: base register
 */
#define ARMV4_5_LDRH_IP(Rd, Rn) (0xe0d000b2 | ((Rd) << 12) | ((Rn) << 16))

/* Load Register Byte Immediate Post-Index
 * Rd: register to load
 * Rn: base register
 */
#define ARMV4_5_LDRB_IP(Rd, Rn) (0xe4d00001 | ((Rd) << 12) | ((Rn) << 16))

/* Store Register Word Immediate Post-Index
 * Rd: register to store
 * Rn: base register
 */
#define ARMV4_5_STRW_IP(Rd, Rn) (0xe4800004 | ((Rd) << 12) | ((Rn) << 16))

/* Store Register Halfword Immediate Post-Index
 * Rd: register to store
 * Rn: base register
 */
#define ARMV4_5_STRH_IP(Rd, Rn) (0xe0c000b2 | ((Rd) << 12) | ((Rn) << 16))

/* Store Register Byte Immediate Post-Index
 * Rd: register to store
 * Rn: base register
 */
#define ARMV4_5_STRB_IP(Rd, Rn) (0xe4c00001 | ((Rd) << 12) | ((Rn) << 16))

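/* Example (illustrative): ARMV4_5_LDRW_IP(0, 1) expands to 0xe4910004,
 * i.e. "ldr r0, [r1], #4": the word is loaded from [r1], then r1 is
 * incremented by 4. The other *_IP macros differ only in access size
 * (4, 2 or 1 bytes) and load/store direction.
 */
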
/* Branch (and Link)
 * Im: Branch target (left-shifted by 2 bits, added to PC)
 * L: 1: branch and link 0: branch only
 */
#define ARMV4_5_B(Im, L) (0xea000000 | (Im) | ((L) << 24))

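/* Example (illustrative): a branch-to-self is ARMV4_5_B(0xfffffe, 0)
 * = 0xeafffffe: the 24-bit word offset of -2 compensates for the PC
 * reading 8 bytes ahead of the branch instruction.
 */
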
/* Branch and exchange (ARM state)
 * Rm: register holding branch target address
 */
#define ARMV4_5_BX(Rm) (0xe12fff10 | (Rm))

/* Move to ARM register from coprocessor
 * CP: Coprocessor number
 * op1: Coprocessor opcode
 * Rd: destination register
 * CRn: first coprocessor operand
 * CRm: second coprocessor operand
 * op2: second coprocessor opcode
 */
#define ARMV4_5_MRC(CP, op1, Rd, CRn, CRm, op2) \
	(0xee100010 | (CRm) | ((op2) << 5) | ((CP) << 8) \
	| ((Rd) << 12) | ((CRn) << 16) | ((op1) << 21))

/* Move to coprocessor from ARM register
 * CP: Coprocessor number
 * op1: Coprocessor opcode
 * Rd: source register
 * CRn: first coprocessor operand
 * CRm: second coprocessor operand
 * op2: second coprocessor opcode
 */
#define ARMV4_5_MCR(CP, op1, Rd, CRn, CRm, op2) \
	(0xee000010 | (CRm) | ((op2) << 5) | ((CP) << 8) \
	| ((Rd) << 12) | ((CRn) << 16) | ((op1) << 21))

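/* Example (illustrative): ARMV4_5_MRC(15, 0, 0, 0, 0, 0) expands to
 * 0xee100f10, i.e. "mrc p15, 0, r0, c0, c0, 0", which reads the CP15
 * Main ID register into r0 on cores that implement it.
 */
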
/* Breakpoint instruction (ARMv5)
 * Im: 16-bit immediate
 */
#define ARMV5_BKPT(Im) (0xe1200070 | (((Im) & 0xfff0) << 4) | ((Im) & 0xf))

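/* Example (illustrative): ARMV5_BKPT(0) is 0xe1200070 ("bkpt #0"); the
 * 16-bit immediate is split across bits [19:8] and [3:0] of the opcode and
 * is ignored by the core -- it only identifies the breakpoint to a debugger.
 */
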
/* Thumb mode instructions
 *
 * NOTE: these 16-bit opcodes fill both halves of a word with the same
 * value. The reason for this is that when we need to execute Thumb
 * opcodes on ARM7/ARM9 cores (to switch to ARM state on debug entry),
 * we must shift 32 bits to the bus using scan chain 1 ... if we write
 * both halves, we don't need to track which half matters. On ARMv6 and
 * ARMv7 we don't execute Thumb instructions in debug mode; the ITR
 * register does not accept Thumb (or Thumb2) opcodes.
 */

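/* For illustration: ARMV4_5_T_NOP below is (0x46c0 | (0x46c0 << 16)),
 * i.e. the 16-bit "mov r8, r8" placed in both halves of the 32-bit word,
 * so whichever halfword the core fetches is the same instruction.
 */
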
/* Store register (Thumb mode)
 * Rd: source register
 * Rn: base register
 */
#define ARMV4_5_T_STR(Rd, Rn) \
	((0x6000 | (Rd) | ((Rn) << 3)) | \
	((0x6000 | (Rd) | ((Rn) << 3)) << 16))

/* Load register (Thumb state)
 * Rd: destination register
 * Rn: base register
 */
#define ARMV4_5_T_LDR(Rd, Rn) \
	((0x6800 | ((Rn) << 3) | (Rd)) \
	| ((0x6800 | ((Rn) << 3) | (Rd)) << 16))

/* Load multiple (Thumb state)
 * Rn: base register
 * List: for each bit in list: load register
 */
#define ARMV4_5_T_LDMIA(Rn, List) \
	((0xc800 | ((Rn) << 8) | (List)) \
	| ((0xc800 | ((Rn) << 8) | (List)) << 16))

/* Load register with PC relative addressing
 * Rd: register to load
 */
#define ARMV4_5_T_LDR_PCREL(Rd) \
	((0x4800 | ((Rd) << 8)) \
	| ((0x4800 | ((Rd) << 8)) << 16))

/* Move hi register (Thumb mode)
 * Rd: destination register
 * Rm: source register
 */
#define ARMV4_5_T_MOV(Rd, Rm) \
	((0x4600 | ((Rd) & 0x7) | (((Rd) & 0x8) << 4) | \
	(((Rm) & 0x7) << 3) | (((Rm) & 0x8) << 3)) \
	| ((0x4600 | ((Rd) & 0x7) | (((Rd) & 0x8) << 4) | \
	(((Rm) & 0x7) << 3) | (((Rm) & 0x8) << 3)) << 16))

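/* Example (illustrative): ARMV4_5_T_MOV(0, 15) yields 0x4678 in each
 * halfword, i.e. "mov r0, pc": bit 7 holds Rd[3] and bits [6:3] hold Rm,
 * so the full r0-r15 range is reachable from this 16-bit encoding.
 */
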
/* No operation (Thumb mode)
 * NOTE: this is "MOV r8, r8" ... Thumb2 adds two
 * architected NOPs, 16-bit and 32-bit.
 */
#define ARMV4_5_T_NOP (0x46c0 | (0x46c0 << 16))

/* Move immediate to register (Thumb state)
 * Rd: destination register
 * Im: 8-bit immediate value
 */
#define ARMV4_5_T_MOV_IM(Rd, Im) \
	((0x2000 | ((Rd) << 8) | (Im)) \
	| ((0x2000 | ((Rd) << 8) | (Im)) << 16))

/* Branch and Exchange (Thumb state)
 * Rm: register containing branch target
 */
#define ARMV4_5_T_BX(Rm) \
	((0x4700 | ((Rm) << 3)) \
	| ((0x4700 | ((Rm) << 3)) << 16))

/* Branch (Thumb state)
 * Imm: Branch target
 */
#define ARMV4_5_T_B(Imm) \
	((0xe000 | (Imm)) \
	| ((0xe000 | (Imm)) << 16))

/* Breakpoint instruction (ARMv5) (Thumb state)
 * Im: 8-bit immediate
 */
#define ARMV5_T_BKPT(Im) \
	((0xbe00 | (Im)) \
	| ((0xbe00 | (Im)) << 16))

/* Move to Register from Special Register
 * 32 bit Thumb2 instruction
 * Rd: destination register
 * SYSm: source special register
 */
#define ARM_T2_MRS(Rd, SYSm) \
	((0xF3EF) | ((0x8000 | ((Rd) << 8) | (SYSm)) << 16))

/* Move to Special Register from Register
 * 32 bit Thumb2 instruction
 * SYSm: destination special register
 * Rn: source register
 */
#define ARM_T2_MSR(SYSm, Rn) \
	((0xF380 | (Rn)) | ((0x8800 | (SYSm)) << 16))

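/* Example (illustrative): with SYSm = 8 (MSP), ARM_T2_MRS(0, 8) packs the
 * halfwords 0xF3EF 0x8008 ("mrs r0, MSP") and ARM_T2_MSR(8, 0) packs
 * 0xF380 0x8808 ("msr MSP, r0"), first halfword in bits [15:0].
 */
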
/* Change Processor State.
 * 16 bit Thumb2 instruction
 * IF: A_FLAG and/or I_FLAG and/or F_FLAG
 */
#define ARM_T2_CPSID(IF) \
	((0xB660 | (1 << 4) | ((IF)&0x3)) \
	| ((0xB660 | (1 << 4) | ((IF)&0x3)) << 16))
#define ARM_T2_CPSIE(IF) \
	((0xB660 | (0 << 4) | ((IF)&0x3)) \
	| ((0xB660 | (0 << 4) | ((IF)&0x3)) << 16))

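/* Example (illustrative, taking the I flag as bit 1, i.e. value 2):
 * ARM_T2_CPSID(2) yields 0xB672 in each halfword ("cpsid i") and
 * ARM_T2_CPSIE(2) yields 0xB662 ("cpsie i"); bit 4 selects disable vs. enable.
 */
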
#endif /* __ARM_OPCODES_H */