/*
 * Copyright 2010, Google Inc.
 *
 * Brought in from coreboot uldivmod.S
 *
 * SPDX-License-Identifier:	GPL-2.0
 */

#include <linux/linkage.h>
#include <asm/assembler.h>

/*
 * A, Q = r0 + (r1 << 32)
 * B, R = r2 + (r3 << 32)
 */
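@ A / B = Q ... R: per the 32-bit AEABI the 64-bit quotient is returned
@ in r0/r1 and the 64-bit remainder in r2/r3, which is why the A/Q and
@ B/R register pairs can alias the same registers.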
.pushsection .text.__aeabi_uldivmod, "ax"
ENTRY(__aeabi_uldivmod)

	stmfd	sp!, {r4, r5, r6, r7, THUMB(TMP,) lr}
	orrs	ip, B_0, B_1		@ Z set -> B == 0
	@ Test if B is power of 2: (B & (B - 1)) == 0
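	@ Subtracting 1 from a power of two clears its only set bit, so
	@ B & (B - 1) is zero exactly for powers of two (B == 0 was already
	@ handled above).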
	@ Test if A_1 == B_1 == 0
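	@ If both high words are zero, the operands fit in 32 bits and a
	@ single 32-bit division yields Q and R directly.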
/* CLZ only exists in ARM architecture version 5 and above. */
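@ With CLZ the normalization amount (clz B - clz A) is computed directly;
@ pre-v5 cores use the shift-and-test sequences further down instead.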
	@ if clz B - clz A > 0
	@ B <<= (clz B - clz A)
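	@ 64-bit left shift by D_0, in two cases (the mi/pl forms suggest
	@ the flags were set from D_0 - 32, with ip = 32 - D_0): for
	@ D_0 < 32 the low word spills into the high word; for D_0 >= 32
	@ the low word moves to the high word shifted by D_0 - 32 (D_1).
	@ Thumb-2 cannot encode a register-shifted operand inside orr,
	@ hence the extra lsr into TMP on the THUMB() lines.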
	movmi	B_1, B_1, lsl D_0
ARM(	orrmi	B_1, B_1, B_0, lsr ip	)
THUMB(	lsrmi	TMP, B_0, ip		)
THUMB(	orrmi	B_1, B_1, TMP		)
	movpl	B_1, B_0, lsl D_1
	@ C = 1 << (clz B - clz A)
	movmi	C_1, C_1, lsl D_0
ARM(	orrmi	C_1, C_1, C_0, lsr ip	)
THUMB(	lsrmi	TMP, C_0, ip		)
THUMB(	orrmi	C_1, C_1, TMP		)
	movpl	C_1, C_0, lsl D_1
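	@ After this, B's most significant set bit is aligned with A's, and
	@ C carries the weight of the first quotient bit to be tried.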
	@ C: current bit; D: result
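	@ Restoring binary long division: while the shifted B still fits
	@ into what remains of A, subtract it and set the current bit C in
	@ the result D; then shift C and B right by one and repeat.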
	orr	B_1, B_1, B_0, lsr #28
	orr	C_1, C_1, C_0, lsr #28
	orr	B_1, B_1, B_0, lsr #31
	orr	C_1, C_1, C_0, lsr #31
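	@ Each orr completes a 64-bit left shift (by 4, then by 1), pulling
	@ the bits that fall out of the low word into the high word; this is
	@ how the pre-CLZ path lines B up with A a nibble, then a bit, at a
	@ time.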
	movs	C_1, C_1, lsr #1
	movs	B_1, B_1, lsr #1
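	@ The flag-setting lsr #1 leaves the bit shifted out of the high
	@ word in the carry, where the low half of the 64-bit shift can pick
	@ it up (rrx), and lets the loop stop once C has been shifted out.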
	@ Note: A, B & Q, R are aliases
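	@ A_0/A_1 and Q_0/Q_1 are r0/r1, B and R are r2/r3: what remains in
	@ A is already the remainder, and the quotient accumulated in D just
	@ needs to be moved into Q before returning.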
	ldmfd	sp!, {r4, r5, r6, r7, THUMB(TMP,) pc}
	@ Note: A_0 & r0 are aliases
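	@ 32-bit by 32-bit case: the dividend is already in r0 where a plain
	@ 32-bit unsigned divide (presumably __aeabi_uidivmod) expects it,
	@ and the high words of Q and R are simply zero.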
	ldmfd	sp!, {r4, r5, r6, r7, THUMB(TMP,) pc}
	@ Note: A, B and Q, R are aliases
	@ Note: B must not be 0 here!
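	@ Power-of-two divisor: R = A & (B - 1) and Q = A >> log2(B), so the
	@ division reduces to a mask and a 64-bit right shift; B == 0 has no
	@ log2, hence the note above.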
	mov	A_0, A_1, lsr D_0
	movpl	A_0, A_0, lsr D_0
ARM(	orrpl	A_0, A_0, A_1, lsl D_1	)
THUMB(	lslpl	TMP, A_1, D_1		)
THUMB(	orrpl	A_0, A_0, TMP		)
	mov	A_1, A_1, lsr D_0
	ldmfd	sp!, {r4, r5, r6, r7, THUMB(TMP,) pc}
	@ Note: A, B and Q, R are aliases
	@ Note: B must not be 0 here!
	@ Count the leading zeroes in B.
	@ If B is greater than 1 << 31 (i.e. B_1 != 0), divide A and B by
	@ 1 << 32.
	@ Count the remaining leading zeroes in B.
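	@ In effect this locates B's single set bit, i.e. log2(B), by binary
	@ search: each step tests whether the low 16 (then 8, 4, 2) bits are
	@ clear and, if so, shifts them away, accounting for their width in
	@ the shift amount.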
	movs	B_1, B_0, lsl #16
	moveq	B_0, B_0, lsr #16
	moveq	B_0, B_0, lsr #8
	moveq	B_0, B_0, lsr #4
	moveq	B_0, B_0, lsr #2
	@ Shift A to the right by the appropriate amount.
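	@ 64-bit right shift by D_0 (here D_0 < 32, with D_1 assumed to hold
	@ the complement 32 - D_0): Q_0 = (A_0 >> D_0) | (A_1 << D_1) and
	@ Q_1 = A_1 >> D_0.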
	mov	Q_0, A_0, lsr D_0
ARM(	orr	Q_0, Q_0, A_1, lsl D_1	)
THUMB(	lsl	TMP, A_1, D_1		)	@ via TMP: A_1 (r1) is
THUMB(	orr	Q_0, Q_0, TMP		)	@ still read below
	mov	Q_1, A_1, lsr D_0
	ldmfd	sp!, {r4, r5, r6, r7, THUMB(TMP,) pc}
	@ As wrong as it could be: the AEABI defines no result for division
	@ by zero, so whatever is returned here is an arbitrary placeholder.
	ldmfd	sp!, {r4, r5, r6, r7, THUMB(TMP,) pc}
ENDPROC(__aeabi_uldivmod)