/* bitops.h: Bit string operations on the ppc */
8 #include <asm/byteorder.h>
10 extern void set_bit(int nr, volatile void *addr);
11 extern void clear_bit(int nr, volatile void *addr);
12 extern void change_bit(int nr, volatile void *addr);
13 extern int test_and_set_bit(int nr, volatile void *addr);
14 extern int test_and_clear_bit(int nr, volatile void *addr);
15 extern int test_and_change_bit(int nr, volatile void *addr);
18 * Arguably these bit operations don't imply any memory barrier or
19 * SMP ordering, but in fact a lot of drivers expect them to imply
20 * both, since they do on x86 cpus.
23 #define SMP_WMB "eieio\n"
24 #define SMP_MB "\nsync"
28 #endif /* CONFIG_SMP */
30 #define __INLINE_BITOPS 1
34 * These used to be if'd out here because using : "cc" as a constraint
35 * resulted in errors from egcs. Things may be OK with gcc-2.95.
/*
 * set_bit - atomically set bit 'nr' in the bitmap at 'addr'.
 * Bits are numbered within big-endian 32-bit words: word index is
 * nr >> 5, bit within the word is nr & 0x1f.
 * NOTE(review): the lwarx/stwcx. instruction template between the asm
 * open and the constraint lists is missing from this listing — restore
 * it from the original header before building.
 */
37 extern __inline__ void set_bit(int nr, volatile void * addr)
/* Split the bit index into a mask and a word pointer. */
40 unsigned long mask = 1 << (nr & 0x1f);
41 unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
/* SMP_WMB expands to "eieio" on SMP (see the defines above) so the
   store is ordered like the x86 bitops drivers expect. */
43 __asm__ __volatile__(SMP_WMB "\
49 : "=&r" (old), "=m" (*p)
50 : "r" (mask), "r" (p), "m" (*p)
/*
 * clear_bit - atomically clear bit 'nr' in the bitmap at 'addr'.
 * Same indexing scheme as set_bit (32-bit words, nr >> 5 / nr & 0x1f).
 * NOTE(review): the lwarx/andc/stwcx. instruction template is missing
 * from this listing — restore it from the original header.
 */
54 extern __inline__ void clear_bit(int nr, volatile void *addr)
57 unsigned long mask = 1 << (nr & 0x1f);
58 unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
/* Barrier macro precedes the atomic sequence, matching set_bit. */
60 __asm__ __volatile__(SMP_WMB "\
66 : "=&r" (old), "=m" (*p)
67 : "r" (mask), "r" (p), "m" (*p)
/*
 * change_bit - atomically toggle bit 'nr' in the bitmap at 'addr'.
 * Same indexing scheme as set_bit/clear_bit.
 * NOTE(review): the lwarx/xor/stwcx. instruction template is missing
 * from this listing — restore it from the original header.
 */
71 extern __inline__ void change_bit(int nr, volatile void *addr)
74 unsigned long mask = 1 << (nr & 0x1f);
75 unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
77 __asm__ __volatile__(SMP_WMB "\
83 : "=&r" (old), "=m" (*p)
84 : "r" (mask), "r" (p), "m" (*p)
/*
 * test_and_set_bit - atomically set bit 'nr' and return its previous
 * value (nonzero if it was already set — see the final return of
 * (old & mask) != 0).
 * NOTE(review): the lwarx/or/stwcx. template and the SMP_MB trailing
 * barrier are missing from this listing — restore from the original.
 */
88 extern __inline__ int test_and_set_bit(int nr, volatile void *addr)
91 unsigned int mask = 1 << (nr & 0x1f);
92 volatile unsigned int *p = ((volatile unsigned int *)addr) + (nr >> 5);
94 __asm__ __volatile__(SMP_WMB "\
/* Two scratch registers: %0 captures the old word, %1 holds the
   updated value for the stwcx. store-conditional. */
100 : "=&r" (old), "=&r" (t), "=m" (*p)
101 : "r" (mask), "r" (p), "m" (*p)
/* Report the bit's state before the update. */
104 return (old & mask) != 0;
/*
 * test_and_clear_bit - atomically clear bit 'nr' and return its
 * previous value (nonzero if it was set).
 * NOTE(review): the atomic instruction template is missing from this
 * listing — restore it from the original header.
 */
107 extern __inline__ int test_and_clear_bit(int nr, volatile void *addr)
110 unsigned int mask = 1 << (nr & 0x1f);
111 volatile unsigned int *p = ((volatile unsigned int *)addr) + (nr >> 5);
113 __asm__ __volatile__(SMP_WMB "\
119 : "=&r" (old), "=&r" (t), "=m" (*p)
120 : "r" (mask), "r" (p), "m" (*p)
123 return (old & mask) != 0;
/*
 * test_and_change_bit - atomically toggle bit 'nr' and return its
 * previous value (nonzero if it was set).
 * NOTE(review): the atomic instruction template is missing from this
 * listing — restore it from the original header.
 */
126 extern __inline__ int test_and_change_bit(int nr, volatile void *addr)
129 unsigned int mask = 1 << (nr & 0x1f);
130 volatile unsigned int *p = ((volatile unsigned int *)addr) + (nr >> 5);
132 __asm__ __volatile__(SMP_WMB "\
138 : "=&r" (old), "=&r" (t), "=m" (*p)
139 : "r" (mask), "r" (p), "m" (*p)
142 return (old & mask) != 0;
144 #endif /* __INLINE_BITOPS */
/*
 * test_bit - non-atomic read of bit 'nr' in the bitmap at 'addr'.
 * Returns 1 if the bit is set, 0 otherwise. Uses the same 32-bit
 * word indexing as the atomic ops (word nr >> 5, bit nr & 0x1f).
 */
extern __inline__ int test_bit(int nr, __const__ volatile void *addr)
{
	__const__ unsigned int *words = (__const__ unsigned int *) addr;
	unsigned int word = words[nr >> 5];

	return (word >> (nr & 0x1f)) & 1;
}
153 /* Return the bit position of the most significant 1 bit in a word */
154 /* - the result is undefined when x == 0 */
/*
 * __ilog2 - bit position of the most significant 1 bit in x
 * (undefined for x == 0, per the comment above; on ppc cntlzw of 0
 * yields 32, giving -1 — see the fls/fls64 callers below).
 * cntlzw = "count leading zeros word".
 * NOTE(review): the declaration of lz and the return statement
 * (presumably 31 - lz) are missing from this listing.
 */
155 extern __inline__ int __ilog2(unsigned int x)
159 asm ("cntlzw %0,%1" : "=r" (lz) : "r" (x));
/*
 * ffz - find position of the first zero bit in x.
 * NOTE(review): this listing appears to be missing the preceding
 * lines that invert x (x = ~x) and early-return 32 when x is all
 * ones; as shown, the surviving line alone would compute the lowest
 * SET bit instead. Restore from the original header.
 */
163 extern __inline__ int ffz(unsigned int x)
/* x & -x isolates the lowest set bit (of the inverted value). */
167 return __ilog2(x & -x);
171 * fls: find last (most-significant) bit set.
172 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
174 * On powerpc, __ilog2(0) returns -1, but this is not safe in general
/*
 * fls - find last (most-significant) set bit, 1-based.
 * fls(0) == 0 relies on __ilog2(0) returning -1 on powerpc, as the
 * comment above notes.
 */
static __inline__ int fls(unsigned int x)
{
	int msb = __ilog2(x);

	return msb + 1;
}
183 * fls64 - find last set bit in a 64-bit word
184 * @x: the word to search
186 * This is defined in a similar way as the libc and compiler builtin
187 * ffsll, but returns the position of the most significant set bit.
189 * fls64(value) returns 0 if value is 0 or the position of the last
190 * set bit if value is nonzero. The last (most significant) bit is
193 #if BITS_PER_LONG == 32
/*
 * fls64, 32-bit variant.
 * NOTE(review): the body is missing from this listing — presumably it
 * tests the high 32-bit half first, then falls back to fls() on the
 * low half. Restore from the original header.
 */
194 static inline int fls64(__u64 x)
201 #elif BITS_PER_LONG == 64
/*
 * fls64, 64-bit variant: 1-based index of the most significant set
 * bit, 0 for x == 0.
 * NOTE(review): the guard line returning 0 when x == 0 (per the
 * contract documented above) and the closing #endif are missing from
 * this listing.
 */
202 static inline int fls64(__u64 x)
206 return __ilog2(x) + 1;
209 #error BITS_PER_LONG not 32 or 64
/*
 * __ilog2_u64 - 64-bit counterpart of __ilog2 (position of the most
 * significant set bit).
 * NOTE(review): the body is missing from this listing — restore it
 * from the original header.
 */
212 static inline int __ilog2_u64(u64 n)
217 static inline int ffs64(u64 x)
219 return __ilog2_u64(x & -x) + 1ull;
225 * ffs: find first bit set. This is defined the same way as
226 * the libc and compiler builtin ffs routines, therefore
227 * differs in spirit from the above ffz (man ffs).
/*
 * ffs - find first set bit, with the same contract as the libc/compiler
 * builtin ffs(): 1-based index of the lowest set bit, 0 if x == 0.
 */
extern __inline__ int ffs(int x)
{
	int lowest = x & -x; /* isolate the least significant set bit */

	return __ilog2(lowest) + 1;
}
236 * hweightN: returns the hamming weight (i.e. the number
237 * of bits set) of a N-bit word
/* Population count (Hamming weight): delegate to the portable
   generic_hweight* helpers — NOTE(review): their definitions are not
   visible in this listing (normally from linux/bitops.h). */
240 #define hweight32(x) generic_hweight32(x)
241 #define hweight16(x) generic_hweight16(x)
242 #define hweight8(x) generic_hweight8(x)
244 #endif /* __KERNEL__ */
247 * This implementation of find_{first,next}_zero_bit was stolen from
248 * Linus' asm-alpha/bitops.h.
/* find_first_zero_bit is find_next_zero_bit starting from bit 0. */
250 #define find_first_zero_bit(addr, size) \
251 find_next_zero_bit((addr), (size), 0)
/*
 * find_next_zero_bit - return the index of the first zero bit at or
 * after 'offset' in a bitmap of 'size' bits, scanning 32 bits at a
 * time (Alpha-derived implementation, per the comment above).
 * NOTE(review): many interior lines (offset masking, the word-scan
 * loop, tail handling, and the 'found'/size clamping paths) are
 * missing from this listing — restore from the original header.
 */
253 extern __inline__ unsigned long find_next_zero_bit(void * addr,
254 unsigned long size, unsigned long offset)
/* Start at the 32-bit word containing 'offset'; 'result' tracks the
   bit index of that word's bit 0. */
256 unsigned int * p = ((unsigned int *) addr) + (offset >> 5);
257 unsigned int result = offset & ~31UL;
/* Pretend the bits below 'offset' in the first word are ones so they
   are never reported as zero. */
266 tmp |= ~0UL >> (32-offset);
/* A word != all-ones contains at least one zero bit. */
275 if ((tmp = *p++) != ~0U)
/* ffz() locates the zero bit inside the final word. */
286 return result + ffz(tmp);
290 #define _EXT2_HAVE_ASM_BITOPS_
294 * test_and_{set,clear}_bit guarantee atomicity without
295 * disabling interrupts.
/* nr ^ 0x18 (i.e. nr ^ 24) swaps the byte index within each 32-bit
   word, remapping ext2's little-endian bit numbering onto this
   big-endian CPU's word bit numbering while keeping bit-in-byte. */
297 #define ext2_set_bit(nr, addr) test_and_set_bit((nr) ^ 0x18, addr)
298 #define ext2_clear_bit(nr, addr) test_and_clear_bit((nr) ^ 0x18, addr)
/*
 * ext2_set_bit, non-atomic byte-wise variant (non-__KERNEL__ path):
 * set bit 'nr' in a little-endian ext2 bitmap and return its previous
 * value (byte nr >> 3, bit nr & 7).
 * NOTE(review): this listing is missing the local declarations of
 * mask/oldbit, the ADDR += nr >> 3 advance, the *ADDR |= mask store,
 * and the return oldbit — restore from the original header.
 */
301 extern __inline__ int ext2_set_bit(int nr, void * addr)
304 unsigned char *ADDR = (unsigned char *) addr;
308 mask = 1 << (nr & 0x07);
/* Capture the bit's prior state before modifying the byte. */
309 oldbit = (*ADDR & mask) ? 1 : 0;
/*
 * ext2_clear_bit, non-atomic byte-wise variant: clear bit 'nr' in a
 * little-endian ext2 bitmap and return its previous value.
 * NOTE(review): this listing is missing the local declarations of
 * mask/oldbit, the ADDR += nr >> 3 advance, and the return oldbit —
 * restore from the original header.
 */
314 extern __inline__ int ext2_clear_bit(int nr, void * addr)
317 unsigned char *ADDR = (unsigned char *) addr;
321 mask = 1 << (nr & 0x07);
322 oldbit = (*ADDR & mask) ? 1 : 0;
/* Clear the bit in place (non-atomic read-modify-write). */
323 *ADDR = *ADDR & ~mask;
326 #endif /* __KERNEL__ */
/*
 * ext2_test_bit - non-atomic read of bit 'nr' in a little-endian
 * ext2 bitmap viewed as a byte stream: byte index nr >> 3, bit
 * within the byte nr & 7.  Returns 1 if set, 0 otherwise.
 */
extern __inline__ int ext2_test_bit(int nr, __const__ void * addr)
{
	__const__ unsigned char *bytes = (__const__ unsigned char *) addr;
	unsigned char byte = bytes[nr >> 3];

	return (byte >> (nr & 7)) & 1;
}
336 * This implementation of ext2_find_{first,next}_zero_bit was stolen from
337 * Linus' asm-alpha/bitops.h and modified for a big-endian machine.
/* ext2_find_first_zero_bit is ext2_find_next_zero_bit from bit 0. */
340 #define ext2_find_first_zero_bit(addr, size) \
341 ext2_find_next_zero_bit((addr), (size), 0)
/*
 * ext2_find_next_zero_bit - like find_next_zero_bit, but for
 * little-endian ext2 bitmaps on this big-endian machine: each 32-bit
 * word is byte-swapped (cpu_to_le32p) before being scanned.
 * NOTE(review): many interior lines (the size/offset loop structure
 * and the found/size-clamp paths) are missing from this listing —
 * restore from the original header.
 */
343 static __inline__ unsigned long ext2_find_next_zero_bit(void *addr,
344 unsigned long size, unsigned long offset)
/* Start at the word containing 'offset'; 'result' is that word's
   bit-0 index. */
346 unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
347 unsigned int result = offset & ~31UL;
/* Byte-swap the first partial word, then mask off bits below
   'offset' by forcing them to one. */
355 tmp = cpu_to_le32p(p++);
356 tmp |= ~0UL >> (32-offset);
/* A swapped word != all-ones contains a zero bit. */
365 if ((tmp = cpu_to_le32p(p++)) != ~0U)
372 tmp = cpu_to_le32p(p);
376 return result + ffz(tmp);
379 /* Bitmap functions for the minix filesystem. */
/* Minix bitmaps use the same little-endian bit layout as ext2 here,
   so all operations delegate to the ext2 helpers above; minix_set_bit
   discards the test_and_set return value via the (void) cast. */
380 #define minix_test_and_set_bit(nr,addr) ext2_set_bit(nr,addr)
381 #define minix_set_bit(nr,addr) ((void)ext2_set_bit(nr,addr))
382 #define minix_test_and_clear_bit(nr,addr) ext2_clear_bit(nr,addr)
383 #define minix_test_bit(nr,addr) ext2_test_bit(nr,addr)
384 #define minix_find_first_zero_bit(addr,size) ext2_find_first_zero_bit(addr,size)
386 #endif /* _PPC_BITOPS_H */