/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 1999, 2000  Ralf Baechle (ralf@gnu.org)
 * Copyright (c) 2000  Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#include <linux/types.h>
#include <asm/byteorder.h>		/* sigh ... */

#ifdef __KERNEL__

#include <asm/sgidefs.h>
#include <asm/system.h>

#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/__ffs.h>

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()
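
/*
 * Illustrative example (lock_word is a hypothetical flag word, not part
 * of this header): when clear_bit() releases a lock-like flag, the
 * barrier keeps critical-section accesses from being reordered past the
 * clearing store:
 *
 *	while (test_and_set_bit(0, &lock_word))
 *		;				spin until we own bit 0
 *	... critical section ...
 *	smp_mb__before_clear_bit();
 *	clear_bit(0, &lock_word);		release
 */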

/*
 * Only disable interrupt for kernel mode stuff to keep usermode stuff
 * that dares to use kernel include files alive.
 */
#define __bi_flags unsigned long flags
#define __bi_cli() __cli()
#define __bi_save_flags(x) __save_flags(x)
#define __bi_save_and_cli(x) __save_and_cli(x)
#define __bi_restore_flags(x) __restore_flags(x)
#else
#define __bi_flags
#define __bi_cli()
#define __bi_save_flags(x)
#define __bi_save_and_cli(x)
#define __bi_restore_flags(x)
#endif /* __KERNEL__ */

#ifdef CONFIG_CPU_HAS_LLSC

#include <asm/mipsregs.h>

/*
 * These functions for MIPS ISA > 1 are interrupt and SMP proof and
 * interrupt friendly.
 */

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void
set_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp;

	__asm__ __volatile__(
		"1:\tll\t%0, %1\t\t# set_bit\n\t"
		"or\t%0, %2\n\t"
		"sc\t%0, %1\n\t"
		"beqz\t%0, 1b"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & 0x1f)), "m" (*m));
}

/*
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __set_bit(int nr, volatile void * addr)
{
	unsigned long * m = ((unsigned long *) addr) + (nr >> 5);

	*m |= 1UL << (nr & 31);
}
#define PLATFORM__SET_BIT

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void
clear_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp;

	__asm__ __volatile__(
		"1:\tll\t%0, %1\t\t# clear_bit\n\t"
		"and\t%0, %2\n\t"
		"sc\t%0, %1\n\t"
		"beqz\t%0, 1b"
		: "=&r" (temp), "=m" (*m)
		: "ir" (~(1UL << (nr & 0x1f))), "m" (*m));
}

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void
change_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp;

	__asm__ __volatile__(
		"1:\tll\t%0, %1\t\t# change_bit\n\t"
		"xor\t%0, %2\n\t"
		"sc\t%0, %1\n\t"
		"beqz\t%0, 1b"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & 0x1f)), "m" (*m));
}

/*
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to toggle
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __change_bit(int nr, volatile void * addr)
{
	unsigned long * m = ((unsigned long *) addr) + (nr >> 5);

	*m ^= 1UL << (nr & 31);
}

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int
test_and_set_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp, res;

	__asm__ __volatile__(
		".set\tnoreorder\t\t# test_and_set_bit\n"
		"1:\tll\t%0, %1\n\t"
		"or\t%2, %0, %3\n\t"
		"sc\t%2, %1\n\t"
		"beqz\t%2, 1b\n\t"
		" and\t%2, %0, %3\n\t"
		".set\treorder"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & 0x1f)), "m" (*m)
		: "memory");

	return res != 0;
}

/*
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
{
	int mask, retval;
	volatile int *a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a |= mask;

	return retval;
}
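
/*
 * Note: the __ variants do a plain load/modify/store with no ll/sc
 * sequence, so they are only safe on bitmaps that are private to one
 * context or already serialized, e.g. by a lock the caller holds.
 */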

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int
test_and_clear_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp, res;

	__asm__ __volatile__(
		".set\tnoreorder\t\t# test_and_clear_bit\n"
		"1:\tll\t%0, %1\n\t"
		"or\t%2, %0, %3\n\t"
		"xor\t%2, %3\n\t"
		"sc\t%2, %1\n\t"
		"beqz\t%2, 1b\n\t"
		" and\t%2, %0, %3\n\t"
		".set\treorder"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & 0x1f)), "m" (*m)
		: "memory");

	return res != 0;
}

/*
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
{
	int mask, retval;
	volatile int *a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a &= ~mask;

	return retval;
}

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int
test_and_change_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp, res;

	__asm__ __volatile__(
		".set\tnoreorder\t\t# test_and_change_bit\n"
		"1:\tll\t%0, %1\n\t"
		"xor\t%2, %0, %3\n\t"
		"sc\t%2, %1\n\t"
		"beqz\t%2, 1b\n\t"
		" and\t%2, %0, %3\n\t"
		".set\treorder"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & 0x1f)), "m" (*m)
		: "memory");

	return res != 0;
}

/*
 * __test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
{
	int mask, retval;
	volatile int *a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a ^= mask;

	return retval;
}

#else /* MIPS I */
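
/*
 * Note: without ll/sc, atomicity below is provided by masking interrupts
 * around each read-modify-write through the __bi_* macros.  That is only
 * sufficient on a uniprocessor; SMP configurations are expected to have
 * CONFIG_CPU_HAS_LLSC set and use the versions above.
 */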

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void * addr)
{
	int mask;
	volatile int *a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	*a |= mask;
	__bi_restore_flags(flags);
}

/*
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __set_bit(int nr, volatile void * addr)
{
	int mask;
	volatile int *a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	*a |= mask;
}

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void * addr)
{
	int mask;
	volatile int *a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	*a &= ~mask;
	__bi_restore_flags(flags);
}

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void change_bit(int nr, volatile void * addr)
{
	int mask;
	volatile int *a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	*a ^= mask;
	__bi_restore_flags(flags);
}

/*
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to toggle
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __change_bit(int nr, volatile void * addr)
{
	unsigned long * m = ((unsigned long *) addr) + (nr >> 5);

	*m ^= 1UL << (nr & 31);
}

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_set_bit(int nr, volatile void * addr)
{
	int mask, retval;
	volatile int *a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	retval = (mask & *a) != 0;
	*a |= mask;
	__bi_restore_flags(flags);

	return retval;
}

/*
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
{
	int mask, retval;
	volatile int *a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a |= mask;

	return retval;
}

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
	int mask, retval;
	volatile int *a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	retval = (mask & *a) != 0;
	*a &= ~mask;
	__bi_restore_flags(flags);

	return retval;
}

/*
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
{
	int mask, retval;
	volatile int *a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a &= ~mask;

	return retval;
}

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_change_bit(int nr, volatile void * addr)
{
	int mask, retval;
	volatile int *a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	retval = (mask & *a) != 0;
	*a ^= mask;
	__bi_restore_flags(flags);

	return retval;
}

/*
 * __test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
{
	int mask, retval;
	volatile int *a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a ^= mask;

	return retval;
}

#undef __bi_flags
#undef __bi_cli
#undef __bi_save_flags
#undef __bi_save_and_cli
#undef __bi_restore_flags

#endif /* MIPS I */

/*
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static __inline__ int test_bit(int nr, const volatile void *addr)
{
	return ((1UL << (nr & 31)) & (((const unsigned int *) addr)[nr >> 5])) != 0;
}
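
/*
 * Illustrative example of the bit numbering (values hypothetical):
 *
 *	unsigned long map[2] = { 0, 0 };
 *	set_bit(37, map);	sets bit 5 of map[1]
 *	test_bit(37, map);	returns 1
 *	test_bit(36, map);	returns 0
 */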

#ifndef __MIPSEB__

/* Little endian versions. */

/*
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first zero bit, not the number of the byte
 * containing a bit.
 */
static __inline__ int find_first_zero_bit (void *addr, unsigned size)
{
	unsigned long dummy;
	int res;

	if (!size)
		return 0;

	__asm__ (".set\tnoreorder\n\t"
		".set\tnoat\n"
		"1:\tsubu\t$1,%6,%0\n\t"
		"blez\t$1,2f\n\t"
		"lw\t$1,(%5)\n\t"
		"addiu\t%5,4\n\t"
#if (_MIPS_ISA == _MIPS_ISA_MIPS2 ) || (_MIPS_ISA == _MIPS_ISA_MIPS3 ) || \
    (_MIPS_ISA == _MIPS_ISA_MIPS4 ) || (_MIPS_ISA == _MIPS_ISA_MIPS5 ) || \
    (_MIPS_ISA == _MIPS_ISA_MIPS32) || (_MIPS_ISA == _MIPS_ISA_MIPS64)
		"beql\t%1,$1,1b\n\t"
		"addiu\t%0,32\n\t"
#else
		"addiu\t%0,32\n\t"
		"beq\t%1,$1,1b\n\t"
		"nop\n\t"
		"subu\t%0,32\n\t"
#endif
#ifdef __MIPSEB__
#error "Fix this for big endian"
#endif /* __MIPSEB__ */
		"li\t%1,1\n"
		"1:\tand\t%2,$1,%1\n\t"
		"beqz\t%2,2f\n\t"
		"sll\t%1,%1,1\n\t"
		"bnez\t%1,1b\n\t"
		"addiu\t%0,1\n\t"
		".set\tat\n\t"
		".set\treorder\n"
		"2:"
		: "=r" (res), "=r" (dummy), "=r" (addr)
		: "0" ((signed int) 0), "1" ((unsigned int) 0xffffffff),
		  "2" (addr), "r" (size)
		: "$1");

	return res;
}

/*
 * find_next_zero_bit - find the first zero bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
static __inline__ int find_next_zero_bit (void * addr, int size, int offset)
{
	unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
	int set = 0, bit = offset & 31, res;
	unsigned long dummy;

	if (bit) {
		/*
		 * Look for zero in first byte
		 */
#ifdef __MIPSEB__
#error "Fix this for big endian byte order"
#endif
		__asm__(".set\tnoreorder\n\t"
			".set\tnoat\n"
			"1:\tand\t$1,%4,%1\n\t"
			"beqz\t$1,1f\n\t"
			"sll\t%1,%1,1\n\t"
			"bnez\t%1,1b\n\t"
			"addiu\t%0,1\n\t"
			".set\tat\n\t"
			".set\treorder\n"
			"1:"
			: "=r" (set), "=r" (dummy)
			: "0" (0), "1" (1 << bit), "r" (*p)
			: "$1");
		if (set < (32 - bit))
			return set + offset;
		set = 32 - bit;
		p++;
	}
	/*
	 * No zero yet, search remaining full bytes for a zero
	 */
	res = find_first_zero_bit(p, size - 32 * (p - (unsigned int *) addr));
	return offset + set + res;
}

#endif /* !(__MIPSEB__) */

/*
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static __inline__ unsigned long ffz(unsigned long word)
{
	unsigned long __res;
	unsigned int mask = 1;

	__asm__ (
		".set\tnoreorder\n\t"
		".set\tnoat\n\t"
		"move\t%0,$0\n"
		"1:\tand\t$1,%2,%1\n\t"
		"beqz\t$1,2f\n\t"
		"sll\t%1,1\n\t"
		"bnez\t%1,1b\n\t"
		"addiu\t%0,1\n\t"
		".set\tat\n\t"
		".set\treorder\n"
		"2:"
		: "=&r" (__res), "=r" (mask)
		: "r" (word), "1" (mask)
		: "$1");

	return __res;
}

#ifdef __KERNEL__

/*
 * hweightN - returns the hamming weight of a N-bit word
 * @x: the word to weigh
 *
 * The Hamming Weight of a number is the total number of bits set in it.
 */

#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x)  generic_hweight8(x)

#endif /* __KERNEL__ */
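
/*
 * Example: hweight8(0xa5) is 4, because 0xa5 = 10100101 has four set
 * bits.  The generic_hweight* helpers are the portable C versions from
 * the generic bitops support code.
 */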

#ifdef __MIPSEB__
/*
 * find_next_zero_bit - find the first zero bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
static __inline__ int find_next_zero_bit(void *addr, int size, int offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		tmp = *(p++);
		tmp |= ~0UL >> (32-offset);
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp |= ~0UL << size;
found_middle:
	return result + ffz(tmp);
}
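
/*
 * Flow of the function above: "found_first" forces the bits beyond
 * @size in a final partial word to one, so a stray zero past the end
 * of the region is not reported; "found_middle" handles a full word
 * already known to contain a zero bit.
 */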

/* Linus sez that gcc can optimize the following correctly, we'll see if this
 * holds on the Sparc as it does for the ALPHA.
 */

#if 0 /* Fool kernel-doc since it doesn't do macros yet */
/*
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first zero bit, not the number of the byte
 * containing a bit.
 */
static int find_first_zero_bit (void *addr, unsigned size);
#endif

#define find_first_zero_bit(addr, size) \
	find_next_zero_bit((addr), (size), 0)

#endif /* (__MIPSEB__) */

/* Now for the ext2 filesystem bit operations and helper routines. */

#ifdef __MIPSEB__
static __inline__ int ext2_set_bit(int nr, void * addr)
{
	int mask, retval, flags;
	unsigned char *ADDR = (unsigned char *) addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	save_and_cli(flags);
	retval = (mask & *ADDR) != 0;
	*ADDR |= mask;
	restore_flags(flags);
	return retval;
}

static __inline__ int ext2_clear_bit(int nr, void * addr)
{
	int mask, retval, flags;
	unsigned char *ADDR = (unsigned char *) addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	save_and_cli(flags);
	retval = (mask & *ADDR) != 0;
	*ADDR &= ~mask;
	restore_flags(flags);
	return retval;
}

static __inline__ int ext2_test_bit(int nr, const void * addr)
{
	int mask;
	const unsigned char *ADDR = (const unsigned char *) addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	return ((mask & *ADDR) != 0);
}

#define ext2_find_first_zero_bit(addr, size) \
	ext2_find_next_zero_bit((addr), (size), 0)

static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		/* We hold the little endian value in tmp, but then the
		 * shift is illegal. So we could keep a big endian value
		 * in tmp, like this:
		 *
		 * tmp = __swab32(*(p++));
		 * tmp |= ~0UL >> (32-offset);
		 *
		 * but this would decrease performance, so we change the
		 * shift:
		 */
		tmp = *(p++);
		tmp |= __swab32(~0UL >> (32-offset));
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	/* tmp is little endian, so we would have to swab the shift,
	 * see above. But then we have to swab tmp below for ffz, so
	 * we might as well do this here.
	 */
	return result + ffz(__swab32(tmp) | (~0UL << size));
found_middle:
	return result + ffz(__swab32(tmp));
}
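
/*
 * Note: ext2 bitmaps are little-endian on disk, so on this big-endian
 * configuration whole words are scanned raw and only byte-swapped with
 * __swab32() at the point where a bit index has to be computed, which
 * keeps the common scanning loop cheap.
 */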
#else /* !(__MIPSEB__) */

/* Native ext2 byte ordering, just collapse using defines. */
#define ext2_set_bit(nr, addr) test_and_set_bit((nr), (addr))
#define ext2_clear_bit(nr, addr) test_and_clear_bit((nr), (addr))
#define ext2_test_bit(nr, addr) test_bit((nr), (addr))
#define ext2_find_first_zero_bit(addr, size) find_first_zero_bit((addr), (size))
#define ext2_find_next_zero_bit(addr, size, offset) \
	find_next_zero_bit((addr), (size), (offset))

#endif /* !(__MIPSEB__) */

/*
 * Bitmap functions for the minix filesystem.
 * FIXME: These assume that Minix uses the native byte/bitorder.
 * This limits the Minix filesystem's value for data exchange very much.
 */
#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr) set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)

#endif /* _ASM_BITOPS_H */