/*
 * Copyright (c) 1994 - 1997, 1999, 2000  Ralf Baechle (ralf@gnu.org)
 * Copyright (c) 2000  Silicon Graphics, Inc.
 *
 * SPDX-License-Identifier: GPL-2.0
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#include <linux/types.h>
#include <asm/byteorder.h>		/* sigh ... */

#ifdef __KERNEL__

#include <asm/sgidefs.h>
#include <asm/system.h>

#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/__ffs.h>

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()
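
/*
 * Usage sketch (illustrative, not part of the original header): when
 * clear_bit() is used to release a lock bit, the caller must supply the
 * ordering explicitly.  Assuming a hypothetical lock word `lock_word'
 * and bit number LOCK_BIT:
 *
 *	smp_mb__before_clear_bit();
 *	clear_bit(LOCK_BIT, &lock_word);
 */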

/*
 * Only disable interrupt for kernel mode stuff to keep usermode stuff
 * that dares to use kernel include files alive.
 */
#define __bi_flags			unsigned long flags
#define __bi_cli()			__cli()
#define __bi_save_flags(x)		__save_flags(x)
#define __bi_save_and_cli(x)		__save_and_cli(x)
#define __bi_restore_flags(x)		__restore_flags(x)
#else
#define __bi_flags
#define __bi_cli()
#define __bi_save_flags(x)
#define __bi_save_and_cli(x)
#define __bi_restore_flags(x)
#endif /* __KERNEL__ */

#ifdef CONFIG_CPU_HAS_LLSC

#include <asm/mipsregs.h>

/*
 * These functions for MIPS ISA > 1 are interrupt and SMP proof and
 * interrupt friendly.
 */

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void
set_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp;

	__asm__ __volatile__(
		"1:\tll\t%0, %1\t\t# set_bit\n\t"
		"or\t%0, %2\n\t"
		"sc\t%0, %1\n\t"
		"beqz\t%0, 1b"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & 0x1f)), "m" (*m));
}
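
/*
 * Usage sketch (illustrative): marking slot `nr' of a hypothetical
 * allocation bitmap as busy.  No extra locking is needed; the ll/sc
 * loop above retries until the read-modify-write completes atomically:
 *
 *	static unsigned long inuse_map[4];	(128 bits, hypothetical)
 *
 *	set_bit(nr, inuse_map);
 */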

/*
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __set_bit(int nr, volatile void * addr)
{
	unsigned long * m = ((unsigned long *) addr) + (nr >> 5);

	*m |= 1UL << (nr & 31);
}
#define PLATFORM__SET_BIT

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void
clear_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp;

	__asm__ __volatile__(
		"1:\tll\t%0, %1\t\t# clear_bit\n\t"
		"and\t%0, %2\n\t"
		"sc\t%0, %1\n\t"
		"beqz\t%0, 1b"
		: "=&r" (temp), "=m" (*m)
		: "ir" (~(1UL << (nr & 0x1f))), "m" (*m));
}

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void
change_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp;

	__asm__ __volatile__(
		"1:\tll\t%0, %1\t\t# change_bit\n\t"
		"xor\t%0, %2\n\t"
		"sc\t%0, %1\n\t"
		"beqz\t%0, 1b"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & 0x1f)), "m" (*m));
}

/*
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __change_bit(int nr, volatile void * addr)
{
	unsigned long * m = ((unsigned long *) addr) + (nr >> 5);

	*m ^= 1UL << (nr & 31);
}

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int
test_and_set_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp, res;

	__asm__ __volatile__(
		".set\tnoreorder\t\t# test_and_set_bit\n"
		"1:\tll\t%0, %1\n\t"
		"or\t%2, %0, %3\n\t"
		"sc\t%2, %1\n\t"
		"beqz\t%2, 1b\n\t"
		" and\t%2, %0, %3\n\t"
		".set\treorder"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & 0x1f)), "m" (*m)
		: "memory");

	return res != 0;
}

/*
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
{
	int mask, retval;
	volatile int *a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a |= mask;

	return retval;
}
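
/*
 * Usage sketch (illustrative): the non-atomic variant is only safe when
 * callers serialize themselves, e.g. under a hypothetical spinlock that
 * already guards the bitmap:
 *
 *	spin_lock_irqsave(&map_lock, flags);
 *	was_set = __test_and_set_bit(nr, map);
 *	spin_unlock_irqrestore(&map_lock, flags);
 */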

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int
test_and_clear_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp, res;

	__asm__ __volatile__(
		".set\tnoreorder\t\t# test_and_clear_bit\n"
		"1:\tll\t%0, %1\n\t"
		"or\t%2, %0, %3\n\t"
		"xor\t%2, %3\n\t"
		"sc\t%2, %1\n\t"
		"beqz\t%2, 1b\n\t"
		" and\t%2, %0, %3\n\t"
		".set\treorder"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & 0x1f)), "m" (*m)
		: "memory");

	return res != 0;
}

/*
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
{
	int mask, retval;
	volatile int *a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a &= ~mask;

	return retval;
}

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int
test_and_change_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp, res;

	__asm__ __volatile__(
		".set\tnoreorder\t\t# test_and_change_bit\n"
		"1:\tll\t%0, %1\n\t"
		"xor\t%2, %0, %3\n\t"
		"sc\t%2, %1\n\t"
		"beqz\t%2, 1b\n\t"
		" and\t%2, %0, %3\n\t"
		".set\treorder"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & 0x1f)), "m" (*m)
		: "memory");

	return res != 0;
}

/*
 * __test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
{
	int mask, retval;
	volatile int *a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a ^= mask;

	return retval;
}

#else /* !CONFIG_CPU_HAS_LLSC */

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void * addr)
{
	int mask;
	volatile int *a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	*a |= mask;
	__bi_restore_flags(flags);
}

/*
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __set_bit(int nr, volatile void * addr)
{
	int mask;
	volatile int *a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	*a |= mask;
}

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void * addr)
{
	int mask;
	volatile int *a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	*a &= ~mask;
	__bi_restore_flags(flags);
}

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void change_bit(int nr, volatile void * addr)
{
	int mask;
	volatile int *a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	*a ^= mask;
	__bi_restore_flags(flags);
}

/*
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __change_bit(int nr, volatile void * addr)
{
	unsigned long * m = ((unsigned long *) addr) + (nr >> 5);

	*m ^= 1UL << (nr & 31);
}

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_set_bit(int nr, volatile void * addr)
{
	int mask, retval;
	volatile int *a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	retval = (mask & *a) != 0;
	*a |= mask;
	__bi_restore_flags(flags);

	return retval;
}

/*
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
{
	int mask, retval;
	volatile int *a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a |= mask;

	return retval;
}

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
	int mask, retval;
	volatile int *a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	retval = (mask & *a) != 0;
	*a &= ~mask;
	__bi_restore_flags(flags);

	return retval;
}

/*
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
{
	int mask, retval;
	volatile int *a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a &= ~mask;

	return retval;
}

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_change_bit(int nr, volatile void * addr)
{
	int mask, retval;
	volatile int *a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	retval = (mask & *a) != 0;
	*a ^= mask;
	__bi_restore_flags(flags);

	return retval;
}

/*
 * __test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
{
	int mask, retval;
	volatile int *a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a ^= mask;

	return retval;
}

#undef __bi_flags
#undef __bi_cli
#undef __bi_save_flags
#undef __bi_save_and_cli
#undef __bi_restore_flags

#endif /* !CONFIG_CPU_HAS_LLSC */

/*
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static __inline__ int test_bit(int nr, const volatile void *addr)
{
	return ((1UL << (nr & 31)) & (((const unsigned int *) addr)[nr >> 5])) != 0;
}
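
/*
 * Usage sketch (illustrative): test_bit() only reads, so it pairs with
 * the atomic writers above.  Assuming a hypothetical flag word
 * `dev_flags' and bit number BUSY_BIT:
 *
 *	if (test_bit(BUSY_BIT, &dev_flags))
 *		return -EBUSY;
 */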

#ifndef __MIPSEB__
/* Little endian versions. */

/*
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first zero bit, not the number of the byte
 * containing a bit.
 */
static __inline__ int find_first_zero_bit (void *addr, unsigned size)
{
	unsigned long dummy;
	int res;

	if (!size)
		return 0;

	__asm__ (".set\tnoreorder\n\t"
		".set\tnoat\n"
		"1:\tsubu\t$1,%6,%0\n\t"
		"blez\t$1,2f\n\t"
		"lw\t$1,(%5)\n\t"
		"addiu\t%5,4\n\t"
#if (_MIPS_ISA == _MIPS_ISA_MIPS2 ) || (_MIPS_ISA == _MIPS_ISA_MIPS3 ) || \
    (_MIPS_ISA == _MIPS_ISA_MIPS4 ) || (_MIPS_ISA == _MIPS_ISA_MIPS5 ) || \
    (_MIPS_ISA == _MIPS_ISA_MIPS32) || (_MIPS_ISA == _MIPS_ISA_MIPS64)
		"beql\t%1,$1,1b\n\t"
		"addiu\t%0,32\n\t"
#else
		"addiu\t%0,32\n\t"
		"beq\t%1,$1,1b\n\t"
		"nop\n\t"
		"subu\t%0,32\n\t"
#endif
#ifdef __MIPSEB__
#error "Fix this for big endian"
#endif /* __MIPSEB__ */
		"li\t%1,1\n"
		"1:\tand\t%2,$1,%1\n\t"
		"beqz\t%2,2f\n\t"
		"sll\t%1,%1,1\n\t"
		"bnez\t%1,1b\n\t"
		"addiu\t%0,1\n\t"
		".set\tat\n\t"
		".set\treorder\n"
		"2:"
		: "=r" (res), "=r" (dummy), "=r" (addr)
		: "0" ((signed int) 0), "1" ((unsigned int) 0xffffffff),
		  "2" (addr), "r" (size)
		: "$1");

	return res;
}

/*
 * find_next_zero_bit - find the first zero bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
static __inline__ int find_next_zero_bit (void * addr, int size, int offset)
{
	unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
	int set = 0, bit = offset & 31, res;
	unsigned long dummy;

	if (bit) {
		/*
		 * Look for zero in first byte
		 */
#ifdef __MIPSEB__
#error "Fix this for big endian byte order"
#endif
		__asm__(".set\tnoreorder\n\t"
			".set\tnoat\n"
			"1:\tand\t$1,%4,%1\n\t"
			"beqz\t$1,1f\n\t"
			"sll\t%1,1\n\t"
			"bnez\t%1,1b\n\t"
			"addiu\t%0,1\n\t"
			".set\tat\n\t"
			".set\treorder\n"
			"1:"
			: "=r" (set), "=r" (dummy)
			: "0" (0), "1" (1 << bit), "r" (*p)
			: "$1");
		if (set < (32 - bit))
			return set + offset;
		set = 32 - bit;
		p++;
	}
	/*
	 * No zero yet, search remaining full bytes for a zero
	 */
	res = find_first_zero_bit(p, size - 32 * (p - (unsigned int *) addr));
	return offset + set + res;
}

#endif /* !(__MIPSEB__) */

/*
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static __inline__ unsigned long ffz(unsigned long word)
{
	unsigned int	__res;
	unsigned int	mask = 1;

	__asm__ (
		".set\tnoreorder\n\t"
		".set\tnoat\n\t"
		"move\t%0,$0\n"
		"1:\tand\t$1,%2,%1\n\t"
		"beqz\t$1,2f\n\t"
		"sll\t%1,1\n\t"
		"bnez\t%1,1b\n\t"
		"addiu\t%0,1\n\t"
		".set\tat\n\t"
		".set\treorder\n"
		"2:"
		: "=&r" (__res), "=r" (mask)
		: "r" (word), "1" (mask)
		: "$1");

	return __res;
}
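
/*
 * For reference (illustrative, not the implementation used here): the
 * asm above is equivalent to this portable C loop.  It also makes the
 * "undefined" caveat concrete: for word == ~0UL the loop falls off the
 * top and yields the out-of-range answer 32.
 *
 *	static __inline__ unsigned long ffz_c(unsigned long word)
 *	{
 *		unsigned long bit = 0;
 *
 *		while (word & 1) {
 *			word >>= 1;
 *			bit++;
 *		}
 *		return bit;
 *	}
 */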

#ifdef __KERNEL__

/*
 * hweightN - returns the hamming weight of a N-bit word
 * @x: the word to weigh
 *
 * The Hamming Weight of a number is the total number of bits set in it.
 */

#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x)  generic_hweight8(x)
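
/*
 * For reference (illustrative): generic_hweight32() computes a
 * population count by summing bits in parallel.  One common variant of
 * the reduction (the kernel's generic version may differ in detail):
 *
 *	w = (w & 0x55555555) + ((w >>  1) & 0x55555555);
 *	w = (w & 0x33333333) + ((w >>  2) & 0x33333333);
 *	w = (w & 0x0f0f0f0f) + ((w >>  4) & 0x0f0f0f0f);
 *	w = (w & 0x00ff00ff) + ((w >>  8) & 0x00ff00ff);
 *	w = (w & 0x0000ffff) + ((w >> 16) & 0x0000ffff);
 *
 * After the last step, w holds the number of set bits in the original.
 */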

#endif /* __KERNEL__ */

#ifdef __MIPSEB__

/*
 * find_next_zero_bit - find the first zero bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
static __inline__ int find_next_zero_bit(void *addr, int size, int offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		tmp = *(p++);
		tmp |= ~0UL >> (32-offset);
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp |= ~0UL << size;
found_middle:
	return result + ffz(tmp);
}
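
/*
 * Usage sketch (illustrative): visiting every clear bit of a
 * hypothetical bitmap `map' with `nbits' valid bits:
 *
 *	for (bit = find_next_zero_bit(map, nbits, 0); bit < nbits;
 *	     bit = find_next_zero_bit(map, nbits, bit + 1))
 *		... use the free slot `bit' ...
 */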

/* Linus sez that gcc can optimize the following correctly, we'll see if this
 * holds on MIPS as it does for the Alpha.
 */

#if 0 /* Fool kernel-doc since it doesn't do macros yet */
/*
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first zero bit, not the number of the byte
 * containing a bit.
 */
static int find_first_zero_bit (void *addr, unsigned size);
#endif /* 0 */

#define find_first_zero_bit(addr, size) \
	find_next_zero_bit((addr), (size), 0)

#endif /* (__MIPSEB__) */

/* Now for the ext2 filesystem bit operations and helper routines. */

#ifdef __MIPSEB__

static __inline__ int ext2_set_bit(int nr, void * addr)
{
	int		mask, retval, flags;
	unsigned char	*ADDR = (unsigned char *) addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	save_and_cli(flags);
	retval = (mask & *ADDR) != 0;
	*ADDR |= mask;
	restore_flags(flags);
	return retval;
}

static __inline__ int ext2_clear_bit(int nr, void * addr)
{
	int		mask, retval, flags;
	unsigned char	*ADDR = (unsigned char *) addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	save_and_cli(flags);
	retval = (mask & *ADDR) != 0;
	*ADDR &= ~mask;
	restore_flags(flags);
	return retval;
}

static __inline__ int ext2_test_bit(int nr, const void * addr)
{
	int			mask;
	const unsigned char	*ADDR = (const unsigned char *) addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	return ((mask & *ADDR) != 0);
}

#define ext2_find_first_zero_bit(addr, size) \
	ext2_find_next_zero_bit((addr), (size), 0)

static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		/* We hold the little endian value in tmp, but then the
		 * shift is illegal. So we could keep a big endian value
		 * in tmp, like this:
		 *
		 * tmp = __swab32(*(p++));
		 * tmp |= ~0UL >> (32-offset);
		 *
		 * but this would decrease performance, so we change the
		 * shift instead:
		 */
		tmp = *(p++);
		tmp |= __swab32(~0UL >> (32-offset));
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	/* tmp is little endian, so we would have to swab the shift,
	 * see above. But then we have to swab tmp below for ffz, so
	 * we might as well do this here.
	 */
	return result + ffz(__swab32(tmp) | (~0UL << size));
found_middle:
	return result + ffz(__swab32(tmp));
}
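
/*
 * Note (added for clarity): pre-swapping the mask works because byte
 * swapping distributes over bitwise OR, i.e. for any a and b
 *
 *	__swab32(a | b) == __swab32(a) | __swab32(b)
 *
 * so OR-ing __swab32(mask) into the little endian word and swapping
 * once at the end, in ffz(__swab32(tmp)), finds the same bit as
 * swapping the word first and OR-ing the plain mask.
 */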

#else /* !(__MIPSEB__) */

/* Native ext2 byte ordering, just collapse using defines. */
#define ext2_set_bit(nr, addr) test_and_set_bit((nr), (addr))
#define ext2_clear_bit(nr, addr) test_and_clear_bit((nr), (addr))
#define ext2_test_bit(nr, addr) test_bit((nr), (addr))
#define ext2_find_first_zero_bit(addr, size) find_first_zero_bit((addr), (size))
#define ext2_find_next_zero_bit(addr, size, offset) \
		find_next_zero_bit((addr), (size), (offset))

#endif /* !(__MIPSEB__) */

/*
 * Bitmap functions for the minix filesystem.
 * FIXME: These assume that Minix uses the native byte/bitorder.
 * This limits the Minix filesystem's value for data exchange very much.
 */
#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr) set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)

#endif /* _ASM_BITOPS_H */