1da177e4c3
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
128 lines
2.7 KiB
C
#include <linux/bitops.h>

/**
 * find_next_bit - find the next set bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
                            unsigned long offset)
{
        const unsigned long *p = addr + (offset >> 6);
        unsigned long result = offset & ~63UL;
        unsigned long tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset &= 63UL;
        if (offset) {
                /* Partial first word: mask off bits below 'offset'. */
                tmp = *(p++);
                tmp &= (~0UL << offset);
                if (size < 64)
                        goto found_first;
                if (tmp)
                        goto found_middle;
                size -= 64;
                result += 64;
        }
        /* Scan the remaining whole 64-bit words. */
        while (size & ~63UL) {
                if ((tmp = *(p++)))
                        goto found_middle;
                result += 64;
                size -= 64;
        }
        if (!size)
                return result;
        /* Partial last word. */
        tmp = *p;

found_first:
        tmp &= (~0UL >> (64 - size));
        if (tmp == 0UL)                 /* Are any bits set? */
                return result + size;   /* Nope. */
found_middle:
        return result + __ffs(tmp);
}
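
/*
 * Illustrative usage sketch (not part of the original file): callers
 * typically walk all set bits by feeding the previous result plus one
 * back in as the new offset, stopping once the return value reaches
 * 'size'. The helper name below is hypothetical.
 */
static inline unsigned long count_set_bits_example(const unsigned long *bitmap,
                                                   unsigned long size)
{
        unsigned long bit, count = 0;

        for (bit = find_next_bit(bitmap, size, 0); bit < size;
             bit = find_next_bit(bitmap, size, bit + 1))
                count++;
        return count;
}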

/* find_next_zero_bit() finds the first zero bit in a bit string of length
 * 'size' bits, starting the search at bit 'offset'. This is largely based
 * on Linus's ALPHA routines, which are pretty portable BTW.
 */

unsigned long find_next_zero_bit(const unsigned long *addr,
                                 unsigned long size, unsigned long offset)
{
        const unsigned long *p = addr + (offset >> 6);
        unsigned long result = offset & ~63UL;
        unsigned long tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset &= 63UL;
        if (offset) {
                tmp = *(p++);
                tmp |= ~0UL >> (64 - offset);
                if (size < 64)
                        goto found_first;
                if (~tmp)
                        goto found_middle;
                size -= 64;
                result += 64;
        }
        while (size & ~63UL) {
                if (~(tmp = *(p++)))
                        goto found_middle;
                result += 64;
                size -= 64;
        }
        if (!size)
                return result;
        tmp = *p;

found_first:
        tmp |= ~0UL << size;
        if (tmp == ~0UL)                /* Are any bits zero? */
                return result + size;   /* Nope. */
found_middle:
        return result + ffz(tmp);
}
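
/*
 * Illustrative usage sketch (not part of the original file): a common
 * pattern is "grab the first free slot in a bitmap"; a return value of
 * 'size' or more means the map is full. The helper name is hypothetical;
 * __set_bit() is the usual non-atomic setter from <linux/bitops.h>, and
 * the caller is assumed to provide any locking needed.
 */
static inline long alloc_free_slot_example(unsigned long *map, unsigned long size)
{
        unsigned long slot = find_next_zero_bit(map, size, 0);

        if (slot >= size)
                return -1;      /* bitmap is full */
        __set_bit(slot, map);   /* claim the slot (non-atomic) */
        return slot;
}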

/*
 * find_next_zero_le_bit - little-endian-bitmap variant of the above.
 * Each 64-bit word is byte-swapped with __swab64p() before it is
 * scanned, so zero bits are found in little-endian bit order (as used
 * by ext2-style on-disk bitmaps) on a big-endian host.
 */
unsigned long find_next_zero_le_bit(unsigned long *addr, unsigned long size, unsigned long offset)
{
        unsigned long *p = addr + (offset >> 6);
        unsigned long result = offset & ~63UL;
        unsigned long tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset &= 63UL;
        if (offset) {
                tmp = __swab64p(p++);
                tmp |= (~0UL >> (64 - offset));
                if (size < 64)
                        goto found_first;
                if (~tmp)
                        goto found_middle;
                size -= 64;
                result += 64;
        }
        while (size & ~63UL) {
                if (~(tmp = __swab64p(p++)))
                        goto found_middle;
                result += 64;
                size -= 64;
        }
        if (!size)
                return result;
        tmp = __swab64p(p);
found_first:
        tmp |= (~0UL << size);
        if (tmp == ~0UL)                /* Are any bits zero? */
                return result + size;   /* Nope. */
found_middle:
        return result + ffz(tmp);
}
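
/*
 * Illustrative usage sketch (not part of the original file): scanning a
 * buffer that holds a little-endian on-disk bitmap for a free entry.
 * The helper name and 'bits' parameter are hypothetical; the buffer is
 * assumed to be unsigned-long-aligned, as the 64-bit loads above require.
 */
static inline long first_free_le_bit_example(unsigned long *disk_map,
                                             unsigned long bits)
{
        unsigned long bit = find_next_zero_le_bit(disk_map, bits, 0);

        return bit < bits ? (long)bit : -1;     /* -1: no zero bit found */
}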