android_kernel_xiaomi_sm8350/include/linux/bitops.h
Stephen Hemminger 3821af2fe1 [FLS64]: generic version
Signed-off-by: Stephen Hemminger <shemminger@osdl.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
2006-01-03 13:11:06 -08:00

#ifndef _LINUX_BITOPS_H
#define _LINUX_BITOPS_H
#include <asm/types.h>

/*
 * ffs: find first bit set. This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
static inline int generic_ffs(int x)
{
	int r = 1;

	if (!x)
		return 0;
	if (!(x & 0xffff)) {
		x >>= 16;
		r += 16;
	}
	if (!(x & 0xff)) {
		x >>= 8;
		r += 8;
	}
	if (!(x & 0xf)) {
		x >>= 4;
		r += 4;
	}
	if (!(x & 3)) {
		x >>= 2;
		r += 2;
	}
	if (!(x & 1)) {
		x >>= 1;
		r += 1;
	}
	return r;
}
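
/*
 * Worked examples (illustrative, assuming a 32-bit int):
 *   generic_ffs(0)          == 0   (no bit set)
 *   generic_ffs(1)          == 1   (bit 0 set, result is 1-based)
 *   generic_ffs(0x10)       == 5   (lowest set bit is bit 4)
 *   generic_ffs(0x40000000) == 31
 */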

/*
 * fls: find last bit set.
 */
static __inline__ int generic_fls(int x)
{
	int r = 32;

	if (!x)
		return 0;
	if (!(x & 0xffff0000u)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xff000000u)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xf0000000u)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xc0000000u)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000u)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}
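
/*
 * Worked examples (illustrative, assuming a 32-bit int):
 *   generic_fls(0)          == 0   (no bit set)
 *   generic_fls(1)          == 1   (highest set bit is bit 0, 1-based)
 *   generic_fls(0x10)       == 5   (highest set bit is bit 4)
 *   generic_fls(0x40000000) == 31
 */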

/*
 * Include this here because some architectures need generic_ffs/fls in
 * scope
 */
#include <asm/bitops.h>

static inline int generic_fls64(__u64 x)
{
	__u32 h = x >> 32;

	if (h)
		return fls(h) + 32;
	return fls(x);
}
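
/*
 * Worked examples (illustrative), using whatever fls() the architecture
 * provides:
 *   generic_fls64(0)             == 0
 *   generic_fls64(0xffffffffull) == 32   (high half clear, falls back to fls)
 *   generic_fls64(1ull << 40)    == 41   (fls(high half) + 32)
 */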

static __inline__ int get_bitmask_order(unsigned int count)
{
	int order;

	order = fls(count);
	return order;	/* We could be slightly more clever with -1 here... */
}
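
/*
 * Illustrative results: fls(count) is the number of bits needed to
 * represent count, e.g.
 *   get_bitmask_order(5) == 3   (101b)
 *   get_bitmask_order(8) == 4   (1000b)
 */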

static __inline__ int get_count_order(unsigned int count)
{
	int order;

	order = fls(count) - 1;
	if (count & (count - 1))
		order++;
	return order;
}
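
/*
 * Illustrative results: the order of the smallest power of two that is
 * >= count, i.e. ceil(log2(count)), e.g.
 *   get_count_order(1) == 0
 *   get_count_order(4) == 2
 *   get_count_order(5) == 3
 */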

/*
 * hweightN: returns the Hamming weight (i.e. the number
 * of bits set) of an N-bit word
 */
static inline unsigned int generic_hweight32(unsigned int w)
{
	unsigned int res = (w & 0x55555555) + ((w >> 1) & 0x55555555);
	res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
	res = (res & 0x0F0F0F0F) + ((res >> 4) & 0x0F0F0F0F);
	res = (res & 0x00FF00FF) + ((res >> 8) & 0x00FF00FF);
	return (res & 0x0000FFFF) + ((res >> 16) & 0x0000FFFF);
}
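
/*
 * Illustrative results:
 *   generic_hweight32(0)          == 0
 *   generic_hweight32(7)          == 3
 *   generic_hweight32(0xF0F0F0F0) == 16
 */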

static inline unsigned int generic_hweight16(unsigned int w)
{
	unsigned int res = (w & 0x5555) + ((w >> 1) & 0x5555);
	res = (res & 0x3333) + ((res >> 2) & 0x3333);
	res = (res & 0x0F0F) + ((res >> 4) & 0x0F0F);
	return (res & 0x00FF) + ((res >> 8) & 0x00FF);
}

static inline unsigned int generic_hweight8(unsigned int w)
{
	unsigned int res = (w & 0x55) + ((w >> 1) & 0x55);
	res = (res & 0x33) + ((res >> 2) & 0x33);
	return (res & 0x0F) + ((res >> 4) & 0x0F);
}
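
/*
 * Illustrative results for the narrower variants:
 *   generic_hweight16(0xFF00) == 8
 *   generic_hweight8(0xA5)    == 4   (1010 0101b)
 */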

static inline unsigned long generic_hweight64(__u64 w)
{
#if BITS_PER_LONG < 64
	return generic_hweight32((unsigned int)(w >> 32)) +
				generic_hweight32((unsigned int)w);
#else
	u64 res;
	res = (w & 0x5555555555555555ul) + ((w >> 1) & 0x5555555555555555ul);
	res = (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul);
	res = (res & 0x0F0F0F0F0F0F0F0Ful) + ((res >> 4) & 0x0F0F0F0F0F0F0F0Ful);
	res = (res & 0x00FF00FF00FF00FFul) + ((res >> 8) & 0x00FF00FF00FF00FFul);
	res = (res & 0x0000FFFF0000FFFFul) + ((res >> 16) & 0x0000FFFF0000FFFFul);
	return (res & 0x00000000FFFFFFFFul) + ((res >> 32) & 0x00000000FFFFFFFFul);
#endif
}
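
/*
 * Illustrative results (either branch gives the same answer):
 *   generic_hweight64(0)                     == 0
 *   generic_hweight64(0xFFFFFFFF00000000ull) == 32
 *   generic_hweight64(0x8000000000000001ull) == 2
 */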

static inline unsigned long hweight_long(unsigned long w)
{
	return sizeof(w) == 4 ? generic_hweight32(w) : generic_hweight64(w);
}
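
/*
 * hweight_long() picks the 32- or 64-bit popcount based on sizeof(long),
 * so e.g. hweight_long(0xFFUL) == 8 on both 32-bit and 64-bit builds.
 */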

/**
 * rol32 - rotate a 32-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u32 rol32(__u32 word, unsigned int shift)
{
	return (word << shift) | (word >> (32 - shift));
}
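
/*
 * Example: rol32(0x80000001, 1) == 0x00000003. Note that a shift of 0
 * would evaluate word >> 32, which is undefined for a 32-bit type, so
 * shift should stay in the range 1..31 here.
 */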

/**
 * ror32 - rotate a 32-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u32 ror32(__u32 word, unsigned int shift)
{
	return (word >> shift) | (word << (32 - shift));
}
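
/*
 * Example: ror32(0x00000003, 1) == 0x80000001. As with rol32(), a shift
 * of 0 would evaluate word << 32 and is best avoided.
 */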
#endif