/*
 * include/linux/bitops.h — generic bit-manipulation helpers
 * (from linux-2.6; web-view commit annotations stripped)
 */
1#ifndef _LINUX_BITOPS_H
2#define _LINUX_BITOPS_H
3#include <asm/types.h>
4
1da177e4
LT
5/*
6 * Include this here because some architectures need generic_ffs/fls in
7 * scope
8 */
9#include <asm/bitops.h>
10
/**
 * get_bitmask_order - number of bits needed to represent a value
 * @count: value to examine
 *
 * Returns fls(count): 0 when @count is 0, otherwise the 1-based
 * position of the most significant set bit.
 *
 * Uses "static inline" to match the rest of this header instead of
 * the archaic "__inline__" spelling.
 */
static inline int get_bitmask_order(unsigned int count)
{
	/* We could be slightly more clever with -1 here... */
	return fls(count);
}
18
/**
 * get_count_order - ceil(log2(count))
 * @count: value to examine
 *
 * Returns the smallest order n such that (1 << n) >= @count.
 * Note: returns -1 for @count == 0 (fls(0) is 0).
 *
 * Uses "static inline" to match the rest of this header instead of
 * the archaic "__inline__" spelling.
 */
static inline int get_count_order(unsigned int count)
{
	int order = fls(count) - 1;

	/* Round up when @count is not an exact power of two. */
	if (count & (count - 1))
		order++;
	return order;
}
28
/*
 * hweight_long - count the set bits (population count) in @w.
 *
 * The sizeof() comparison is a compile-time constant, so the
 * compiler keeps only the branch matching the platform word size.
 */
static inline unsigned long hweight_long(unsigned long w)
{
	if (sizeof(w) == 4)
		return hweight32(w);
	return hweight64(w);
}
33
45f8bde0 34/**
1da177e4 35 * rol32 - rotate a 32-bit value left
1da177e4
LT
36 * @word: value to rotate
37 * @shift: bits to roll
38 */
39static inline __u32 rol32(__u32 word, unsigned int shift)
40{
41 return (word << shift) | (word >> (32 - shift));
42}
43
45f8bde0 44/**
1da177e4 45 * ror32 - rotate a 32-bit value right
1da177e4
LT
46 * @word: value to rotate
47 * @shift: bits to roll
48 */
49static inline __u32 ror32(__u32 word, unsigned int shift)
50{
51 return (word >> shift) | (word << (32 - shift));
52}
53
/*
 * fls_long - find last (most significant) set bit in @l.
 *
 * The sizeof() test is a compile-time constant, selecting the
 * 32-bit or 64-bit fls variant for the platform word size.
 */
static inline unsigned fls_long(unsigned long l)
{
	return sizeof(l) == 4 ? fls(l) : fls64(l);
}
60
1da177e4 61#endif