#ifndef _LINUX_BYTEORDER_H
#define _LINUX_BYTEORDER_H

#include <linux/types.h>
#include <linux/swab.h>

#if defined(__LITTLE_ENDIAN) && defined(__BIG_ENDIAN)
# error Fix asm/byteorder.h to define one endianness
#endif

#if !defined(__LITTLE_ENDIAN) && !defined(__BIG_ENDIAN)
# error Fix asm/byteorder.h to define arch endianness
#endif
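
/*
 * Exactly one of __LITTLE_ENDIAN or __BIG_ENDIAN is expected to come
 * in from the architecture's asm/byteorder.h; the checks above reject
 * both none and both, and everything below keys off whichever one is
 * defined.
 */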

#ifdef __LITTLE_ENDIAN
# undef __LITTLE_ENDIAN
# define __LITTLE_ENDIAN 1234
#endif

#ifdef __BIG_ENDIAN
# undef __BIG_ENDIAN
# define __BIG_ENDIAN 4321
#endif

#if defined(__LITTLE_ENDIAN) && !defined(__LITTLE_ENDIAN_BITFIELD)
# define __LITTLE_ENDIAN_BITFIELD
#endif

#if defined(__BIG_ENDIAN) && !defined(__BIG_ENDIAN_BITFIELD)
# define __BIG_ENDIAN_BITFIELD
#endif
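
/*
 * The 1234/4321 values match the convention glibc uses in <endian.h>,
 * so comparisons against those constants behave the same way in both
 * worlds. The *_BITFIELD macros are what wire-format structures test
 * when laying out bitfields.
 */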

#ifdef __LITTLE_ENDIAN
# define __le16_to_cpu(x) ((__force __u16)(__le16)(x))
# define __le32_to_cpu(x) ((__force __u32)(__le32)(x))
# define __le64_to_cpu(x) ((__force __u64)(__le64)(x))
# define __cpu_to_le16(x) ((__force __le16)(__u16)(x))
# define __cpu_to_le32(x) ((__force __le32)(__u32)(x))
# define __cpu_to_le64(x) ((__force __le64)(__u64)(x))

# define __be16_to_cpu(x) __swab16((__force __u16)(__be16)(x))
# define __be32_to_cpu(x) __swab32((__force __u32)(__be32)(x))
# define __be64_to_cpu(x) __swab64((__force __u64)(__be64)(x))
# define __cpu_to_be16(x) ((__force __be16)__swab16(x))
# define __cpu_to_be32(x) ((__force __be32)__swab32(x))
# define __cpu_to_be64(x) ((__force __be64)__swab64(x))
#endif

#ifdef __BIG_ENDIAN
# define __be16_to_cpu(x) ((__force __u16)(__be16)(x))
# define __be32_to_cpu(x) ((__force __u32)(__be32)(x))
# define __be64_to_cpu(x) ((__force __u64)(__be64)(x))
# define __cpu_to_be16(x) ((__force __be16)(__u16)(x))
# define __cpu_to_be32(x) ((__force __be32)(__u32)(x))
# define __cpu_to_be64(x) ((__force __be64)(__u64)(x))

# define __le16_to_cpu(x) __swab16((__force __u16)(__le16)(x))
# define __le32_to_cpu(x) __swab32((__force __u32)(__le32)(x))
# define __le64_to_cpu(x) __swab64((__force __u64)(__le64)(x))
# define __cpu_to_le16(x) ((__force __le16)__swab16(x))
# define __cpu_to_le32(x) ((__force __le32)__swab32(x))
# define __cpu_to_le64(x) ((__force __le64)__swab64(x))
#endif
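
/*
 * Illustrative use (the hdr->length field is hypothetical): reading
 * and updating a little-endian on-disk value. On a little-endian CPU
 * both directions compile to plain assignments; on big-endian they
 * become byte swaps.
 *
 *	__u32 len = __le32_to_cpu(hdr->length);
 *	hdr->length = __cpu_to_le32(len + 4);
 */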

/*
 * These helpers could be phased out over time as the base version
 * handles constant folding.
 */
#define __constant_htonl(x) __cpu_to_be32(x)
#define __constant_ntohl(x) __be32_to_cpu(x)
#define __constant_htons(x) __cpu_to_be16(x)
#define __constant_ntohs(x) __be16_to_cpu(x)

#define __constant_le16_to_cpu(x) __le16_to_cpu(x)
#define __constant_le32_to_cpu(x) __le32_to_cpu(x)
#define __constant_le64_to_cpu(x) __le64_to_cpu(x)
#define __constant_be16_to_cpu(x) __be16_to_cpu(x)
#define __constant_be32_to_cpu(x) __be32_to_cpu(x)
#define __constant_be64_to_cpu(x) __be64_to_cpu(x)

#define __constant_cpu_to_le16(x) __cpu_to_le16(x)
#define __constant_cpu_to_le32(x) __cpu_to_le32(x)
#define __constant_cpu_to_le64(x) __cpu_to_le64(x)
#define __constant_cpu_to_be16(x) __cpu_to_be16(x)
#define __constant_cpu_to_be32(x) __cpu_to_be32(x)
#define __constant_cpu_to_be64(x) __cpu_to_be64(x)
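
/*
 * Because the base macros already fold constants, the __constant_
 * aliases above are plain pass-throughs kept for source compatibility;
 * either spelling works in a static initializer, e.g.:
 *
 *	static const __be16 ethertype_ip = __constant_htons(0x0800);
 */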

static inline void __le16_to_cpus(__u16 *p)
{
#ifdef __BIG_ENDIAN
	__swab16s(p);
#endif
}

static inline void __cpu_to_le16s(__u16 *p)
{
#ifdef __BIG_ENDIAN
	__swab16s(p);
#endif
}

static inline void __le32_to_cpus(__u32 *p)
{
#ifdef __BIG_ENDIAN
	__swab32s(p);
#endif
}

static inline void __cpu_to_le32s(__u32 *p)
{
#ifdef __BIG_ENDIAN
	__swab32s(p);
#endif
}

static inline void __le64_to_cpus(__u64 *p)
{
#ifdef __BIG_ENDIAN
	__swab64s(p);
#endif
}

static inline void __cpu_to_le64s(__u64 *p)
{
#ifdef __BIG_ENDIAN
	__swab64s(p);
#endif
}

static inline void __be16_to_cpus(__u16 *p)
{
#ifdef __LITTLE_ENDIAN
	__swab16s(p);
#endif
}

static inline void __cpu_to_be16s(__u16 *p)
{
#ifdef __LITTLE_ENDIAN
	__swab16s(p);
#endif
}

static inline void __be32_to_cpus(__u32 *p)
{
#ifdef __LITTLE_ENDIAN
	__swab32s(p);
#endif
}

static inline void __cpu_to_be32s(__u32 *p)
{
#ifdef __LITTLE_ENDIAN
	__swab32s(p);
#endif
}

static inline void __be64_to_cpus(__u64 *p)
{
#ifdef __LITTLE_ENDIAN
	__swab64s(p);
#endif
}

static inline void __cpu_to_be64s(__u64 *p)
{
#ifdef __LITTLE_ENDIAN
	__swab64s(p);
#endif
}
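
/*
 * The *_cpus() helpers above convert a value in place through a plain
 * __u16/__u32/__u64 pointer (and compile to nothing when no swap is
 * needed), e.g. for a raw value known to hold little-endian data:
 *
 *	__le32_to_cpus(&val);
 *
 * Taking untyped pointers means sparse cannot check the endianness
 * annotations here, so the typed *_cpup() helpers below are generally
 * the safer choice.
 */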

static inline __u16 __le16_to_cpup(const __le16 *p)
{
#ifdef __LITTLE_ENDIAN
	return (__force __u16)*p;
#else
	return __swab16p((__force __u16 *)p);
#endif
}

static inline __u32 __le32_to_cpup(const __le32 *p)
{
#ifdef __LITTLE_ENDIAN
	return (__force __u32)*p;
#else
	return __swab32p((__force __u32 *)p);
#endif
}

static inline __u64 __le64_to_cpup(const __le64 *p)
{
#ifdef __LITTLE_ENDIAN
	return (__force __u64)*p;
#else
	return __swab64p((__force __u64 *)p);
#endif
}

static inline __le16 __cpu_to_le16p(const __u16 *p)
{
#ifdef __LITTLE_ENDIAN
	return (__force __le16)*p;
#else
	return (__force __le16)__swab16p(p);
#endif
}

static inline __le32 __cpu_to_le32p(const __u32 *p)
{
#ifdef __LITTLE_ENDIAN
	return (__force __le32)*p;
#else
	return (__force __le32)__swab32p(p);
#endif
}

static inline __le64 __cpu_to_le64p(const __u64 *p)
{
#ifdef __LITTLE_ENDIAN
	return (__force __le64)*p;
#else
	return (__force __le64)__swab64p(p);
#endif
}

static inline __u16 __be16_to_cpup(const __be16 *p)
{
#ifdef __BIG_ENDIAN
	return (__force __u16)*p;
#else
	return __swab16p((__force __u16 *)p);
#endif
}

static inline __u32 __be32_to_cpup(const __be32 *p)
{
#ifdef __BIG_ENDIAN
	return (__force __u32)*p;
#else
	return __swab32p((__force __u32 *)p);
#endif
}

static inline __u64 __be64_to_cpup(const __be64 *p)
{
#ifdef __BIG_ENDIAN
	return (__force __u64)*p;
#else
	return __swab64p((__force __u64 *)p);
#endif
}

static inline __be16 __cpu_to_be16p(const __u16 *p)
{
#ifdef __BIG_ENDIAN
	return (__force __be16)*p;
#else
	return (__force __be16)__swab16p(p);
#endif
}

static inline __be32 __cpu_to_be32p(const __u32 *p)
{
#ifdef __BIG_ENDIAN
	return (__force __be32)*p;
#else
	return (__force __be32)__swab32p(p);
#endif
}

static inline __be64 __cpu_to_be64p(const __u64 *p)
{
#ifdef __BIG_ENDIAN
	return (__force __be64)*p;
#else
	return (__force __be64)__swab64p(p);
#endif
}
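
/*
 * Note that the *_cpup() helpers dereference their argument directly,
 * so the pointer must be suitably aligned; for fields at arbitrary
 * offsets the kernel's get_unaligned_le32()-style helpers are the
 * usual tool instead.
 */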

#ifdef __KERNEL__

# define le16_to_cpu __le16_to_cpu
# define le32_to_cpu __le32_to_cpu
# define le64_to_cpu __le64_to_cpu
# define be16_to_cpu __be16_to_cpu
# define be32_to_cpu __be32_to_cpu
# define be64_to_cpu __be64_to_cpu
# define cpu_to_le16 __cpu_to_le16
# define cpu_to_le32 __cpu_to_le32
# define cpu_to_le64 __cpu_to_le64
# define cpu_to_be16 __cpu_to_be16
# define cpu_to_be32 __cpu_to_be32
# define cpu_to_be64 __cpu_to_be64

# define le16_to_cpup __le16_to_cpup
# define le32_to_cpup __le32_to_cpup
# define le64_to_cpup __le64_to_cpup
# define be16_to_cpup __be16_to_cpup
# define be32_to_cpup __be32_to_cpup
# define be64_to_cpup __be64_to_cpup
# define cpu_to_le16p __cpu_to_le16p
# define cpu_to_le32p __cpu_to_le32p
# define cpu_to_le64p __cpu_to_le64p
# define cpu_to_be16p __cpu_to_be16p
# define cpu_to_be32p __cpu_to_be32p
# define cpu_to_be64p __cpu_to_be64p

# define le16_to_cpus __le16_to_cpus
# define le32_to_cpus __le32_to_cpus
# define le64_to_cpus __le64_to_cpus
# define be16_to_cpus __be16_to_cpus
# define be32_to_cpus __be32_to_cpus
# define be64_to_cpus __be64_to_cpus
# define cpu_to_le16s __cpu_to_le16s
# define cpu_to_le32s __cpu_to_le32s
# define cpu_to_le64s __cpu_to_le64s
# define cpu_to_be16s __cpu_to_be16s
# define cpu_to_be32s __cpu_to_be32s
# define cpu_to_be64s __cpu_to_be64s

/*
 * These have to be macros in order to do the constant folding
 * correctly - if the argument is passed into an inline function,
 * it is no longer constant according to gcc.
 */
# undef ntohl
# undef ntohs
# undef htonl
# undef htons

# define ___htonl(x) __cpu_to_be32(x)
# define ___htons(x) __cpu_to_be16(x)
# define ___ntohl(x) __be32_to_cpu(x)
# define ___ntohs(x) __be16_to_cpu(x)

# define htonl(x) ___htonl(x)
# define ntohl(x) ___ntohl(x)
# define htons(x) ___htons(x)
# define ntohs(x) ___ntohs(x)
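
/*
 * Since htons() folds constants, comparisons against well-known
 * protocol numbers can stay in network byte order with no runtime
 * swap, e.g.:
 *
 *	if (skb->protocol == htons(ETH_P_IP))
 *		process_ipv4(skb);
 *
 * (process_ipv4() is a hypothetical handler.)
 */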

static inline void le16_add_cpu(__le16 *var, u16 val)
{
	*var = cpu_to_le16(le16_to_cpup(var) + val);
}

static inline void le32_add_cpu(__le32 *var, u32 val)
{
	*var = cpu_to_le32(le32_to_cpup(var) + val);
}

static inline void le64_add_cpu(__le64 *var, u64 val)
{
	*var = cpu_to_le64(le64_to_cpup(var) + val);
}

static inline void be16_add_cpu(__be16 *var, u16 val)
{
	*var = cpu_to_be16(be16_to_cpup(var) + val);
}

static inline void be32_add_cpu(__be32 *var, u32 val)
{
	*var = cpu_to_be32(be32_to_cpup(var) + val);
}

static inline void be64_add_cpu(__be64 *var, u64 val)
{
	*var = cpu_to_be64(be64_to_cpup(var) + val);
}
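
/*
 * The *_add_cpu() helpers apply a host-order delta to a variable kept
 * in a fixed byte order, e.g. bumping a little-endian on-disk counter
 * (the field name is hypothetical):
 *
 *	le32_add_cpu(&sb->s_free_blocks, 1);
 */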

#endif /* __KERNEL__ */
#endif /* _LINUX_BYTEORDER_H */