Commit | Line | Data |
---|---|---|
d7e09d03 PT |
1 | /* |
2 | * GPL HEADER START | |
3 | * | |
4 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. | |
5 | * | |
6 | * This program is free software; you can redistribute it and/or modify | |
7 | * it under the terms of the GNU General Public License version 2 only, | |
8 | * as published by the Free Software Foundation. | |
9 | * | |
10 | * This program is distributed in the hope that it will be useful, but | |
11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | |
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
13 | * General Public License version 2 for more details (a copy is included | |
14 | * in the LICENSE file that accompanied this code). | |
15 | * | |
16 | * You should have received a copy of the GNU General Public License | |
17 | * version 2 along with this program; If not, see | |
18 | * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf | |
19 | * | |
20 | * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, | |
21 | * CA 95054 USA or visit www.sun.com if you need additional information or | |
22 | * have any questions. | |
23 | * | |
24 | * GPL HEADER END | |
25 | */ | |
26 | /* | |
27 | * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. | |
28 | * Use is subject to license terms. | |
29 | * | |
30 | * Copyright (c) 2011, 2012, Intel Corporation. | |
31 | */ | |
32 | /* | |
33 | * This file is part of Lustre, http://www.lustre.org/ | |
34 | * Lustre is a trademark of Sun Microsystems, Inc. | |
35 | * | |
36 | * libcfs/include/libcfs/libcfs_private.h | |
37 | * | |
38 | * Various defines for libcfs. | |
39 | * | |
40 | */ | |
41 | ||
42 | #ifndef __LIBCFS_PRIVATE_H__ | |
43 | #define __LIBCFS_PRIVATE_H__ | |
44 | ||
d7e09d03 PT |
45 | #ifndef DEBUG_SUBSYSTEM |
46 | # define DEBUG_SUBSYSTEM S_UNDEFINED | |
47 | #endif | |
48 | ||
d7e09d03 PT |
49 | /* |
50 | * When this is on, LASSERT macro includes check for assignment used instead | |
51 | * of equality check, but doesn't have unlikely(). Turn this on from time to | |
52 | * time to make test-builds. This shouldn't be on for production release. | |
53 | */ | |
54 | #define LASSERT_CHECKED (0) | |
55 | ||
d7e09d03 PT |
56 | #define LASSERTF(cond, fmt, ...) \ |
57 | do { \ | |
58 | if (unlikely(!(cond))) { \ | |
59 | LIBCFS_DEBUG_MSG_DATA_DECL(__msg_data, D_EMERG, NULL); \ | |
60 | libcfs_debug_msg(&__msg_data, \ | |
61 | "ASSERTION( %s ) failed: " fmt, #cond, \ | |
62 | ## __VA_ARGS__); \ | |
63 | lbug_with_loc(&__msg_data); \ | |
64 | } \ | |
65 | } while (0) | |
66 | ||
67 | #define LASSERT(cond) LASSERTF(cond, "\n") | |
68 | ||
4b5b4c72 PT |
69 | #ifdef CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK |
70 | /** | |
71 | * This is for more expensive checks that one doesn't want to be enabled all | |
72 | * the time. LINVRNT() has to be explicitly enabled by | |
73 | * CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK option. | |
74 | */ | |
75 | # define LINVRNT(exp) LASSERT(exp) | |
76 | #else | |
a393fd54 | 77 | # define LINVRNT(exp) ((void)sizeof !!(exp)) |
4b5b4c72 | 78 | #endif |
d7e09d03 PT |
79 | |
80 | #define KLASSERT(e) LASSERT(e) | |
81 | ||
2877f245 | 82 | void __noreturn lbug_with_loc(struct libcfs_debug_msg_data *); |
d7e09d03 PT |
83 | |
84 | #define LBUG() \ | |
85 | do { \ | |
86 | LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_EMERG, NULL); \ | |
87 | lbug_with_loc(&msgdata); \ | |
a393fd54 | 88 | } while (0) |
d7e09d03 | 89 | |
d7e09d03 PT |
#ifndef LIBCFS_VMALLOC_SIZE
#define LIBCFS_VMALLOC_SIZE	(2 << PAGE_CACHE_SHIFT)	/* 2 pages */
#endif

/*
 * Sanity-check allocation context: in interrupt context only small,
 * non-blocking allocations are permitted.
 */
#define LIBCFS_ALLOC_PRE(size, mask)					\
do {									\
	LASSERT(!in_interrupt() ||					\
		((size) <= LIBCFS_VMALLOC_SIZE &&			\
		 !gfpflags_allow_blocking(mask)));			\
} while (0)

/* Log allocation failure, or zero-fill freshly allocated memory. */
#define LIBCFS_ALLOC_POST(ptr, size)					\
do {									\
	if (unlikely((ptr) == NULL)) {					\
		CERROR("LNET: out of memory at %s:%d (tried to alloc '"	\
		       #ptr "' = %d)\n", __FILE__, __LINE__, (int)(size)); \
	} else {							\
		memset((ptr), 0, (size));				\
	}								\
} while (0)

/**
 * allocate memory with GFP flags @mask; requests up to
 * LIBCFS_VMALLOC_SIZE use kmalloc(), larger ones fall back to vmalloc()
 */
#define LIBCFS_ALLOC_GFP(ptr, size, mask)				\
do {									\
	LIBCFS_ALLOC_PRE((size), (mask));				\
	(ptr) = (size) <= LIBCFS_VMALLOC_SIZE ?				\
		kmalloc((size), (mask)) : vmalloc(size);		\
	LIBCFS_ALLOC_POST((ptr), (size));				\
} while (0)

/**
 * default allocator (GFP_NOFS, zeroed)
 */
#define LIBCFS_ALLOC(ptr, size) \
	LIBCFS_ALLOC_GFP(ptr, size, GFP_NOFS)

/**
 * non-sleeping allocator
 */
#define LIBCFS_ALLOC_ATOMIC(ptr, size) \
	LIBCFS_ALLOC_GFP(ptr, size, GFP_ATOMIC)

/**
 * allocate memory for specified CPU partition
 *   \a cptab != NULL: \a cpt is CPU partition id of \a cptab
 *   \a cptab == NULL: \a cpt is HW NUMA node id
 */
#define LIBCFS_CPT_ALLOC_GFP(ptr, cptab, cpt, size, mask)		\
do {									\
	LIBCFS_ALLOC_PRE((size), (mask));				\
	(ptr) = (size) <= LIBCFS_VMALLOC_SIZE ?				\
		kmalloc_node((size), (mask),				\
			     cfs_cpt_spread_node(cptab, cpt)) :		\
		vmalloc_node(size, cfs_cpt_spread_node(cptab, cpt));	\
	LIBCFS_ALLOC_POST((ptr), (size));				\
} while (0)

/** default numa allocator */
#define LIBCFS_CPT_ALLOC(ptr, cptab, cpt, size) \
	LIBCFS_CPT_ALLOC_GFP(ptr, cptab, cpt, size, GFP_NOFS)

/*
 * Free memory obtained from LIBCFS_ALLOC*(). Freeing NULL is reported as
 * an error (unlike plain kvfree()) but is otherwise harmless.
 */
#define LIBCFS_FREE(ptr, size)						\
do {									\
	if (unlikely((ptr) == NULL)) {					\
		CERROR("LIBCFS: free NULL '" #ptr "' (%d bytes) at "	\
		       "%s:%d\n", (int)(size), __FILE__, __LINE__);	\
		break;							\
	}								\
	kvfree(ptr);							\
} while (0)
161 | ||
162 | /******************************************************************************/ | |
163 | ||
/*
 * htonl hack - either this, or compile with -O2. Stupid byteorder/generic.h
 * (the generic byte-order helpers only inline under optimization, so map
 * the standard names straight onto the __cpu_to_be* primitives here).
 */
#if defined(__GNUC__) && (__GNUC__ >= 2) && !defined(__OPTIMIZE__)
#define ___htonl(x) __cpu_to_be32(x)
#define ___htons(x) __cpu_to_be16(x)
#define ___ntohl(x) __be32_to_cpu(x)
#define ___ntohs(x) __be16_to_cpu(x)
#define htonl(x) ___htonl(x)
#define ntohl(x) ___ntohl(x)
#define htons(x) ___htons(x)
#define ntohs(x) ___ntohs(x)
#endif
175 | ||
/* debug-subsystem entry points (implemented in libcfs) */
void libcfs_run_upcall(char **argv);
void libcfs_run_lbug_upcall(struct libcfs_debug_msg_data *msgdata);
void libcfs_debug_dumplog(void);
int libcfs_debug_init(unsigned long bufsize);
int libcfs_debug_cleanup(void);
int libcfs_debug_clear_buffer(void);
int libcfs_debug_mark_buffer(const char *text);

/*
 * allocate per-cpu-partition data; the returned value is an array of
 * pointers indexable by CPU (partition) ID:
 *   cptable != NULL: array length is the number of CPU partitions
 *   cptable == NULL: array length is the number of HW cores
 */
void *cfs_percpt_alloc(struct cfs_cpt_table *cptab, unsigned int size);
/*
 * destroy per-cpu-partition variable
 */
void cfs_percpt_free(void *vars);
int cfs_percpt_number(void *vars);
void *cfs_percpt_current(void *vars);
void *cfs_percpt_index(void *vars, int idx);

/* iterate @var over each slot of percpt array @vars, stopping at NULL */
#define cfs_percpt_for_each(var, i, vars)		\
	for (i = 0; i < cfs_percpt_number(vars) &&	\
		    ((var) = (vars)[i]) != NULL; i++)

/*
 * allocate a variable array; the returned value is an array of pointers.
 * Caller can specify length of array by count.
 */
void *cfs_array_alloc(int count, unsigned int size);
void cfs_array_free(void *vars);
209 | ||
#define LASSERT_ATOMIC_ENABLED (1)

#if LASSERT_ATOMIC_ENABLED

/** assert value of @a is equal to @v */
#define LASSERT_ATOMIC_EQ(a, v)				\
do {							\
	LASSERTF(atomic_read(a) == v,			\
		 "value: %d\n", atomic_read((a)));	\
} while (0)

/** assert value of @a is not equal to @v */
#define LASSERT_ATOMIC_NE(a, v)				\
do {							\
	LASSERTF(atomic_read(a) != v,			\
		 "value: %d\n", atomic_read((a)));	\
} while (0)

/** assert value of @a is less than @v */
#define LASSERT_ATOMIC_LT(a, v)				\
do {							\
	LASSERTF(atomic_read(a) < v,			\
		 "value: %d\n", atomic_read((a)));	\
} while (0)

/** assert value of @a is less than or equal to @v */
#define LASSERT_ATOMIC_LE(a, v)				\
do {							\
	LASSERTF(atomic_read(a) <= v,			\
		 "value: %d\n", atomic_read((a)));	\
} while (0)

/** assert value of @a is greater than @v */
#define LASSERT_ATOMIC_GT(a, v)				\
do {							\
	LASSERTF(atomic_read(a) > v,			\
		 "value: %d\n", atomic_read((a)));	\
} while (0)

/** assert value of @a is greater than or equal to @v */
#define LASSERT_ATOMIC_GE(a, v)				\
do {							\
	LASSERTF(atomic_read(a) >= v,			\
		 "value: %d\n", atomic_read((a)));	\
} while (0)

/** assert value of @a is greater than @v1 and less than @v2 */
#define LASSERT_ATOMIC_GT_LT(a, v1, v2)				\
do {								\
	int __v = atomic_read(a);				\
	LASSERTF(__v > v1 && __v < v2, "value: %d\n", __v);	\
} while (0)

/** assert value of @a is greater than @v1 and less than or equal to @v2 */
#define LASSERT_ATOMIC_GT_LE(a, v1, v2)				\
do {								\
	int __v = atomic_read(a);				\
	LASSERTF(__v > v1 && __v <= v2, "value: %d\n", __v);	\
} while (0)

/** assert value of @a is greater than or equal to @v1 and less than @v2 */
#define LASSERT_ATOMIC_GE_LT(a, v1, v2)				\
do {								\
	int __v = atomic_read(a);				\
	LASSERTF(__v >= v1 && __v < v2, "value: %d\n", __v);	\
} while (0)

/** assert value of @a is within the inclusive range [@v1, @v2] */
#define LASSERT_ATOMIC_GE_LE(a, v1, v2)				\
do {								\
	int __v = atomic_read(a);				\
	LASSERTF(__v >= v1 && __v <= v2, "value: %d\n", __v);	\
} while (0)

#else /* !LASSERT_ATOMIC_ENABLED */

#define LASSERT_ATOMIC_EQ(a, v)			do {} while (0)
#define LASSERT_ATOMIC_NE(a, v)			do {} while (0)
#define LASSERT_ATOMIC_LT(a, v)			do {} while (0)
#define LASSERT_ATOMIC_LE(a, v)			do {} while (0)
#define LASSERT_ATOMIC_GT(a, v)			do {} while (0)
#define LASSERT_ATOMIC_GE(a, v)			do {} while (0)
#define LASSERT_ATOMIC_GT_LT(a, v1, v2)		do {} while (0)
#define LASSERT_ATOMIC_GT_LE(a, v1, v2)		do {} while (0)
#define LASSERT_ATOMIC_GE_LT(a, v1, v2)		do {} while (0)
#define LASSERT_ATOMIC_GE_LE(a, v1, v2)		do {} while (0)

#endif /* LASSERT_ATOMIC_ENABLED */

#define LASSERT_ATOMIC_ZERO(a)	LASSERT_ATOMIC_EQ(a, 0)
#define LASSERT_ATOMIC_POS(a)	LASSERT_ATOMIC_GT(a, 0)
301 | ||
06133e80 MR |
/* typed single-object helpers: allocate (zeroed, GFP_NOFS) / free *ptr */
#define CFS_ALLOC_PTR(ptr)	LIBCFS_ALLOC(ptr, sizeof(*(ptr)))
#define CFS_FREE_PTR(ptr)	LIBCFS_FREE(ptr, sizeof(*(ptr)))
d7e09d03 PT |
304 | |
305 | /* | |
306 | * percpu partition lock | |
307 | * | |
308 | * There are some use-cases like this in Lustre: | |
309 | * . each CPU partition has it's own private data which is frequently changed, | |
310 | * and mostly by the local CPU partition. | |
311 | * . all CPU partitions share some global data, these data are rarely changed. | |
312 | * | |
313 | * LNet is typical example. | |
314 | * CPU partition lock is designed for this kind of use-cases: | |
315 | * . each CPU partition has it's own private lock | |
316 | * . change on private data just needs to take the private lock | |
317 | * . read on shared data just needs to take _any_ of private locks | |
318 | * . change on shared data needs to take _all_ private locks, | |
319 | * which is slow and should be really rare. | |
320 | */ | |
321 | ||
322 | enum { | |
323 | CFS_PERCPT_LOCK_EX = -1, /* negative */ | |
324 | }; | |
325 | ||
d7e09d03 PT |
326 | struct cfs_percpt_lock { |
327 | /* cpu-partition-table for this lock */ | |
328 | struct cfs_cpt_table *pcl_cptab; | |
329 | /* exclusively locked */ | |
330 | unsigned int pcl_locked; | |
331 | /* private lock table */ | |
332 | spinlock_t **pcl_locks; | |
333 | }; | |
334 | ||
335 | /* return number of private locks */ | |
336 | static inline int | |
337 | cfs_percpt_lock_num(struct cfs_percpt_lock *pcl) | |
338 | { | |
339 | return cfs_cpt_number(pcl->pcl_cptab); | |
340 | } | |
341 | ||
d7e09d03 PT |
342 | /* |
343 | * create a cpu-partition lock based on CPU partition table \a cptab, | |
344 | * each private lock has extra \a psize bytes padding data | |
345 | */ | |
346 | struct cfs_percpt_lock *cfs_percpt_lock_alloc(struct cfs_cpt_table *cptab); | |
347 | /* destroy a cpu-partition lock */ | |
348 | void cfs_percpt_lock_free(struct cfs_percpt_lock *pcl); | |
349 | ||
350 | /* lock private lock \a index of \a pcl */ | |
351 | void cfs_percpt_lock(struct cfs_percpt_lock *pcl, int index); | |
352 | /* unlock private lock \a index of \a pcl */ | |
353 | void cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index); | |
354 | /* create percpt (atomic) refcount based on @cptab */ | |
355 | atomic_t **cfs_percpt_atomic_alloc(struct cfs_cpt_table *cptab, int val); | |
356 | /* destroy percpt refcount */ | |
357 | void cfs_percpt_atomic_free(atomic_t **refs); | |
358 | /* return sum of all percpu refs */ | |
359 | int cfs_percpt_atomic_summary(atomic_t **refs); | |
360 | ||
d7e09d03 PT |
/**
 * Compile-time assertion.
 *
 * Check an invariant described by a constant expression at compile time by
 * forcing a compiler error if it does not hold. \a cond must be a constant
 * expression as defined by the ISO C Standard:
 *
 *	6.8.4.2 The switch statement
 *	....
 *	[#3] The expression of each case label shall be an integer
 *	constant expression and no two of the case constant
 *	expressions in the same switch statement shall have the same
 *	value after conversion...
 *
 * (when \a cond is false both labels are 0, producing a duplicate-case
 * error; when it is true the switch compiles away to nothing)
 */
#define CLASSERT(cond) do { switch (42) { case (cond): case 0: break; } } while (0)

/* max value for numeric network address */
#define MAX_NUMERIC_VALUE 0xffffffff

/* logical implication: a => b */
#define ergo(a, b) (!(a) || (b))
/* logical equivalence: a <=> b */
#define equi(a, b) (!!(a) == !!(b))
384 | ||
d7e09d03 PT |
/* --------------------------------------------------------------------
 * Light-weight trace
 * Support for temporary event tracing with minimal Heisenberg effect.
 * -------------------------------------------------------------------- */

/*
 * NOTE(review): appears to track "memhog" page allocations for a libcfs
 * device user (a count plus the root page of the allocation chain) —
 * confirm semantics against the ioctl handlers that use it.
 */
struct libcfs_device_userstate {
	int		 ldu_memhog_pages;
	struct page	*ldu_memhog_root_page;
};
394 | ||
/*
 * Map a possibly-NULL string pointer to a printable string: yields @ptr
 * itself when non-NULL, otherwise the empty string "".
 *
 * Fully parenthesized: the old expansion `((ptr)) ? (ptr) : ""` left the
 * conditional operator exposed, so uses like MKSTR(p)[0] or
 * x = MKSTR(p) + n parsed with the wrong precedence.
 */
#define MKSTR(ptr) ((ptr) ? (ptr) : "")
d7e09d03 | 396 | |
/* round @val up to the next multiple of 4 */
static inline int cfs_size_round4(int val)
{
	return (val + 3) & ~3;
}
401 | ||
#ifndef HAVE_CFS_SIZE_ROUND
/* round @val up to the next multiple of 8 (the default wire rounding) */
static inline int cfs_size_round(int val)
{
	return (val + 7) & ~7;
}

#define HAVE_CFS_SIZE_ROUND
#endif
410 | ||
/* round @val up to the next multiple of 16 */
static inline int cfs_size_round16(int val)
{
	return (val + 15) & ~15;
}
415 | ||
/* round @val up to the next multiple of 32 */
static inline int cfs_size_round32(int val)
{
	return (val + 31) & ~31;
}
420 | ||
/*
 * like cfs_size_round() but reserving one extra byte before rounding to a
 * multiple of 8; zero stays zero
 */
static inline int cfs_size_round0(int val)
{
	if (val == 0)
		return 0;
	return (val + 8) & ~7;
}
427 | ||
/* string length of @fset including its NUL, rounded up to a multiple of 8 */
static inline size_t cfs_round_strlen(char *fset)
{
	int nob = (int)strlen(fset) + 1;

	return (size_t)cfs_size_round(nob);
}
432 | ||
/*
 * pack: copy @len bytes from @var (skipped when @var is NULL) to @ptr,
 * then advance @ptr by the 8-byte-rounded length
 */
#define LOGL(var, len, ptr)					\
do {								\
	if (var)						\
		memcpy((char *)ptr, (const char *)var, len);	\
	ptr += cfs_size_round(len);				\
} while (0)

/*
 * unpack: copy @len bytes from @ptr to @var (skipped when @var is NULL),
 * then advance @ptr by the 8-byte-rounded length
 */
#define LOGU(var, len, ptr)					\
do {								\
	if (var)						\
		memcpy((char *)var, (const char *)ptr, len);	\
	ptr += cfs_size_round(len);				\
} while (0)

/*
 * pack with explicit NUL terminator appended; does nothing when @len is
 * zero
 */
#define LOGL0(var, len, ptr)					\
do {								\
	if (!len)						\
		break;						\
	memcpy((char *)ptr, (const char *)var, len);		\
	*((char *)(ptr) + len) = 0;				\
	ptr += cfs_size_round(len + 1);				\
} while (0)
455 | ||
d7e09d03 | 456 | #endif |