/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * libcfs/include/libcfs/libcfs_private.h
 *
 * Various defines for libcfs.
 */
#ifndef __LIBCFS_PRIVATE_H__
#define __LIBCFS_PRIVATE_H__

#ifndef DEBUG_SUBSYSTEM
# define DEBUG_SUBSYSTEM S_UNDEFINED
#endif
/*
 * When this is on, the LASSERT macro also catches an assignment used in
 * place of an equality check, but does not use unlikely(). Turn this on
 * from time to time for test builds; it should not be on for a production
 * release.
 */
#define LASSERT_CHECKED (0)
#define LASSERTF(cond, fmt, ...)                                        \
do {                                                                    \
        if (unlikely(!(cond))) {                                        \
                LIBCFS_DEBUG_MSG_DATA_DECL(__msg_data, D_EMERG, NULL);  \
                libcfs_debug_msg(&__msg_data,                           \
                                 "ASSERTION( %s ) failed: " fmt, #cond, \
                                 ## __VA_ARGS__);                       \
                lbug_with_loc(&__msg_data);                             \
        }                                                               \
} while (0)

#define LASSERT(cond) LASSERTF(cond, "\n")
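/*
 * Example usage (illustrative; "req" and "count" are placeholder names):
 *
 *      LASSERT(req != NULL);
 *      LASSERTF(count >= 0, "count: %d\n", count);
 *
 * On failure both print the failed expression at D_EMERG level and then
 * call lbug_with_loc() to crash the thread.
 */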
#ifdef CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK
/**
 * This is for more expensive checks that one doesn't want enabled all the
 * time. LINVRNT() has to be explicitly enabled by the
 * CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK option.
 */
# define LINVRNT(exp) LASSERT(exp)
#else
# define LINVRNT(exp) ((void)sizeof !!(exp))
#endif
#define KLASSERT(e) LASSERT(e)

void __noreturn lbug_with_loc(struct libcfs_debug_msg_data *);

#define LBUG()                                                  \
do {                                                            \
        LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_EMERG, NULL);     \
        lbug_with_loc(&msgdata);                                \
} while (0)
#ifndef LIBCFS_VMALLOC_SIZE
#define LIBCFS_VMALLOC_SIZE     (2 << PAGE_CACHE_SHIFT) /* 2 pages */
#endif
#define LIBCFS_ALLOC_PRE(size, mask)                                    \
do {                                                                    \
        LASSERT(!in_interrupt() ||                                      \
                ((size) <= LIBCFS_VMALLOC_SIZE &&                       \
                 !gfpflags_allow_blocking(mask)));                      \
} while (0)
#define LIBCFS_ALLOC_POST(ptr, size)                                    \
do {                                                                    \
        if (unlikely((ptr) == NULL)) {                                  \
                CERROR("LNET: out of memory at %s:%d (tried to alloc '" \
                       #ptr "' = %d)\n", __FILE__, __LINE__, (int)(size)); \
        } else {                                                        \
                memset((ptr), 0, (size));                               \
        }                                                               \
} while (0)
/**
 * allocate memory with GFP flags @mask; uses kmalloc() for sizes up to
 * LIBCFS_VMALLOC_SIZE and falls back to vmalloc() above that
 */
#define LIBCFS_ALLOC_GFP(ptr, size, mask)                               \
do {                                                                    \
        LIBCFS_ALLOC_PRE((size), (mask));                               \
        (ptr) = (size) <= LIBCFS_VMALLOC_SIZE ?                         \
                kmalloc((size), (mask)) : vmalloc(size);                \
        LIBCFS_ALLOC_POST((ptr), (size));                               \
} while (0)
/** default allocator */
#define LIBCFS_ALLOC(ptr, size) \
        LIBCFS_ALLOC_GFP(ptr, size, GFP_NOFS)
/**
 * non-sleeping allocator
 */
#define LIBCFS_ALLOC_ATOMIC(ptr, size) \
        LIBCFS_ALLOC_GFP(ptr, size, GFP_ATOMIC)
/**
 * allocate memory for the specified CPU partition:
 *   if \a cptab != NULL, \a cpt is a CPU partition id in \a cptab;
 *   if \a cptab == NULL, \a cpt is a HW NUMA node id
 */
#define LIBCFS_CPT_ALLOC_GFP(ptr, cptab, cpt, size, mask)               \
do {                                                                    \
        LIBCFS_ALLOC_PRE((size), (mask));                               \
        (ptr) = (size) <= LIBCFS_VMALLOC_SIZE ?                         \
                kmalloc_node((size), (mask), cfs_cpt_spread_node(cptab, cpt)) :\
                vmalloc_node(size, cfs_cpt_spread_node(cptab, cpt));    \
        LIBCFS_ALLOC_POST((ptr), (size));                               \
} while (0)
/** default NUMA allocator */
#define LIBCFS_CPT_ALLOC(ptr, cptab, cpt, size) \
        LIBCFS_CPT_ALLOC_GFP(ptr, cptab, cpt, size, GFP_NOFS)
#define LIBCFS_FREE(ptr, size)                                          \
do {                                                                    \
        int s = (size);                                                 \
                                                                        \
        if (unlikely((ptr) == NULL)) {                                  \
                CERROR("LIBCFS: free NULL '" #ptr "' (%d bytes) at "    \
                       "%s:%d\n", s, __FILE__, __LINE__);               \
                break;                                                  \
        }                                                               \
        if (unlikely(s > LIBCFS_VMALLOC_SIZE))                          \
                vfree(ptr);                                             \
        else                                                            \
                kfree(ptr);                                             \
} while (0)
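/*
 * Example usage (illustrative; "struct foo" is a placeholder type). Note
 * that LIBCFS_ALLOC() zeroes the memory on success and that the size must
 * be passed again on free:
 *
 *      struct foo *obj;
 *
 *      LIBCFS_ALLOC(obj, sizeof(*obj));
 *      if (obj == NULL)
 *              return -ENOMEM;
 *      ...
 *      LIBCFS_FREE(obj, sizeof(*obj));
 */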
/******************************************************************************/
/* htonl hack - either this, or compile with -O2. Stupid byteorder/generic.h */
#if defined(__GNUC__) && (__GNUC__ >= 2) && !defined(__OPTIMIZE__)
#define ___htonl(x) __cpu_to_be32(x)
#define ___htons(x) __cpu_to_be16(x)
#define ___ntohl(x) __be32_to_cpu(x)
#define ___ntohs(x) __be16_to_cpu(x)
#define htonl(x) ___htonl(x)
#define ntohl(x) ___ntohl(x)
#define htons(x) ___htons(x)
#define ntohs(x) ___ntohs(x)
#endif
void libcfs_run_upcall(char **argv);
void libcfs_run_lbug_upcall(struct libcfs_debug_msg_data *);
void libcfs_debug_dumplog(void);
int libcfs_debug_init(unsigned long bufsize);
int libcfs_debug_cleanup(void);
int libcfs_debug_clear_buffer(void);
int libcfs_debug_mark_buffer(const char *text);

void libcfs_debug_set_level(unsigned int debug_level);
/*
 * allocate per-cpu-partition data; the returned value is an array of
 * pointers that can be indexed by CPU ID:
 *   cptab != NULL: the array has one entry per CPU partition
 *   cptab == NULL: the array has one entry per HW core
 */
void *cfs_percpt_alloc(struct cfs_cpt_table *cptab, unsigned int size);
/*
 * destroy a per-cpu-partition variable
 */
void cfs_percpt_free(void *vars);
int cfs_percpt_number(void *vars);
void *cfs_percpt_current(void *vars);
void *cfs_percpt_index(void *vars, int idx);
#define cfs_percpt_for_each(var, i, vars)               \
        for (i = 0; i < cfs_percpt_number(vars) &&      \
                    ((var) = (vars)[i]) != NULL; i++)
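/*
 * Example usage (illustrative; "struct my_data", "md_count" and "cptab"
 * are placeholder names): allocate one instance per CPU partition, then
 * iterate over all of them.
 *
 *      struct my_data **data;
 *      struct my_data *d;
 *      int i;
 *
 *      data = cfs_percpt_alloc(cptab, sizeof(**data));
 *      if (data != NULL) {
 *              cfs_percpt_for_each(d, i, data)
 *                      d->md_count = 0;
 *              cfs_percpt_free(data);
 *      }
 */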
/*
 * allocate a variable-sized array; the returned value is an array of
 * pointers. The caller specifies the length of the array with \a count.
 */
void *cfs_array_alloc(int count, unsigned int size);
void cfs_array_free(void *vars);
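/*
 * Example usage (illustrative; assumes a NULL return on allocation
 * failure): allocate an array of 16 buffers of 4096 bytes each, then
 * release the buffers and the array together.
 *
 *      void **bufs = cfs_array_alloc(16, 4096);
 *
 *      if (bufs != NULL) {
 *              ...
 *              cfs_array_free(bufs);
 *      }
 */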
#define LASSERT_ATOMIC_ENABLED (1)

#if LASSERT_ATOMIC_ENABLED

/** assert value of @a is equal to @v */
#define LASSERT_ATOMIC_EQ(a, v)                         \
do {                                                    \
        LASSERTF(atomic_read(a) == v,                   \
                 "value: %d\n", atomic_read((a)));      \
} while (0)
/** assert value of @a is not equal to @v */
#define LASSERT_ATOMIC_NE(a, v)                         \
do {                                                    \
        LASSERTF(atomic_read(a) != v,                   \
                 "value: %d\n", atomic_read((a)));      \
} while (0)
/** assert value of @a is less than @v */
#define LASSERT_ATOMIC_LT(a, v)                         \
do {                                                    \
        LASSERTF(atomic_read(a) < v,                    \
                 "value: %d\n", atomic_read((a)));      \
} while (0)
/** assert value of @a is less than or equal to @v */
#define LASSERT_ATOMIC_LE(a, v)                         \
do {                                                    \
        LASSERTF(atomic_read(a) <= v,                   \
                 "value: %d\n", atomic_read((a)));      \
} while (0)
/** assert value of @a is greater than @v */
#define LASSERT_ATOMIC_GT(a, v)                         \
do {                                                    \
        LASSERTF(atomic_read(a) > v,                    \
                 "value: %d\n", atomic_read((a)));      \
} while (0)
/** assert value of @a is greater than or equal to @v */
#define LASSERT_ATOMIC_GE(a, v)                         \
do {                                                    \
        LASSERTF(atomic_read(a) >= v,                   \
                 "value: %d\n", atomic_read((a)));      \
} while (0)
/** assert value of @a is greater than @v1 and less than @v2 */
#define LASSERT_ATOMIC_GT_LT(a, v1, v2)                 \
do {                                                    \
        int __v = atomic_read(a);                       \
                                                        \
        LASSERTF(__v > v1 && __v < v2, "value: %d\n", __v); \
} while (0)
/** assert value of @a is greater than @v1 and less than or equal to @v2 */
#define LASSERT_ATOMIC_GT_LE(a, v1, v2)                 \
do {                                                    \
        int __v = atomic_read(a);                       \
                                                        \
        LASSERTF(__v > v1 && __v <= v2, "value: %d\n", __v); \
} while (0)
/** assert value of @a is greater than or equal to @v1 and less than @v2 */
#define LASSERT_ATOMIC_GE_LT(a, v1, v2)                 \
do {                                                    \
        int __v = atomic_read(a);                       \
                                                        \
        LASSERTF(__v >= v1 && __v < v2, "value: %d\n", __v); \
} while (0)
/** assert value of @a is greater than or equal to @v1 and less than or
 * equal to @v2 */
#define LASSERT_ATOMIC_GE_LE(a, v1, v2)                 \
do {                                                    \
        int __v = atomic_read(a);                       \
                                                        \
        LASSERTF(__v >= v1 && __v <= v2, "value: %d\n", __v); \
} while (0)
#else /* !LASSERT_ATOMIC_ENABLED */

#define LASSERT_ATOMIC_EQ(a, v)                 do {} while (0)
#define LASSERT_ATOMIC_NE(a, v)                 do {} while (0)
#define LASSERT_ATOMIC_LT(a, v)                 do {} while (0)
#define LASSERT_ATOMIC_LE(a, v)                 do {} while (0)
#define LASSERT_ATOMIC_GT(a, v)                 do {} while (0)
#define LASSERT_ATOMIC_GE(a, v)                 do {} while (0)
#define LASSERT_ATOMIC_GT_LT(a, v1, v2)         do {} while (0)
#define LASSERT_ATOMIC_GT_LE(a, v1, v2)         do {} while (0)
#define LASSERT_ATOMIC_GE_LT(a, v1, v2)         do {} while (0)
#define LASSERT_ATOMIC_GE_LE(a, v1, v2)         do {} while (0)

#endif /* LASSERT_ATOMIC_ENABLED */
#define LASSERT_ATOMIC_ZERO(a)  LASSERT_ATOMIC_EQ(a, 0)
#define LASSERT_ATOMIC_POS(a)   LASSERT_ATOMIC_GT(a, 0)
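/*
 * Example usage (illustrative; "obj" and "foo_ref" are placeholder
 * names): verify a reference count is still positive before dropping it.
 *
 *      LASSERT_ATOMIC_POS(&obj->foo_ref);
 *      atomic_dec(&obj->foo_ref);
 */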
#define CFS_ALLOC_PTR(ptr)      LIBCFS_ALLOC(ptr, sizeof(*(ptr)))
#define CFS_FREE_PTR(ptr)       LIBCFS_FREE(ptr, sizeof(*(ptr)))
/*
 * percpu partition lock
 *
 * There are use-cases like this in Lustre:
 * . each CPU partition has its own private data, which is frequently
 *   changed, and mostly by the local CPU partition.
 * . all CPU partitions share some global data, and these data are rarely
 *   changed.
 *
 * LNet is a typical example.
 * The CPU partition lock is designed for this kind of use-case:
 * . each CPU partition has its own private lock
 * . a change to private data only needs to take the private lock
 * . a read of shared data only needs to take _any_ one of the private locks
 * . a change to shared data needs to take _all_ private locks,
 *   which is slow and should be really rare.
 */
enum {
        CFS_PERCPT_LOCK_EX      = -1,   /* negative */
};
struct cfs_percpt_lock {
        /* cpu-partition-table for this lock */
        struct cfs_cpt_table    *pcl_cptab;
        /* exclusively locked */
        unsigned int            pcl_locked;
        /* private lock table */
        spinlock_t              **pcl_locks;
};
/* return number of private locks */
static inline int
cfs_percpt_lock_num(struct cfs_percpt_lock *pcl)
{
        return cfs_cpt_number(pcl->pcl_cptab);
}
/*
 * create a cpu-partition lock based on CPU partition table \a cptab;
 * one private lock is created for each CPU partition
 */
struct cfs_percpt_lock *cfs_percpt_lock_alloc(struct cfs_cpt_table *cptab);
/* destroy a cpu-partition lock */
void cfs_percpt_lock_free(struct cfs_percpt_lock *pcl);
/* lock private lock \a index of \a pcl */
void cfs_percpt_lock(struct cfs_percpt_lock *pcl, int index);
/* unlock private lock \a index of \a pcl */
void cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index);
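/*
 * Example usage (illustrative; "pcl" and "cpt" are placeholder names, and
 * the CFS_PERCPT_LOCK_EX behaviour is assumed from the comment above:
 * passing it as the index takes all private locks exclusively).
 *
 *      // take one private lock for a partition-private update
 *      cfs_percpt_lock(pcl, cpt);
 *      ...
 *      cfs_percpt_unlock(pcl, cpt);
 *
 *      // take all private locks before changing shared data
 *      cfs_percpt_lock(pcl, CFS_PERCPT_LOCK_EX);
 *      ...
 *      cfs_percpt_unlock(pcl, CFS_PERCPT_LOCK_EX);
 */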
/* create a percpt (atomic) refcount based on @cptab */
atomic_t **cfs_percpt_atomic_alloc(struct cfs_cpt_table *cptab, int val);
/* destroy a percpt refcount */
void cfs_percpt_atomic_free(atomic_t **refs);
/* return the sum of all percpu refs */
int cfs_percpt_atomic_summary(atomic_t **refs);
/**
 * Compile-time assertion.
 *
 * Check an invariant described by a constant expression at compile time by
 * forcing a compiler error if it does not hold. \a cond must be a constant
 * expression as defined by the ISO C Standard:
 *
 *      6.8.4.2 The switch statement
 *      ...
 *      [#3] The expression of each case label shall be an integer
 *      constant expression and no two of the case constant
 *      expressions in the same switch statement shall have the same
 *      value after conversion...
 *
 * If \a cond is false, "case (cond):" duplicates "case 0:" and the switch
 * fails to compile.
 */
#define CLASSERT(cond) do { switch (42) { case (cond): case 0: break; } } while (0)
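/*
 * Example usage (illustrative): the build breaks if the size assumption
 * is ever violated.
 *
 *      CLASSERT(sizeof(__u64) == 8);
 */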
/* max value for a numeric network address */
#define MAX_NUMERIC_VALUE 0xffffffff

/* logical implication */
#define ergo(a, b) (!(a) || (b))
/* logical equivalence */
#define equi(a, b) (!!(a) == !!(b))
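/*
 * Example usage (illustrative; "page" and "pg_locked" are placeholder
 * names): assert "whenever a holds, b holds as well".
 *
 *      LASSERT(ergo(page != NULL, page->pg_locked));
 */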
/* --------------------------------------------------------------------
 * Support for temporary event tracing with minimal Heisenberg effect.
 * -------------------------------------------------------------------- */

struct libcfs_device_userstate {
        int              ldu_memhog_pages;
        struct page     *ldu_memhog_root_page;
};
#define MKSTR(ptr) ((ptr) ? (ptr) : "")
static inline int cfs_size_round4(int val)
{
        return (val + 3) & (~0x3);
}
#ifndef HAVE_CFS_SIZE_ROUND
static inline int cfs_size_round(int val)
{
        return (val + 7) & (~0x7);
}

#define HAVE_CFS_SIZE_ROUND
#endif
static inline int cfs_size_round16(int val)
{
        return (val + 0xf) & (~0xf);
}
static inline int cfs_size_round32(int val)
{
        return (val + 0x1f) & (~0x1f);
}
static inline int cfs_size_round0(int val)
{
        if (!val)
                return 0;
        return (val + 1 + 7) & (~0x7);
}
static inline size_t cfs_round_strlen(char *fset)
{
        return (size_t)cfs_size_round((int)strlen(fset) + 1);
}
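/*
 * Worked examples (illustrative): cfs_size_round() rounds up to the next
 * multiple of 8, so cfs_size_round(5) == 8 and cfs_size_round(8) == 8;
 * cfs_round_strlen("lnet") == cfs_size_round(4 + 1) == 8, i.e. the
 * rounded length includes the terminating NUL.
 */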
#define LOGL(var, len, ptr)                                     \
do {                                                            \
        if (var)                                                \
                memcpy((char *)ptr, (const char *)var, len);    \
        ptr += cfs_size_round(len);                             \
} while (0)
#define LOGU(var, len, ptr)                                     \
do {                                                            \
        if (ptr)                                                \
                memcpy((char *)var, (const char *)ptr, len);    \
        ptr += cfs_size_round(len);                             \
} while (0)
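/*
 * Example usage (illustrative; "name", "namelen", "buf" and "p" are
 * placeholder names): LOGL() packs a blob into a buffer and LOGU()
 * unpacks it again; both advance the cursor by the rounded length so
 * records stay 8-byte aligned.
 *
 *      char *p = buf;
 *
 *      LOGL(name, namelen, p);         // pack
 *      ...
 *      p = buf;
 *      LOGU(name, namelen, p);         // unpack
 */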
#define LOGL0(var, len, ptr)                                    \
do {                                                            \
        if (!len)                                               \
                break;                                          \
        memcpy((char *)ptr, (const char *)var, len);            \
        *((char *)(ptr) + len) = 0;                             \
        ptr += cfs_size_round(len + 1);                         \
} while (0)

#endif /* __LIBCFS_PRIVATE_H__ */