/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * libcfs/include/libcfs/libcfs_private.h
 *
 * Various defines for libcfs.
 */
#ifndef __LIBCFS_PRIVATE_H__
#define __LIBCFS_PRIVATE_H__

#ifndef DEBUG_SUBSYSTEM
# define DEBUG_SUBSYSTEM S_UNDEFINED
#endif
/*
 * When this is on, the LASSERT macro also checks for an assignment used in
 * place of an equality check, but does not use unlikely(). Turn this on from
 * time to time for test builds; it should not be on in production releases.
 */
#define LASSERT_CHECKED (0)
#define LASSERTF(cond, fmt, ...)					\
do {									\
	if (unlikely(!(cond))) {					\
		LIBCFS_DEBUG_MSG_DATA_DECL(__msg_data, D_EMERG, NULL);	\
		libcfs_debug_msg(&__msg_data,				\
				 "ASSERTION( %s ) failed: " fmt, #cond,	\
				 ## __VA_ARGS__);			\
		lbug_with_loc(&__msg_data);				\
	}								\
} while (0)

#define LASSERT(cond) LASSERTF(cond, "\n")
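
/*
 * Usage sketch (illustrative, not part of the original header); the
 * surrounding function and the 'rc' variable are hypothetical:
 *
 *	int rc = do_something();
 *
 *	LASSERT(rc >= 0);
 *	LASSERTF(rc == 0, "unexpected rc: %d\n", rc);
 *
 * Both macros print the failed condition and call lbug_with_loc() on
 * failure; LASSERTF additionally formats the caller-supplied message.
 */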
#ifdef CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK
/**
 * This is for more expensive checks that one doesn't want enabled all the
 * time. LINVRNT() has to be explicitly enabled by the
 * CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK option.
 */
# define LINVRNT(exp) LASSERT(exp)
#else
# define LINVRNT(exp) ((void)sizeof !!(exp))
#endif

#define KLASSERT(e) LASSERT(e)
void __noreturn lbug_with_loc(struct libcfs_debug_msg_data *);

#define LBUG()							\
do {								\
	LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_EMERG, NULL);	\
	lbug_with_loc(&msgdata);				\
} while (0)
#ifndef LIBCFS_VMALLOC_SIZE
#define LIBCFS_VMALLOC_SIZE	(2 << PAGE_CACHE_SHIFT) /* 2 pages */
#endif
#define LIBCFS_ALLOC_PRE(size, mask)					\
do {									\
	LASSERT(!in_interrupt() ||					\
		((size) <= LIBCFS_VMALLOC_SIZE &&			\
		 !gfpflags_allow_blocking(mask)));			\
} while (0)
#define LIBCFS_ALLOC_POST(ptr, size)					\
do {									\
	if (unlikely((ptr) == NULL)) {					\
		CERROR("LNET: out of memory at %s:%d (tried to alloc '" \
		       #ptr "' = %d)\n", __FILE__, __LINE__, (int)(size)); \
	} else {							\
		memset((ptr), 0, (size));				\
	}								\
} while (0)
/**
 * allocate memory with GFP flags @mask
 */
#define LIBCFS_ALLOC_GFP(ptr, size, mask)				\
do {									\
	LIBCFS_ALLOC_PRE((size), (mask));				\
	(ptr) = (size) <= LIBCFS_VMALLOC_SIZE ?				\
		kmalloc((size), (mask)) : vmalloc(size);		\
	LIBCFS_ALLOC_POST((ptr), (size));				\
} while (0)
/**
 * default allocator
 */
#define LIBCFS_ALLOC(ptr, size) \
	LIBCFS_ALLOC_GFP(ptr, size, GFP_NOFS)
/**
 * non-sleeping allocator
 */
#define LIBCFS_ALLOC_ATOMIC(ptr, size) \
	LIBCFS_ALLOC_GFP(ptr, size, GFP_ATOMIC)
/**
 * allocate memory for specified CPU partition
 *   \a cptab != NULL, \a cpt is CPU partition id of \a cptab
 *   \a cptab == NULL, \a cpt is HW NUMA node id
 */
#define LIBCFS_CPT_ALLOC_GFP(ptr, cptab, cpt, size, mask)		\
do {									\
	LIBCFS_ALLOC_PRE((size), (mask));				\
	(ptr) = (size) <= LIBCFS_VMALLOC_SIZE ?				\
		kmalloc_node((size), (mask), cfs_cpt_spread_node(cptab, cpt)) :\
		vmalloc_node(size, cfs_cpt_spread_node(cptab, cpt));	\
	LIBCFS_ALLOC_POST((ptr), (size));				\
} while (0)
/** default numa allocator */
#define LIBCFS_CPT_ALLOC(ptr, cptab, cpt, size)			\
	LIBCFS_CPT_ALLOC_GFP(ptr, cptab, cpt, size, GFP_NOFS)
#define LIBCFS_FREE(ptr, size)						\
do {									\
	if (unlikely((ptr) == NULL)) {					\
		CERROR("LIBCFS: free NULL '" #ptr "' (%d bytes) at "	\
		       "%s:%d\n", (int)(size), __FILE__, __LINE__);	\
		break;							\
	}								\
	kvfree(ptr);							\
} while (0)
/******************************************************************************/

/* htonl hack - either this, or compile with -O2. Stupid byteorder/generic.h */
#if defined(__GNUC__) && (__GNUC__ >= 2) && !defined(__OPTIMIZE__)
#define ___htonl(x) __cpu_to_be32(x)
#define ___htons(x) __cpu_to_be16(x)
#define ___ntohl(x) __be32_to_cpu(x)
#define ___ntohs(x) __be16_to_cpu(x)
#define htonl(x) ___htonl(x)
#define ntohl(x) ___ntohl(x)
#define htons(x) ___htons(x)
#define ntohs(x) ___ntohs(x)
#endif
void libcfs_run_upcall(char **argv);
void libcfs_run_lbug_upcall(struct libcfs_debug_msg_data *);
void libcfs_debug_dumplog(void);
int libcfs_debug_init(unsigned long bufsize);
int libcfs_debug_cleanup(void);
int libcfs_debug_clear_buffer(void);
int libcfs_debug_mark_buffer(const char *text);
/*
 * allocate per-cpu-partition data; the returned value is an array of
 * pointers that can be indexed by CPU partition ID.
 *	cptab != NULL: size of array is number of CPU partitions
 *	cptab == NULL: size of array is number of HW cores
 */
void *cfs_percpt_alloc(struct cfs_cpt_table *cptab, unsigned int size);
/*
 * destroy per-cpu-partition variable
 */
void cfs_percpt_free(void *vars);
int cfs_percpt_number(void *vars);
void *cfs_percpt_current(void *vars);
void *cfs_percpt_index(void *vars, int idx);

#define cfs_percpt_for_each(var, i, vars)		\
	for (i = 0; i < cfs_percpt_number(vars) &&	\
		((var) = (vars)[i]) != NULL; i++)
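
/*
 * Per-partition data sketch (illustrative, not part of the original
 * header); 'struct foo_counter' and its 'fc_count' field are
 * hypothetical. cfs_percpt_alloc() returns one zeroed object per CPU
 * partition, and cfs_percpt_for_each walks all of them:
 *
 *	struct foo_counter **cnts;
 *	struct foo_counter *cnt;
 *	int i;
 *
 *	cnts = cfs_percpt_alloc(cptab, sizeof(*cnt));
 *	if (cnts == NULL)
 *		return -ENOMEM;
 *
 *	cfs_percpt_for_each(cnt, i, cnts)
 *		cnt->fc_count = 0;
 *
 *	cfs_percpt_free(cnts);
 */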
/*
 * allocate a variable-length array; the returned value is an array of
 * pointers. Caller specifies the length of the array by \a count.
 */
void *cfs_array_alloc(int count, unsigned int size);
void cfs_array_free(void *vars);
#define LASSERT_ATOMIC_ENABLED (1)

#if LASSERT_ATOMIC_ENABLED

/** assert value of @a is equal to @v */
#define LASSERT_ATOMIC_EQ(a, v)				\
do {							\
	LASSERTF(atomic_read(a) == v,			\
		 "value: %d\n", atomic_read((a)));	\
} while (0)

/** assert value of @a is not equal to @v */
#define LASSERT_ATOMIC_NE(a, v)				\
do {							\
	LASSERTF(atomic_read(a) != v,			\
		 "value: %d\n", atomic_read((a)));	\
} while (0)

/** assert value of @a is less than @v */
#define LASSERT_ATOMIC_LT(a, v)				\
do {							\
	LASSERTF(atomic_read(a) < v,			\
		 "value: %d\n", atomic_read((a)));	\
} while (0)

/** assert value of @a is less than or equal to @v */
#define LASSERT_ATOMIC_LE(a, v)				\
do {							\
	LASSERTF(atomic_read(a) <= v,			\
		 "value: %d\n", atomic_read((a)));	\
} while (0)

/** assert value of @a is greater than @v */
#define LASSERT_ATOMIC_GT(a, v)				\
do {							\
	LASSERTF(atomic_read(a) > v,			\
		 "value: %d\n", atomic_read((a)));	\
} while (0)

/** assert value of @a is greater than or equal to @v */
#define LASSERT_ATOMIC_GE(a, v)				\
do {							\
	LASSERTF(atomic_read(a) >= v,			\
		 "value: %d\n", atomic_read((a)));	\
} while (0)

/** assert value of @a is greater than @v1 and less than @v2 */
#define LASSERT_ATOMIC_GT_LT(a, v1, v2)				\
do {								\
	int __v = atomic_read(a);				\
	LASSERTF(__v > v1 && __v < v2, "value: %d\n", __v);	\
} while (0)

/** assert value of @a is greater than @v1 and less than or equal to @v2 */
#define LASSERT_ATOMIC_GT_LE(a, v1, v2)				\
do {								\
	int __v = atomic_read(a);				\
	LASSERTF(__v > v1 && __v <= v2, "value: %d\n", __v);	\
} while (0)

/** assert value of @a is greater than or equal to @v1 and less than @v2 */
#define LASSERT_ATOMIC_GE_LT(a, v1, v2)				\
do {								\
	int __v = atomic_read(a);				\
	LASSERTF(__v >= v1 && __v < v2, "value: %d\n", __v);	\
} while (0)

/** assert value of @a is greater than or equal to @v1 and less than or
 * equal to @v2 */
#define LASSERT_ATOMIC_GE_LE(a, v1, v2)				\
do {								\
	int __v = atomic_read(a);				\
	LASSERTF(__v >= v1 && __v <= v2, "value: %d\n", __v);	\
} while (0)
#else /* !LASSERT_ATOMIC_ENABLED */

#define LASSERT_ATOMIC_EQ(a, v)			do {} while (0)
#define LASSERT_ATOMIC_NE(a, v)			do {} while (0)
#define LASSERT_ATOMIC_LT(a, v)			do {} while (0)
#define LASSERT_ATOMIC_LE(a, v)			do {} while (0)
#define LASSERT_ATOMIC_GT(a, v)			do {} while (0)
#define LASSERT_ATOMIC_GE(a, v)			do {} while (0)
#define LASSERT_ATOMIC_GT_LT(a, v1, v2)		do {} while (0)
#define LASSERT_ATOMIC_GT_LE(a, v1, v2)		do {} while (0)
#define LASSERT_ATOMIC_GE_LT(a, v1, v2)		do {} while (0)
#define LASSERT_ATOMIC_GE_LE(a, v1, v2)		do {} while (0)

#endif /* LASSERT_ATOMIC_ENABLED */
#define LASSERT_ATOMIC_ZERO(a)		LASSERT_ATOMIC_EQ(a, 0)
#define LASSERT_ATOMIC_POS(a)		LASSERT_ATOMIC_GT(a, 0)
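
/*
 * Refcount-check sketch (illustrative, not part of the original header);
 * 'obj' and its atomic 'o_refcount' field are hypothetical:
 *
 *	LASSERT_ATOMIC_POS(&obj->o_refcount);
 *	LASSERT_ATOMIC_GT_LT(&obj->o_refcount, 0, 1000);
 *
 * When LASSERT_ATOMIC_ENABLED is 0 these compile to empty statements,
 * so they are safe to leave in hot paths.
 */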
#define CFS_ALLOC_PTR(ptr)	LIBCFS_ALLOC(ptr, sizeof(*(ptr)))
#define CFS_FREE_PTR(ptr)	LIBCFS_FREE(ptr, sizeof(*(ptr)))
/**
 * percpu partition lock
 *
 * There are some use-cases like this in Lustre:
 * . each CPU partition has its own private data which is frequently changed,
 *   and mostly by the local CPU partition.
 * . all CPU partitions share some global data, these data are rarely changed.
 *
 * LNet is a typical example.
 * The CPU partition lock is designed for this kind of use-case:
 * . each CPU partition has its own private lock
 * . change on private data just needs to take the private lock
 * . read on shared data just needs to take _any_ of the private locks
 * . change on shared data needs to take _all_ private locks,
 *   which is slow and should be really rare.
 */
enum {
	CFS_PERCPT_LOCK_EX	= -1, /* negative */
};
struct cfs_percpt_lock {
	/* cpu-partition-table for this lock */
	struct cfs_cpt_table	*pcl_cptab;
	/* exclusively locked */
	unsigned int		pcl_locked;
	/* private lock table */
	spinlock_t		**pcl_locks;
};
/* return number of private locks */
static inline int
cfs_percpt_lock_num(struct cfs_percpt_lock *pcl)
{
	return cfs_cpt_number(pcl->pcl_cptab);
}
/*
 * create a cpu-partition lock based on CPU partition table \a cptab,
 * with one private lock per partition
 */
struct cfs_percpt_lock *cfs_percpt_lock_alloc(struct cfs_cpt_table *cptab);
/* destroy a cpu-partition lock */
void cfs_percpt_lock_free(struct cfs_percpt_lock *pcl);
/* lock private lock \a index of \a pcl */
void cfs_percpt_lock(struct cfs_percpt_lock *pcl, int index);
/* unlock private lock \a index of \a pcl */
void cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index);
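
/*
 * Locking sketch (illustrative, not part of the original header); 'pcl'
 * is assumed to come from cfs_percpt_lock_alloc(cptab). A private lock
 * protects one partition's data; passing CFS_PERCPT_LOCK_EX as the index
 * takes all private locks for updates to shared data:
 *
 *	cfs_percpt_lock(pcl, cpt);
 *	... modify data private to partition 'cpt' ...
 *	cfs_percpt_unlock(pcl, cpt);
 *
 *	cfs_percpt_lock(pcl, CFS_PERCPT_LOCK_EX);
 *	... modify shared data; slow, should be rare ...
 *	cfs_percpt_unlock(pcl, CFS_PERCPT_LOCK_EX);
 */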
/* create percpt (atomic) refcount based on @cptab */
atomic_t **cfs_percpt_atomic_alloc(struct cfs_cpt_table *cptab, int val);
/* destroy percpt refcount */
void cfs_percpt_atomic_free(atomic_t **refs);
/* return sum of all percpu refs */
int cfs_percpt_atomic_summary(atomic_t **refs);
/**
 * Compile-time assertion.
 *
 * Check an invariant described by a constant expression at compile time by
 * forcing a compiler error if it does not hold. \a cond must be a constant
 * expression as defined by the ISO C Standard:
 *
 *	6.8.4.2  The switch statement
 *
 *	[#3] The expression of each case label shall be an integer
 *	constant expression and no two of the case constant
 *	expressions in the same switch statement shall have the same
 *	value after conversion...
 */
#define CLASSERT(cond) do { switch (42) { case (cond): case 0: break; } } while (0)
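
/*
 * How it works: if 'cond' is false (0), both case labels have the value
 * 0 and the compiler rejects the duplicate; if 'cond' is not a constant
 * expression, it cannot appear in a case label at all. Illustrative
 * examples (not part of the original header; 'struct foo' is
 * hypothetical):
 *
 *	CLASSERT(sizeof(int) == 4);
 *	CLASSERT(offsetof(struct foo, f_bar) == 0);
 *
 * Both checks cost nothing at runtime.
 */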
/* max value for numeric network address */
#define MAX_NUMERIC_VALUE 0xffffffff

/* logical implication */
#define ergo(a, b) (!(a) || (b))
/* logical equivalence */
#define equi(a, b) (!!(a) == !!(b))
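
/*
 * These read naturally inside assertions (illustrative, not part of the
 * original header); 'rc' and 'page' are hypothetical:
 *
 *	LASSERT(ergo(rc == 0, page != NULL));
 *	LASSERT(equi(rc == 0, page != NULL));
 *
 * ergo() asserts "if rc == 0 then page != NULL"; equi() asserts the two
 * conditions are both true or both false.
 */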
/* --------------------------------------------------------------------
 * Support for temporary event tracing with minimal Heisenberg effect.
 * -------------------------------------------------------------------- */

#define MKSTR(ptr) ((ptr) ? (ptr) : "")
static inline int cfs_size_round4(int val)
{
	return (val + 3) & (~0x3);
}

#ifndef HAVE_CFS_SIZE_ROUND
static inline int cfs_size_round(int val)
{
	return (val + 7) & (~0x7);
}

#define HAVE_CFS_SIZE_ROUND
#endif

static inline int cfs_size_round16(int val)
{
	return (val + 0xf) & (~0xf);
}

static inline int cfs_size_round32(int val)
{
	return (val + 0x1f) & (~0x1f);
}

static inline int cfs_size_round0(int val)
{
	if (!val)
		return 0;
	return (val + 1 + 7) & (~0x7);
}

static inline size_t cfs_round_strlen(char *fset)
{
	return (size_t)cfs_size_round((int)strlen(fset) + 1);
}
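
/*
 * Rounding examples (illustrative, not part of the original header):
 * cfs_size_round() rounds up to the next multiple of 8, so
 * cfs_size_round(8) == 8 and cfs_size_round(10) == 16.
 * cfs_size_round0() reserves one extra byte for a terminating NUL before
 * rounding, so cfs_size_round0(8) == 16, while cfs_size_round0(0) stays 0.
 * cfs_round_strlen("abc") is cfs_size_round(3 + 1) == 8.
 */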
#define LOGL(var, len, ptr)					\
do {								\
	if (var)						\
		memcpy((char *)ptr, (const char *)var, len);	\
	ptr += cfs_size_round(len);				\
} while (0)

#define LOGU(var, len, ptr)					\
do {								\
	if (ptr)						\
		memcpy((char *)var, (const char *)ptr, len);	\
	ptr += cfs_size_round(len);				\
} while (0)

#define LOGL0(var, len, ptr)					\
do {								\
	if (!len)						\
		break;						\
	memcpy((char *)ptr, (const char *)var, len);		\
	*((char *)(ptr) + len) = 0;				\
	ptr += cfs_size_round(len + 1);				\
} while (0)
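
/*
 * Packing sketch (illustrative, not part of the original header): LOGL
 * copies a field into a buffer and advances the cursor to the next
 * 8-byte-aligned slot; LOGU does the reverse. 'buf', 'name' and
 * 'name_len' are hypothetical:
 *
 *	char *p = buf;
 *	LOGL(name, name_len, p);	(pack; 'p' advances by rounded len)
 *	...
 *	p = buf;
 *	LOGU(name, name_len, p);	(unpack the same field)
 *
 * LOGL0 packs with an added NUL terminator, rounding len + 1 instead.
 */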