/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Client Lustre Object.
 *
 * Author: Nikita Danilov <nikita.danilov@sun.com>
 * Author: Jinshan Xiong <jinshan.xiong@intel.com>
 */
#define DEBUG_SUBSYSTEM S_CLASS

#include "../../include/linux/libcfs/libcfs.h"
/* class_put_type() */
#include "../include/obd_class.h"
#include "../include/obd_support.h"
#include "../include/lustre_fid.h"
#include <linux/list.h>
#include "../../include/linux/libcfs/libcfs_hash.h" /* for cfs_hash stuff */
#include "../include/cl_object.h"
#include "cl_internal.h"
static struct kmem_cache *cl_env_kmem;

/** Lock class of cl_object_header::coh_attr_guard */
static struct lock_class_key cl_attr_guard_class;

extern __u32 lu_context_tags_default;
extern __u32 lu_session_tags_default;
/**
 * Initialize cl_object_header.
 */
int cl_object_header_init(struct cl_object_header *h)
{
        int result;

        result = lu_object_header_init(&h->coh_lu);
        if (result == 0) {
                spin_lock_init(&h->coh_attr_guard);
                lockdep_set_class(&h->coh_attr_guard, &cl_attr_guard_class);
                h->coh_page_bufsize = 0;
        }
        return result;
}
EXPORT_SYMBOL(cl_object_header_init);
/**
 * Returns a cl_object with a given \a fid.
 *
 * Returns either cached or newly created object. Additional reference on the
 * returned object is acquired.
 *
 * \see lu_object_find(), cl_page_find(), cl_lock_find()
 */
struct cl_object *cl_object_find(const struct lu_env *env,
                                 struct cl_device *cd, const struct lu_fid *fid,
                                 const struct cl_object_conf *c)
{
        return lu2cl(lu_object_find_slice(env, cl2lu_dev(cd), fid, &c->coc_lu));
}
EXPORT_SYMBOL(cl_object_find);
/**
 * Releases a reference on \a o.
 *
 * When last reference is released object is returned to the cache, unless
 * lu_object_header_flags::LU_OBJECT_HEARD_BANSHEE bit is set in its header.
 *
 * \see cl_page_put(), cl_lock_put().
 */
void cl_object_put(const struct lu_env *env, struct cl_object *o)
{
        lu_object_put(env, &o->co_lu);
}
EXPORT_SYMBOL(cl_object_put);
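/*
 * Illustrative sketch, not part of the original file: the usual
 * lookup/release pairing for client objects. The helper name is
 * hypothetical; cl_object_find() and cl_object_put() are the real API.
 */
static int __maybe_unused
cl_object_find_example(const struct lu_env *env, struct cl_device *cd,
                       const struct lu_fid *fid,
                       const struct cl_object_conf *conf)
{
        struct cl_object *obj;

        /* Returns a cached or freshly created object with +1 reference. */
        obj = cl_object_find(env, cd, fid, conf);
        if (IS_ERR(obj))
                return PTR_ERR(obj);

        /* ... use the object ... */

        /* Drop the reference; the object may return to the cache. */
        cl_object_put(env, obj);
        return 0;
}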
/**
 * Acquire an additional reference to the object \a o.
 *
 * This can only be used to acquire _additional_ reference, i.e., caller
 * already has to possess at least one reference to \a o before calling this.
 *
 * \see cl_page_get(), cl_lock_get().
 */
void cl_object_get(struct cl_object *o)
{
        lu_object_get(&o->co_lu);
}
EXPORT_SYMBOL(cl_object_get);
/**
 * Returns the top-object for a given \a o.
 */
struct cl_object *cl_object_top(struct cl_object *o)
{
        struct cl_object_header *hdr = cl_object_header(o);
        struct cl_object *top;

        while (hdr->coh_parent)
                hdr = hdr->coh_parent;

        top = lu2cl(lu_object_top(&hdr->coh_lu));
        CDEBUG(D_TRACE, "%p -> %p\n", o, top);
        return top;
}
EXPORT_SYMBOL(cl_object_top);
/**
 * Returns pointer to the lock protecting data-attributes for the given object.
 *
 * Data-attributes are protected by the cl_object_header::coh_attr_guard
 * spin-lock in the top-object.
 *
 * \see cl_attr, cl_object_attr_lock(), cl_object_operations::coo_attr_get().
 */
static spinlock_t *cl_object_attr_guard(struct cl_object *o)
{
        return &cl_object_header(cl_object_top(o))->coh_attr_guard;
}
/**
 * Locks data-attributes.
 *
 * Prevents data-attributes from changing, until lock is released by
 * cl_object_attr_unlock(). This has to be called before calls to
 * cl_object_attr_get(), cl_object_attr_set().
 */
void cl_object_attr_lock(struct cl_object *o)
        __acquires(cl_object_attr_guard(o))
{
        spin_lock(cl_object_attr_guard(o));
}
EXPORT_SYMBOL(cl_object_attr_lock);

/**
 * Releases data-attributes lock, acquired by cl_object_attr_lock().
 */
void cl_object_attr_unlock(struct cl_object *o)
        __releases(cl_object_attr_guard(o))
{
        spin_unlock(cl_object_attr_guard(o));
}
EXPORT_SYMBOL(cl_object_attr_unlock);
/**
 * Returns data-attributes of an object \a obj.
 *
 * Every layer is asked (by calling cl_object_operations::coo_attr_get())
 * top-to-bottom to fill in parts of \a attr that this layer is responsible
 * for.
 */
int cl_object_attr_get(const struct lu_env *env, struct cl_object *obj,
                       struct cl_attr *attr)
{
        struct lu_object_header *top;
        int result = 0;

        assert_spin_locked(cl_object_attr_guard(obj));

        top = obj->co_lu.lo_header;
        list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
                if (obj->co_ops->coo_attr_get) {
                        result = obj->co_ops->coo_attr_get(env, obj, attr);
                        if (result != 0) {
                                if (result > 0)
                                        result = 0;
                                break;
                        }
                }
        }
        return result;
}
EXPORT_SYMBOL(cl_object_attr_get);
/**
 * Updates data-attributes of an object \a obj.
 *
 * Only attributes mentioned in the validness bit-mask \a v are updated.
 * Calls cl_object_operations::coo_attr_set() on every layer, bottom to top.
 */
int cl_object_attr_set(const struct lu_env *env, struct cl_object *obj,
                       const struct cl_attr *attr, unsigned v)
{
        struct lu_object_header *top;
        int result = 0;

        assert_spin_locked(cl_object_attr_guard(obj));

        top = obj->co_lu.lo_header;
        list_for_each_entry_reverse(obj, &top->loh_layers, co_lu.lo_linkage) {
                if (obj->co_ops->coo_attr_set) {
                        result = obj->co_ops->coo_attr_set(env, obj, attr, v);
                        if (result != 0) {
                                if (result > 0)
                                        result = 0;
                                break;
                        }
                }
        }
        return result;
}
EXPORT_SYMBOL(cl_object_attr_set);
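/*
 * Illustrative sketch, not part of the original file: data-attributes
 * must be read under the attribute guard, per the locking protocol
 * documented above. The helper name is hypothetical.
 */
static int __maybe_unused
cl_object_size_example(const struct lu_env *env, struct cl_object *obj,
                       u64 *size)
{
        struct cl_attr attr = { 0 };
        int rc;

        /* Take the top-object's coh_attr_guard spin-lock. */
        cl_object_attr_lock(obj);
        rc = cl_object_attr_get(env, obj, &attr);
        cl_object_attr_unlock(obj);

        if (rc == 0)
                *size = attr.cat_size;
        return rc;
}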
/**
 * Notifies layers (bottom-to-top) that glimpse AST was received.
 *
 * Layers have to fill \a lvb fields with information that will be shipped
 * back to glimpse issuer.
 *
 * \see cl_lock_operations::clo_glimpse()
 */
int cl_object_glimpse(const struct lu_env *env, struct cl_object *obj,
                      struct ost_lvb *lvb)
{
        struct lu_object_header *top;
        int result = 0;

        top = obj->co_lu.lo_header;
        list_for_each_entry_reverse(obj, &top->loh_layers, co_lu.lo_linkage) {
                if (obj->co_ops->coo_glimpse) {
                        result = obj->co_ops->coo_glimpse(env, obj, lvb);
                        if (result != 0)
                                break;
                }
        }
        LU_OBJECT_HEADER(D_DLMTRACE, env, lu_object_top(top),
                         "size: %llu mtime: %llu atime: %llu ctime: %llu blocks: %llu\n",
                         lvb->lvb_size, lvb->lvb_mtime, lvb->lvb_atime,
                         lvb->lvb_ctime, lvb->lvb_blocks);
        return result;
}
EXPORT_SYMBOL(cl_object_glimpse);
/**
 * Updates a configuration of an object \a obj.
 */
int cl_conf_set(const struct lu_env *env, struct cl_object *obj,
                const struct cl_object_conf *conf)
{
        struct lu_object_header *top;
        int result = 0;

        top = obj->co_lu.lo_header;
        list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
                if (obj->co_ops->coo_conf_set) {
                        result = obj->co_ops->coo_conf_set(env, obj, conf);
                        if (result != 0)
                                break;
                }
        }
        return result;
}
EXPORT_SYMBOL(cl_conf_set);
/**
 * Prunes caches of pages and locks for this object.
 */
int cl_object_prune(const struct lu_env *env, struct cl_object *obj)
{
        struct lu_object_header *top;
        struct cl_object *o;
        int result = 0;

        top = obj->co_lu.lo_header;
        list_for_each_entry(o, &top->loh_layers, co_lu.lo_linkage) {
                if (o->co_ops->coo_prune) {
                        result = o->co_ops->coo_prune(env, o);
                        if (result != 0)
                                break;
                }
        }
        return result;
}
EXPORT_SYMBOL(cl_object_prune);
/**
 * Helper function removing all object locks, and marking object for
 * deletion. All object pages must have been deleted at this point.
 *
 * This is called by cl_inode_fini() and lov_object_delete() to destroy top-
 * and sub- objects respectively.
 */
void cl_object_kill(const struct lu_env *env, struct cl_object *obj)
{
        struct cl_object_header *hdr = cl_object_header(obj);

        set_bit(LU_OBJECT_HEARD_BANSHEE, &hdr->coh_lu.loh_flags);
}
EXPORT_SYMBOL(cl_object_kill);
void cache_stats_init(struct cache_stats *cs, const char *name)
{
        int i;

        cs->cs_name = name;
        for (i = 0; i < CS_NR; i++)
                atomic_set(&cs->cs_stats[i], 0);
}
static int cache_stats_print(const struct cache_stats *cs,
                             struct seq_file *m, int h)
{
        int i;

        /*
         *   lookup    hit  total cached create
         * env: ...... ...... ...... ...... ......
         */
        if (h) {
                const char *names[CS_NR] = CS_NAMES;

                seq_printf(m, "%6s", " ");
                for (i = 0; i < CS_NR; i++)
                        seq_printf(m, "%8s", names[i]);
                seq_printf(m, "\n");
        }

        seq_printf(m, "%5.5s:", cs->cs_name);
        for (i = 0; i < CS_NR; i++)
                seq_printf(m, "%8u", atomic_read(&cs->cs_stats[i]));
        return 0;
}
static void cl_env_percpu_refill(void);

/**
 * Initialize client site.
 *
 * Perform common initialization (lu_site_init()), and initialize statistical
 * counters. Also perform global initializations on the first call.
 */
int cl_site_init(struct cl_site *s, struct cl_device *d)
{
        size_t i;
        int result;

        result = lu_site_init(&s->cs_lu, &d->cd_lu_dev);
        if (result == 0) {
                cache_stats_init(&s->cs_pages, "pages");
                for (i = 0; i < ARRAY_SIZE(s->cs_pages_state); ++i)
                        atomic_set(&s->cs_pages_state[i], 0);
                cl_env_percpu_refill();
        }
        return result;
}
EXPORT_SYMBOL(cl_site_init);
/**
 * Finalize client site. Dual to cl_site_init().
 */
void cl_site_fini(struct cl_site *s)
{
        lu_site_fini(&s->cs_lu);
}
EXPORT_SYMBOL(cl_site_fini);
static struct cache_stats cl_env_stats = {
        .cs_name  = "envs",
        .cs_stats = { ATOMIC_INIT(0), }
};
/**
 * Outputs client site statistical counters into a buffer. Suitable for
 * ll_rd_*()-style functions.
 */
int cl_site_stats_print(const struct cl_site *site, struct seq_file *m)
{
        static const char *pstate[] = {
                [CPS_CACHED]  = "c",
                [CPS_OWNED]   = "o",
                [CPS_PAGEOUT] = "w",
                [CPS_PAGEIN]  = "r",
                [CPS_FREEING] = "f"
        };
        size_t i;

        /*
         *   lookup    hit  total   busy create
         * pages: ...... ...... ...... ...... ...... [...... ...... ...... ......]
         * locks: ...... ...... ...... ...... ...... [...... ...... ...... ...... ......]
         *   env: ...... ...... ...... ...... ......
         */
        lu_site_stats_print(&site->cs_lu, m);
        cache_stats_print(&site->cs_pages, m, 1);
        seq_printf(m, " [");
        for (i = 0; i < ARRAY_SIZE(site->cs_pages_state); ++i)
                seq_printf(m, "%s: %u ", pstate[i],
                           atomic_read(&site->cs_pages_state[i]));
        seq_printf(m, "]\n");
        cache_stats_print(&cl_env_stats, m, 0);
        seq_printf(m, "\n");
        return 0;
}
EXPORT_SYMBOL(cl_site_stats_print);
/*****************************************************************************
 *
 * lu_env handling on client.
 *
 */

/**
 * The most efficient way is to store cl_env pointer in task specific
 * structures. On Linux, it won't be easy to use task_struct->journal_info
 * because Lustre code may call into other filesystems, which have certain
 * assumptions about journal_info. Currently the following fields in
 * task_struct have been identified as usable for this purpose:
 *  - tux_info: only on RedHat kernels.
 *
 * \note As long as we use task_struct to store cl_env, we assume that once
 * called into Lustre, we'll never call into the other part of the kernel
 * which will use those fields in task_struct without explicitly exiting
 * Lustre.
 *
 * If no space in task_struct is available, a hash will be used.
 */
static LIST_HEAD(cl_envs);
static unsigned int cl_envs_cached_nr;
static unsigned int cl_envs_cached_max = 128; /* XXX: prototype: arbitrary
                                               * limit for now
                                               */
static DEFINE_SPINLOCK(cl_envs_guard);
struct cl_env {
        void             *ce_magic;
        struct lu_env     ce_lu;
        struct lu_context ce_ses;

        /**
         * This allows cl_env to be entered into cl_env_hash which implements
         * the current thread -> client environment lookup.
         */
        struct hlist_node ce_node;
        /**
         * Owner for the current cl_env.
         *
         * If LL_TASK_CL_ENV is defined, this points to the owning current,
         * only for debugging purposes; otherwise a hash is used, and this is
         * the key for cfs_hash. Currently the thread pid is stored. Note that
         * using the thread pointer would lead to an unbalanced hash because
         * of its allocation locality, which can vary across platforms, OSes
         * and even kernel versions.
         */
        void             *ce_owner;

        /*
         * Linkage into global list of all client environments. Used for
         * garbage collection.
         */
        struct list_head  ce_linkage;
        /*
         * Reference count of this environment.
         */
        int               ce_ref;
        /*
         * Debugging field: address of the caller who made original
         * allocation.
         */
        void             *ce_debug;
};

#define CL_ENV_INC(counter)
#define CL_ENV_DEC(counter)
static void cl_env_init0(struct cl_env *cle, void *debug)
{
        LASSERT(cle->ce_ref == 0);
        LASSERT(cle->ce_magic == &cl_env_init0);
        LASSERT(!cle->ce_debug && !cle->ce_owner);

        cle->ce_ref = 1;
        cle->ce_debug = debug;
        CL_ENV_INC(busy);
}
/*
 * The implementation of using hash table to connect cl_env and thread
 */

static struct cfs_hash *cl_env_hash;

static unsigned cl_env_hops_hash(struct cfs_hash *lh,
                                 const void *key, unsigned mask)
{
#if BITS_PER_LONG == 64
        return cfs_hash_u64_hash((__u64)key, mask);
#else
        return cfs_hash_u32_hash((__u32)key, mask);
#endif
}

static void *cl_env_hops_obj(struct hlist_node *hn)
{
        struct cl_env *cle = hlist_entry(hn, struct cl_env, ce_node);

        LASSERT(cle->ce_magic == &cl_env_init0);
        return (void *)cle;
}

static int cl_env_hops_keycmp(const void *key, struct hlist_node *hn)
{
        struct cl_env *cle = cl_env_hops_obj(hn);

        LASSERT(cle->ce_owner);
        return (key == cle->ce_owner);
}

static void cl_env_hops_noop(struct cfs_hash *hs, struct hlist_node *hn)
{
        struct cl_env *cle = hlist_entry(hn, struct cl_env, ce_node);

        LASSERT(cle->ce_magic == &cl_env_init0);
}

static struct cfs_hash_ops cl_env_hops = {
        .hs_hash       = cl_env_hops_hash,
        .hs_key        = cl_env_hops_obj,
        .hs_keycmp     = cl_env_hops_keycmp,
        .hs_object     = cl_env_hops_obj,
        .hs_get        = cl_env_hops_noop,
        .hs_put_locked = cl_env_hops_noop,
};
static inline struct cl_env *cl_env_fetch(void)
{
        struct cl_env *cle;

        cle = cfs_hash_lookup(cl_env_hash, (void *)(long)current->pid);
        LASSERT(ergo(cle, cle->ce_magic == &cl_env_init0));
        return cle;
}

static inline void cl_env_attach(struct cl_env *cle)
{
        if (cle) {
                int rc;

                LASSERT(!cle->ce_owner);
                cle->ce_owner = (void *)(long)current->pid;
                rc = cfs_hash_add_unique(cl_env_hash, cle->ce_owner,
                                         &cle->ce_node);
                LASSERT(rc == 0);
        }
}

static inline void cl_env_do_detach(struct cl_env *cle)
{
        void *cookie;

        LASSERT(cle->ce_owner == (void *)(long)current->pid);
        cookie = cfs_hash_del(cl_env_hash, cle->ce_owner,
                              &cle->ce_node);
        LASSERT(cookie == cle);
        cle->ce_owner = NULL;
}
static int cl_env_store_init(void)
{
        cl_env_hash = cfs_hash_create("cl_env",
                                      HASH_CL_ENV_BITS, HASH_CL_ENV_BITS,
                                      HASH_CL_ENV_BKT_BITS, 0,
                                      CFS_HASH_MIN_THETA,
                                      CFS_HASH_MAX_THETA,
                                      &cl_env_hops,
                                      CFS_HASH_RW_BKTLOCK);
        return cl_env_hash ? 0 : -ENOMEM;
}

static void cl_env_store_fini(void)
{
        cfs_hash_putref(cl_env_hash);
}
static inline struct cl_env *cl_env_detach(struct cl_env *cle)
{
        if (!cle)
                cle = cl_env_fetch();

        if (cle && cle->ce_owner)
                cl_env_do_detach(cle);

        return cle;
}
static struct lu_env *cl_env_new(__u32 ctx_tags, __u32 ses_tags, void *debug)
{
        struct lu_env *env;
        struct cl_env *cle;

        cle = kmem_cache_zalloc(cl_env_kmem, GFP_NOFS);
        if (cle) {
                int rc;

                INIT_LIST_HEAD(&cle->ce_linkage);
                cle->ce_magic = &cl_env_init0;
                env = &cle->ce_lu;
                rc = lu_env_init(env, ctx_tags | LCT_CL_THREAD);
                if (rc == 0) {
                        rc = lu_context_init(&cle->ce_ses,
                                             ses_tags | LCT_SESSION);
                        if (rc == 0) {
                                lu_context_enter(&cle->ce_ses);
                                env->le_ses = &cle->ce_ses;
                                cl_env_init0(cle, debug);
                        } else {
                                lu_env_fini(env);
                        }
                }
                if (rc != 0) {
                        kmem_cache_free(cl_env_kmem, cle);
                        env = ERR_PTR(rc);
                } else {
                        CL_ENV_INC(create);
                        CL_ENV_INC(total);
                }
        } else {
                env = ERR_PTR(-ENOMEM);
        }
        return env;
}
static void cl_env_fini(struct cl_env *cle)
{
        CL_ENV_DEC(total);
        lu_context_fini(&cle->ce_lu.le_ctx);
        lu_context_fini(&cle->ce_ses);
        kmem_cache_free(cl_env_kmem, cle);
}
static struct lu_env *cl_env_obtain(void *debug)
{
        struct cl_env *cle;
        struct lu_env *env;

        spin_lock(&cl_envs_guard);
        LASSERT(equi(cl_envs_cached_nr == 0, list_empty(&cl_envs)));
        if (cl_envs_cached_nr > 0) {
                int rc;

                cle = container_of(cl_envs.next, struct cl_env, ce_linkage);
                list_del_init(&cle->ce_linkage);
                cl_envs_cached_nr--;
                spin_unlock(&cl_envs_guard);

                env = &cle->ce_lu;
                rc = lu_env_refill(env);
                if (rc == 0) {
                        cl_env_init0(cle, debug);
                        lu_context_enter(&env->le_ctx);
                        lu_context_enter(&cle->ce_ses);
                } else {
                        cl_env_fini(cle);
                        env = ERR_PTR(rc);
                }
        } else {
                spin_unlock(&cl_envs_guard);
                env = cl_env_new(lu_context_tags_default,
                                 lu_session_tags_default, debug);
        }
        return env;
}
static inline struct cl_env *cl_env_container(struct lu_env *env)
{
        return container_of(env, struct cl_env, ce_lu);
}
static struct lu_env *cl_env_peek(int *refcheck)
{
        struct lu_env *env;
        struct cl_env *cle;

        CL_ENV_INC(lookup);

        /* check that we don't go far from untrusted pointer */
        CLASSERT(offsetof(struct cl_env, ce_magic) == 0);

        env = NULL;
        cle = cl_env_fetch();
        if (cle) {
                CL_ENV_INC(hit);
                env = &cle->ce_lu;
                *refcheck = ++cle->ce_ref;
        }
        CDEBUG(D_OTHER, "%d@%p\n", cle ? cle->ce_ref : 0, cle);
        return env;
}
/**
 * Returns lu_env: if there already is an environment associated with the
 * current thread, it is returned, otherwise, new environment is allocated.
 *
 * Allocations are amortized through the global cache of environments.
 *
 * \param refcheck pointer to a counter used to detect environment leaks. In
 * the usual case cl_env_get() and cl_env_put() are called in the same lexical
 * scope and pointer to the same integer is passed as \a refcheck. This is
 * used to detect missed cl_env_put().
 *
 * \see cl_env_put()
 */
struct lu_env *cl_env_get(int *refcheck)
{
        struct lu_env *env;

        env = cl_env_peek(refcheck);
        if (!env) {
                env = cl_env_obtain(__builtin_return_address(0));
                if (!IS_ERR(env)) {
                        struct cl_env *cle;

                        cle = cl_env_container(env);
                        cl_env_attach(cle);
                        *refcheck = cle->ce_ref;
                        CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
                }
        }
        return env;
}
EXPORT_SYMBOL(cl_env_get);
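/*
 * Illustrative sketch, not part of the original file: the canonical
 * cl_env_get()/cl_env_put() bracket. The same refcheck integer is passed
 * to both calls so a missed put trips the assertions in cl_env_put().
 */
static void __maybe_unused cl_env_usage_example(void)
{
        struct lu_env *env;
        int refcheck;

        env = cl_env_get(&refcheck);
        if (IS_ERR(env))
                return;

        /* ... per-thread work that needs an environment ... */

        cl_env_put(env, &refcheck);
}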
/**
 * Forces an allocation of a fresh environment with given tags.
 *
 * \see cl_env_get()
 */
struct lu_env *cl_env_alloc(int *refcheck, __u32 tags)
{
        struct lu_env *env;

        LASSERT(!cl_env_peek(refcheck));
        env = cl_env_new(tags, tags, __builtin_return_address(0));
        if (!IS_ERR(env)) {
                struct cl_env *cle;

                cle = cl_env_container(env);
                *refcheck = cle->ce_ref;
                CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
        }
        return env;
}
EXPORT_SYMBOL(cl_env_alloc);
static void cl_env_exit(struct cl_env *cle)
{
        LASSERT(!cle->ce_owner);
        lu_context_exit(&cle->ce_lu.le_ctx);
        lu_context_exit(&cle->ce_ses);
}
/**
 * Finalizes and frees a given number of cached environments. This is done to
 * (1) free some memory (not currently hooked into VM), or (2) release
 * references to modules.
 */
unsigned int cl_env_cache_purge(unsigned int nr)
{
        struct cl_env *cle;

        spin_lock(&cl_envs_guard);
        for (; !list_empty(&cl_envs) && nr > 0; --nr) {
                cle = container_of(cl_envs.next, struct cl_env, ce_linkage);
                list_del_init(&cle->ce_linkage);
                LASSERT(cl_envs_cached_nr > 0);
                cl_envs_cached_nr--;
                spin_unlock(&cl_envs_guard);

                cl_env_fini(cle);
                spin_lock(&cl_envs_guard);
        }
        LASSERT(equi(cl_envs_cached_nr == 0, list_empty(&cl_envs)));
        spin_unlock(&cl_envs_guard);
        return nr;
}
EXPORT_SYMBOL(cl_env_cache_purge);
/**
 * Release an environment.
 *
 * Decrement \a env reference counter. When counter drops to 0, nothing in
 * this thread is using environment and it is returned to the allocation
 * cache, or freed straight away, if cache is large enough.
 */
void cl_env_put(struct lu_env *env, int *refcheck)
{
        struct cl_env *cle;

        cle = cl_env_container(env);

        LASSERT(cle->ce_ref > 0);
        LASSERT(ergo(refcheck, cle->ce_ref == *refcheck));

        CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
        if (--cle->ce_ref == 0) {
                CL_ENV_DEC(busy);
                cl_env_detach(cle);
                cle->ce_debug = NULL;
                cl_env_exit(cle);
                /*
                 * Don't bother to take a lock here.
                 *
                 * Return environment to the cache only when it was allocated
                 * with the standard tags.
                 */
                if (cl_envs_cached_nr < cl_envs_cached_max &&
                    (env->le_ctx.lc_tags & ~LCT_HAS_EXIT) == LCT_CL_THREAD &&
                    (env->le_ses->lc_tags & ~LCT_HAS_EXIT) == LCT_SESSION) {
                        spin_lock(&cl_envs_guard);
                        list_add(&cle->ce_linkage, &cl_envs);
                        cl_envs_cached_nr++;
                        spin_unlock(&cl_envs_guard);
                } else {
                        cl_env_fini(cle);
                }
        }
}
EXPORT_SYMBOL(cl_env_put);
/**
 * Declares a point of re-entrancy.
 *
 * \see cl_env_reexit()
 */
void *cl_env_reenter(void)
{
        return cl_env_detach(NULL);
}
EXPORT_SYMBOL(cl_env_reenter);

/**
 * Exits re-entrancy.
 */
void cl_env_reexit(void *cookie)
{
        cl_env_detach(NULL);
        cl_env_attach(cookie);
}
EXPORT_SYMBOL(cl_env_reexit);
/**
 * Setup user-supplied \a env as a current environment. This is to be used to
 * guarantee that an environment exists even when cl_env_get() fails. It is
 * up to the user to ensure proper concurrency control.
 *
 * \see cl_env_unplant()
 */
void cl_env_implant(struct lu_env *env, int *refcheck)
{
        struct cl_env *cle = cl_env_container(env);

        LASSERT(cle->ce_ref > 0);

        cl_env_attach(cle);
        cl_env_get(refcheck);
        CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
}
EXPORT_SYMBOL(cl_env_implant);

/**
 * Detach environment installed earlier by cl_env_implant().
 */
void cl_env_unplant(struct lu_env *env, int *refcheck)
{
        struct cl_env *cle = cl_env_container(env);

        LASSERT(cle->ce_ref > 1);

        CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);

        cl_env_detach(cle);
        cl_env_put(env, refcheck);
}
EXPORT_SYMBOL(cl_env_unplant);
struct lu_env *cl_env_nested_get(struct cl_env_nest *nest)
{
        struct lu_env *env;

        nest->cen_cookie = NULL;
        env = cl_env_peek(&nest->cen_refcheck);
        if (env) {
                if (!cl_io_is_going(env))
                        return env;
                cl_env_put(env, &nest->cen_refcheck);
                nest->cen_cookie = cl_env_reenter();
        }
        env = cl_env_get(&nest->cen_refcheck);
        if (IS_ERR(env)) {
                cl_env_reexit(nest->cen_cookie);
                return env;
        }

        LASSERT(!cl_io_is_going(env));
        return env;
}
EXPORT_SYMBOL(cl_env_nested_get);

void cl_env_nested_put(struct cl_env_nest *nest, struct lu_env *env)
{
        cl_env_put(env, &nest->cen_refcheck);
        cl_env_reexit(nest->cen_cookie);
}
EXPORT_SYMBOL(cl_env_nested_put);
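/*
 * Illustrative sketch, not part of the original file: re-entering the
 * client from a context that may already own a busy environment.
 * cl_env_nested_get() parks the current environment via cl_env_reenter()
 * and hands out an idle one; cl_env_nested_put() restores the original.
 */
static void __maybe_unused cl_env_nested_example(void)
{
        struct cl_env_nest nest;
        struct lu_env *env;

        env = cl_env_nested_get(&nest);
        if (IS_ERR(env))
                return;

        /* ... nested processing with a guaranteed-idle environment ... */

        cl_env_nested_put(&nest, env);
}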
/**
 * Converts struct ost_lvb to struct cl_attr.
 */
void cl_lvb2attr(struct cl_attr *attr, const struct ost_lvb *lvb)
{
        attr->cat_size = lvb->lvb_size;
        attr->cat_mtime = lvb->lvb_mtime;
        attr->cat_atime = lvb->lvb_atime;
        attr->cat_ctime = lvb->lvb_ctime;
        attr->cat_blocks = lvb->lvb_blocks;
}
EXPORT_SYMBOL(cl_lvb2attr);
static struct cl_env cl_env_percpu[NR_CPUS];

static int cl_env_percpu_init(void)
{
        struct cl_env *cle;
        int tags = LCT_REMEMBER | LCT_NOREF;
        int i, j;
        int rc = 0;

        for_each_possible_cpu(i) {
                struct lu_env *env;

                cle = &cl_env_percpu[i];
                env = &cle->ce_lu;

                INIT_LIST_HEAD(&cle->ce_linkage);
                cle->ce_magic = &cl_env_init0;
                rc = lu_env_init(env, LCT_CL_THREAD | tags);
                if (rc == 0) {
                        rc = lu_context_init(&cle->ce_ses, LCT_SESSION | tags);
                        if (rc == 0) {
                                lu_context_enter(&cle->ce_ses);
                                env->le_ses = &cle->ce_ses;
                        } else {
                                lu_env_fini(env);
                        }
                }
                if (rc != 0)
                        break;
        }
        if (rc != 0) {
                /* Indices 0 to i (excluding i) were correctly initialized,
                 * thus we must uninitialize up to i, the rest are undefined.
                 */
                for (j = 0; j < i; j++) {
                        cle = &cl_env_percpu[j];
                        lu_context_exit(&cle->ce_ses);
                        lu_context_fini(&cle->ce_ses);
                        lu_env_fini(&cle->ce_lu);
                }
        }

        return rc;
}
static void cl_env_percpu_fini(void)
{
        int i;

        for_each_possible_cpu(i) {
                struct cl_env *cle = &cl_env_percpu[i];

                lu_context_exit(&cle->ce_ses);
                lu_context_fini(&cle->ce_ses);
                lu_env_fini(&cle->ce_lu);
        }
}

static void cl_env_percpu_refill(void)
{
        int i;

        for_each_possible_cpu(i)
                lu_env_refill(&cl_env_percpu[i].ce_lu);
}
void cl_env_percpu_put(struct lu_env *env)
{
        struct cl_env *cle;
        int cpu;

        cpu = smp_processor_id();
        cle = cl_env_container(env);
        LASSERT(cle == &cl_env_percpu[cpu]);

        cle->ce_ref--;
        LASSERT(cle->ce_ref == 0);

        CL_ENV_DEC(busy);
        cl_env_detach(cle);
        cle->ce_debug = NULL;

        put_cpu();
}
EXPORT_SYMBOL(cl_env_percpu_put);

struct lu_env *cl_env_percpu_get(void)
{
        struct cl_env *cle;

        cle = &cl_env_percpu[get_cpu()];
        cl_env_init0(cle, __builtin_return_address(0));

        cl_env_attach(cle);
        return &cle->ce_lu;
}
EXPORT_SYMBOL(cl_env_percpu_get);
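/*
 * Illustrative sketch, not part of the original file: the per-CPU variant
 * pins the caller to a CPU (get_cpu() in cl_env_percpu_get() above), so
 * the section between get and put must be short and must not sleep.
 */
static void __maybe_unused cl_env_percpu_example(void)
{
        struct lu_env *env;

        env = cl_env_percpu_get();

        /* ... brief, non-sleeping work ... */

        cl_env_percpu_put(env);
}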
/*****************************************************************************
 *
 * Temporary prototype thing: mirror obd-devices into cl devices.
 *
 */

struct cl_device *cl_type_setup(const struct lu_env *env, struct lu_site *site,
                                struct lu_device_type *ldt,
                                struct lu_device *next)
{
        const char *typename;
        struct lu_device *d;

        typename = ldt->ldt_name;
        d = ldt->ldt_ops->ldto_device_alloc(env, ldt, NULL);
        if (!IS_ERR(d)) {
                int rc;

                if (site)
                        d->ld_site = site;
                rc = ldt->ldt_ops->ldto_device_init(env, d, typename, next);
                if (rc == 0) {
                        lu_device_get(d);
                        lu_ref_add(&d->ld_reference,
                                   "lu-stack", &lu_site_init);
                } else {
                        ldt->ldt_ops->ldto_device_free(env, d);
                        CERROR("can't init device '%s', %d\n", typename, rc);
                        d = ERR_PTR(rc);
                }
        } else {
                CERROR("Cannot allocate device: '%s'\n", typename);
        }
        return lu2cl_dev(d);
}
EXPORT_SYMBOL(cl_type_setup);
/**
 * Finalize device stack by calling lu_stack_fini().
 */
void cl_stack_fini(const struct lu_env *env, struct cl_device *cl)
{
        lu_stack_fini(env, cl2lu_dev(cl));
}
EXPORT_SYMBOL(cl_stack_fini);
static struct lu_context_key cl_key;

struct cl_thread_info *cl_env_info(const struct lu_env *env)
{
        return lu_context_key_get(&env->le_ctx, &cl_key);
}

/* defines cl0_key_{init,fini}() */
LU_KEY_INIT_FINI(cl0, struct cl_thread_info);

static void *cl_key_init(const struct lu_context *ctx,
                         struct lu_context_key *key)
{
        struct cl_thread_info *info;

        info = cl0_key_init(ctx, key);
        if (!IS_ERR(info)) {
                size_t i;

                for (i = 0; i < ARRAY_SIZE(info->clt_counters); ++i)
                        lu_ref_init(&info->clt_counters[i].ctc_locks_locked);
        }
        return info;
}

static void cl_key_fini(const struct lu_context *ctx,
                        struct lu_context_key *key, void *data)
{
        struct cl_thread_info *info;
        size_t i;

        info = data;
        for (i = 0; i < ARRAY_SIZE(info->clt_counters); ++i)
                lu_ref_fini(&info->clt_counters[i].ctc_locks_locked);
        cl0_key_fini(ctx, key, data);
}

static void cl_key_exit(const struct lu_context *ctx,
                        struct lu_context_key *key, void *data)
{
        struct cl_thread_info *info = data;
        size_t i;

        for (i = 0; i < ARRAY_SIZE(info->clt_counters); ++i) {
                LASSERT(info->clt_counters[i].ctc_nr_held == 0);
                LASSERT(info->clt_counters[i].ctc_nr_used == 0);
                LASSERT(info->clt_counters[i].ctc_nr_locks_acquired == 0);
                LASSERT(info->clt_counters[i].ctc_nr_locks_locked == 0);
                lu_ref_fini(&info->clt_counters[i].ctc_locks_locked);
                lu_ref_init(&info->clt_counters[i].ctc_locks_locked);
        }
}

static struct lu_context_key cl_key = {
        .lct_tags = LCT_CL_THREAD,
        .lct_init = cl_key_init,
        .lct_fini = cl_key_fini,
        .lct_exit = cl_key_exit
};
static struct lu_kmem_descr cl_object_caches[] = {
        {
                .ckd_cache = &cl_env_kmem,
                .ckd_name  = "cl_env_kmem",
                .ckd_size  = sizeof(struct cl_env)
        },
        {
                .ckd_cache = NULL
        }
};
/**
 * Global initialization of cl-data. Create kmem caches, register
 * lu_context_key's, etc.
 *
 * \see cl_global_fini()
 */
int cl_global_init(void)
{
        int result;

        result = cl_env_store_init();
        if (result)
                return result;

        result = lu_kmem_init(cl_object_caches);
        if (result)
                goto out_store;

        LU_CONTEXT_KEY_INIT(&cl_key);
        result = lu_context_key_register(&cl_key);
        if (result)
                goto out_kmem;

        result = cl_env_percpu_init();
        if (result)
                /* no cl_env_percpu_fini on error */
                goto out_context;

        return 0;

out_context:
        lu_context_key_degister(&cl_key);
out_kmem:
        lu_kmem_fini(cl_object_caches);
out_store:
        cl_env_store_fini();
        return result;
}
/**
 * Finalization of global cl-data. Dual to cl_global_init().
 */
void cl_global_fini(void)
{
        cl_env_percpu_fini();
        lu_context_key_degister(&cl_key);
        lu_kmem_fini(cl_object_caches);
        cl_env_store_fini();
}