/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2010, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ldlm/ldlm_lockd.c
 *
 * Author: Peter Braam <braam@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 */
#define DEBUG_SUBSYSTEM S_LDLM

#include "../../include/linux/libcfs/libcfs.h"
#include "../include/lustre_dlm.h"
#include "../include/obd_class.h"
#include <linux/list.h>
#include "ldlm_internal.h"
static int ldlm_num_threads;
module_param(ldlm_num_threads, int, 0444);
MODULE_PARM_DESC(ldlm_num_threads, "number of DLM service threads to start");

static char *ldlm_cpts;
module_param(ldlm_cpts, charp, 0444);
MODULE_PARM_DESC(ldlm_cpts, "CPU partitions ldlm threads should run on");
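/*
 * Both knobs are 0444, i.e. read-only at runtime, so they can only be set
 * at module load time. Illustrative invocation (the module name is an
 * assumption; in the staging tree this code is built into ptlrpc):
 *
 *	modprobe ptlrpc ldlm_num_threads=16 ldlm_cpts="[0-3]"
 */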
static struct mutex ldlm_ref_mutex;
static int ldlm_refcount;

struct kobject *ldlm_kobj;
struct kset *ldlm_ns_kset;
struct kset *ldlm_svc_kset;
struct ldlm_cb_async_args {
	struct ldlm_cb_set_arg	*ca_set_arg;
	struct ldlm_lock	*ca_lock;
};

static struct ldlm_state *ldlm_state;
inline unsigned long round_timeout(unsigned long timeout)
{
	return cfs_time_seconds((int)cfs_duration_sec(cfs_time_sub(timeout, 0)) + 1);
}
/* timeout for initial callback (AST) reply (bz10399) */
static inline unsigned int ldlm_get_rq_timeout(void)
{
	unsigned int timeout = min(ldlm_timeout, obd_timeout / 3);

	return timeout < 1 ? 1 : timeout;
}

#define ELT_STOPPED	0
#define ELT_READY	1
#define ELT_TERMINATE	2
struct ldlm_bl_pool {
	spinlock_t		blp_lock;

	/*
	 * blp_prio_list is used for callbacks that should be handled
	 * as a priority. It is used for LDLM_FL_DISCARD_DATA requests.
	 */
	struct list_head	blp_prio_list;

	/*
	 * blp_list is used for all other callbacks which are likely
	 * to take longer to process.
	 */
	struct list_head	blp_list;

	wait_queue_head_t	blp_waitq;
	struct completion	blp_comp;
	atomic_t		blp_num_threads;
	atomic_t		blp_busy_threads;
	int			blp_min_threads;
	int			blp_max_threads;
};
struct ldlm_bl_work_item {
	struct list_head	blwi_entry;
	struct ldlm_namespace	*blwi_ns;
	struct ldlm_lock_desc	blwi_ld;
	struct ldlm_lock	*blwi_lock;
	struct list_head	blwi_head;
	int			blwi_count;
	struct completion	blwi_comp;
	ldlm_cancel_flags_t	blwi_flags;
	int			blwi_mem_pressure;
};
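/*
 * A work item carries either a single lock (blwi_lock, queued for one
 * blocking AST) or a batch of blwi_count locks chained on blwi_head
 * (queued by the LRU cancel path); init_blwi() below fills in exactly
 * one of the two forms.
 */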
int ldlm_del_waiting_lock(struct ldlm_lock *lock)
{
	return 0;
}

int ldlm_refresh_waiting_lock(struct ldlm_lock *lock, int timeout)
{
	return 0;
}
/**
 * Callback handler for receiving incoming blocking ASTs.
 *
 * This can only happen on the client side.
 */
void ldlm_handle_bl_callback(struct ldlm_namespace *ns,
			     struct ldlm_lock_desc *ld, struct ldlm_lock *lock)
{
	int do_ast;

	LDLM_DEBUG(lock, "client blocking AST callback handler");

	lock_res_and_lock(lock);
	lock->l_flags |= LDLM_FL_CBPENDING;

	if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK)
		lock->l_flags |= LDLM_FL_CANCEL;

	do_ast = !lock->l_readers && !lock->l_writers;
	unlock_res_and_lock(lock);

	if (do_ast) {
		CDEBUG(D_DLMTRACE,
		       "Lock %p already unused, calling callback (%p)\n", lock,
		       lock->l_blocking_ast);
		if (lock->l_blocking_ast != NULL)
			lock->l_blocking_ast(lock, ld, lock->l_ast_data,
					     LDLM_CB_BLOCKING);
	} else {
		CDEBUG(D_DLMTRACE,
		       "Lock %p is referenced, will be cancelled later\n",
		       lock);
	}

	LDLM_DEBUG(lock, "client blocking callback handler END");
	LDLM_LOCK_RELEASE(lock);
}
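/*
 * Illustration only (not part of the original file, and not wired up
 * anywhere): a minimal ->l_blocking_ast implementation, sketching the
 * shape real per-layer callbacks follow. On LDLM_CB_BLOCKING it simply
 * cancels the lock so the server can grant the competing request.
 */
static int __maybe_unused example_blocking_ast(struct ldlm_lock *lock,
					       struct ldlm_lock_desc *desc,
					       void *data, int flag)
{
	struct lustre_handle lockh;

	if (flag == LDLM_CB_CANCELING)
		return 0;	/* nothing extra to do at cancel time */

	/* LDLM_CB_BLOCKING: give the lock back */
	ldlm_lock2handle(lock, &lockh);
	return ldlm_cli_cancel(&lockh, LCF_ASYNC);
}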
/**
 * Callback handler for receiving incoming completion ASTs.
 *
 * This can only happen on the client side.
 */
static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
				    struct ldlm_namespace *ns,
				    struct ldlm_request *dlm_req,
				    struct ldlm_lock *lock)
{
	int lvb_len;
	LIST_HEAD(ast_list);
	int rc = 0;

	LDLM_DEBUG(lock, "client completion callback handler START");

	if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_BL_CB_RACE)) {
		int to = cfs_time_seconds(1);

		while (to > 0) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(to);
			if (lock->l_granted_mode == lock->l_req_mode ||
			    lock->l_flags & LDLM_FL_DESTROYED)
				break;
		}
	}

	lvb_len = req_capsule_get_size(&req->rq_pill, &RMF_DLM_LVB, RCL_CLIENT);
	if (lvb_len < 0) {
		LDLM_ERROR(lock, "Failed to get lvb_len, rc = %d", lvb_len);
		rc = lvb_len;
		goto out;
	} else if (lvb_len > 0) {
		if (lock->l_lvb_len > 0) {
			/* for extent lock, lvb contains ost_lvb{}. */
			LASSERT(lock->l_lvb_data != NULL);

			if (unlikely(lock->l_lvb_len < lvb_len)) {
				LDLM_ERROR(lock, "Replied LVB is larger than expectation, expected = %d, replied = %d",
					   lock->l_lvb_len, lvb_len);
				rc = -EINVAL;
				goto out;
			}
		} else if (ldlm_has_layout(lock)) { /* for layout lock, lvb has
						     * variable length */
			void *lvb_data;

			lvb_data = kzalloc(lvb_len, GFP_NOFS);
			if (lvb_data == NULL) {
				LDLM_ERROR(lock, "No memory: %d.\n", lvb_len);
				rc = -ENOMEM;
				goto out;
			}

			lock_res_and_lock(lock);
			LASSERT(lock->l_lvb_data == NULL);
			lock->l_lvb_type = LVB_T_LAYOUT;
			lock->l_lvb_data = lvb_data;
			lock->l_lvb_len = lvb_len;
			unlock_res_and_lock(lock);
		}
	}

	lock_res_and_lock(lock);
	if ((lock->l_flags & LDLM_FL_DESTROYED) ||
	    lock->l_granted_mode == lock->l_req_mode) {
		/* bug 11300: the lock has already been granted */
		unlock_res_and_lock(lock);
		LDLM_DEBUG(lock, "Double grant race happened");
		goto out;
	}

	/* If we receive the completion AST before the actual enqueue returned,
	 * then we might need to switch lock modes, resources, or extents. */
	if (dlm_req->lock_desc.l_granted_mode != lock->l_req_mode) {
		lock->l_req_mode = dlm_req->lock_desc.l_granted_mode;
		LDLM_DEBUG(lock, "completion AST, new lock mode");
	}

	if (lock->l_resource->lr_type != LDLM_PLAIN) {
		ldlm_convert_policy_to_local(req->rq_export,
					  dlm_req->lock_desc.l_resource.lr_type,
					  &dlm_req->lock_desc.l_policy_data,
					  &lock->l_policy_data);
		LDLM_DEBUG(lock, "completion AST, new policy data");
	}

	ldlm_resource_unlink_lock(lock);
	if (memcmp(&dlm_req->lock_desc.l_resource.lr_name,
		   &lock->l_resource->lr_name,
		   sizeof(lock->l_resource->lr_name)) != 0) {
		unlock_res_and_lock(lock);
		rc = ldlm_lock_change_resource(ns, lock,
				&dlm_req->lock_desc.l_resource.lr_name);
		if (rc < 0) {
			LDLM_ERROR(lock, "Failed to allocate resource");
			goto out;
		}
		LDLM_DEBUG(lock, "completion AST, new resource");
		CERROR("change resource!\n");
		lock_res_and_lock(lock);
	}

	if (dlm_req->lock_flags & LDLM_FL_AST_SENT) {
		/* BL_AST locks are not needed in LRU.
		 * Let ldlm_cancel_lru() be fast. */
		ldlm_lock_remove_from_lru(lock);
		lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_BL_AST;
		LDLM_DEBUG(lock, "completion AST includes blocking AST");
	}

	if (lock->l_lvb_len > 0) {
		rc = ldlm_fill_lvb(lock, &req->rq_pill, RCL_CLIENT,
				   lock->l_lvb_data, lvb_len);
		if (rc < 0) {
			unlock_res_and_lock(lock);
			goto out;
		}
	}

	ldlm_grant_lock(lock, &ast_list);
	unlock_res_and_lock(lock);

	LDLM_DEBUG(lock, "callback handler finished, about to run_ast_work");

	/* Let the enqueue path call osc_lock_upcall() and initialize
	 * l_ast_data */
	OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_ENQ_RACE, 2);

	ldlm_run_ast_work(ns, &ast_list, LDLM_WORK_CP_AST);

	LDLM_DEBUG_NOLOCK("client completion callback handler END (lock %p)",
			  lock);

out:
	if (rc < 0) {
		lock_res_and_lock(lock);
		lock->l_flags |= LDLM_FL_FAILED;
		unlock_res_and_lock(lock);
		wake_up(&lock->l_waitq);
	}
	LDLM_LOCK_RELEASE(lock);
}
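/*
 * A note on the failure path above: this handler only marks the lock
 * LDLM_FL_FAILED and wakes l_waitq; it is the enqueue path sleeping on
 * that waitqueue (the completion AST waiter in ldlm_request.c) that
 * observes the flag and turns it into an error for the caller.
 */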
/**
 * Callback handler for receiving incoming glimpse ASTs.
 *
 * This can only happen on the client side. After handling the glimpse AST,
 * we also consider dropping the lock here if it is unused locally for a
 * long time.
 */
static void ldlm_handle_gl_callback(struct ptlrpc_request *req,
				    struct ldlm_namespace *ns,
				    struct ldlm_request *dlm_req,
				    struct ldlm_lock *lock)
{
	int rc = -ENOSYS;

	LDLM_DEBUG(lock, "client glimpse AST callback handler");

	if (lock->l_glimpse_ast != NULL)
		rc = lock->l_glimpse_ast(lock, req);

	if (req->rq_repmsg != NULL) {
		ptlrpc_reply(req);
	} else {
		req->rq_status = rc;
		ptlrpc_error(req);
	}

	lock_res_and_lock(lock);
	if (lock->l_granted_mode == LCK_PW &&
	    !lock->l_readers && !lock->l_writers &&
	    cfs_time_after(cfs_time_current(),
			   cfs_time_add(lock->l_last_used,
					cfs_time_seconds(10)))) {
		unlock_res_and_lock(lock);
		if (ldlm_bl_to_thread_lock(ns, NULL, lock))
			ldlm_handle_bl_callback(ns, NULL, lock);

		return;
	}
	unlock_res_and_lock(lock);
	LDLM_LOCK_RELEASE(lock);
}
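/*
 * The "unused for 10 seconds" heuristic above: a PW lock with no reader or
 * writer references that has not been used for 10 seconds is handed to a
 * blocking thread for cancellation, on the theory that a glimpse usually
 * means another client wants the object and the cached lock is no longer
 * earning its keep.
 */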
static int ldlm_callback_reply(struct ptlrpc_request *req, int rc)
{
	if (req->rq_no_reply)
		return 0;

	req->rq_status = rc;
	if (!req->rq_packed_final) {
		rc = lustre_pack_reply(req, 1, NULL, NULL);
		if (rc)
			return rc;
	}
	return ptlrpc_reply(req);
}
static int __ldlm_bl_to_thread(struct ldlm_bl_work_item *blwi,
			       ldlm_cancel_flags_t cancel_flags)
{
	struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;

	spin_lock(&blp->blp_lock);
	if (blwi->blwi_lock &&
	    blwi->blwi_lock->l_flags & LDLM_FL_DISCARD_DATA) {
		/* add LDLM_FL_DISCARD_DATA requests to the priority list */
		list_add_tail(&blwi->blwi_entry, &blp->blp_prio_list);
	} else {
		/* other blocking callbacks are added to the regular list */
		list_add_tail(&blwi->blwi_entry, &blp->blp_list);
	}
	spin_unlock(&blp->blp_lock);

	wake_up(&blp->blp_waitq);

	/* cannot check blwi->blwi_flags as blwi could already be freed in
	 * LCF_ASYNC mode */
	if (!(cancel_flags & LCF_ASYNC))
		wait_for_completion(&blwi->blwi_comp);

	return 0;
}
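/*
 * Lifetime contract: in the synchronous (!LCF_ASYNC) case the caller's
 * stack-allocated work item stays alive until the worker signals
 * blwi_comp, which is why ldlm_bl_to_thread() below can use a stack blwi
 * there; in the async case the worker kfree()s the heap-allocated blwi,
 * so it must not be touched after the wake_up().
 */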
static inline void init_blwi(struct ldlm_bl_work_item *blwi,
			     struct ldlm_namespace *ns,
			     struct ldlm_lock_desc *ld,
			     struct list_head *cancels, int count,
			     struct ldlm_lock *lock,
			     ldlm_cancel_flags_t cancel_flags)
{
	init_completion(&blwi->blwi_comp);
	INIT_LIST_HEAD(&blwi->blwi_head);

	if (memory_pressure_get())
		blwi->blwi_mem_pressure = 1;

	blwi->blwi_ns = ns;
	blwi->blwi_flags = cancel_flags;
	if (ld != NULL)
		blwi->blwi_ld = *ld;
	if (count) {
		list_add(&blwi->blwi_head, cancels);
		list_del_init(cancels);
		blwi->blwi_count = count;
	} else {
		blwi->blwi_lock = lock;
	}
}
/**
 * Queues a list of locks \a cancels containing \a count locks
 * for later processing by a blocking thread. If \a count is zero,
 * then the lock referenced as \a lock is queued instead.
 *
 * The blocking thread would then call ->l_blocking_ast callback in the lock.
 * If list addition fails, an error is returned and the caller is supposed
 * to call ->l_blocking_ast itself.
 */
static int ldlm_bl_to_thread(struct ldlm_namespace *ns,
			     struct ldlm_lock_desc *ld,
			     struct ldlm_lock *lock,
			     struct list_head *cancels, int count,
			     ldlm_cancel_flags_t cancel_flags)
{
	if (cancels && count == 0)
		return 0;

	if (cancel_flags & LCF_ASYNC) {
		struct ldlm_bl_work_item *blwi;

		blwi = kzalloc(sizeof(*blwi), GFP_NOFS);
		if (blwi == NULL)
			return -ENOMEM;
		init_blwi(blwi, ns, ld, cancels, count, lock, cancel_flags);

		return __ldlm_bl_to_thread(blwi, cancel_flags);
	} else {
		/* if it is a synchronous call, do minimal mem alloc, as it
		 * could be triggered from the kernel shrinker
		 */
		struct ldlm_bl_work_item blwi;

		memset(&blwi, 0, sizeof(blwi));
		init_blwi(&blwi, ns, ld, cancels, count, lock, cancel_flags);
		return __ldlm_bl_to_thread(&blwi, cancel_flags);
	}
}
int ldlm_bl_to_thread_lock(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
			   struct ldlm_lock *lock)
{
	return ldlm_bl_to_thread(ns, ld, lock, NULL, 0, LCF_ASYNC);
}

int ldlm_bl_to_thread_list(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
			   struct list_head *cancels, int count,
			   ldlm_cancel_flags_t cancel_flags)
{
	return ldlm_bl_to_thread(ns, ld, NULL, cancels, count, cancel_flags);
}
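/*
 * Typical caller (a sketch; see the LRU cancel path in ldlm_request.c for
 * the real thing): batch unused locks off the LRU onto a private list,
 * then hand the whole batch to a blocking thread:
 *
 *	LIST_HEAD(cancels);
 *	int count;
 *
 *	count = ldlm_cancel_lru_local(ns, &cancels, 0, nr, 0, flags);
 *	rc = ldlm_bl_to_thread_list(ns, NULL, &cancels, count, LCF_ASYNC);
 */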
/* Setinfo coming from Server (eg MDT) to Client (eg MDC)! */
static int ldlm_handle_setinfo(struct ptlrpc_request *req)
{
	struct obd_device *obd = req->rq_export->exp_obd;
	char *key;
	void *val;
	int keylen, vallen;
	int rc = -ENOSYS;

	DEBUG_REQ(D_HSM, req, "%s: handle setinfo\n", obd->obd_name);

	req_capsule_set(&req->rq_pill, &RQF_OBD_SET_INFO);

	key = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
	if (key == NULL) {
		DEBUG_REQ(D_IOCTL, req, "no set_info key");
		return -EFAULT;
	}
	keylen = req_capsule_get_size(&req->rq_pill, &RMF_SETINFO_KEY,
				      RCL_CLIENT);
	val = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_VAL);
	if (val == NULL) {
		DEBUG_REQ(D_IOCTL, req, "no set_info val");
		return -EFAULT;
	}
	vallen = req_capsule_get_size(&req->rq_pill, &RMF_SETINFO_VAL,
				      RCL_CLIENT);

	/* We are responsible for swabbing contents of val */

	if (KEY_IS(KEY_HSM_COPYTOOL_SEND))
		/* Pass it on to mdc (the "export" in this case) */
		rc = obd_set_info_async(req->rq_svc_thread->t_env,
					req->rq_export,
					sizeof(KEY_HSM_COPYTOOL_SEND),
					KEY_HSM_COPYTOOL_SEND,
					vallen, val, NULL);
	else
		DEBUG_REQ(D_WARNING, req, "ignoring unknown key %s", key);

	return rc;
}
static inline void ldlm_callback_errmsg(struct ptlrpc_request *req,
					const char *msg, int rc,
					struct lustre_handle *handle)
{
	DEBUG_REQ((req->rq_no_reply || rc) ? D_WARNING : D_DLMTRACE, req,
		  "%s: [nid %s] [rc %d] [lock %#llx]",
		  msg, libcfs_id2str(req->rq_peer), rc,
		  handle ? handle->cookie : 0);
	if (req->rq_no_reply)
		CWARN("No reply was sent, maybe cause bug 21636.\n");
	else if (rc)
		CWARN("Send reply failed, maybe cause bug 21636.\n");
}
static int ldlm_handle_qc_callback(struct ptlrpc_request *req)
{
	struct obd_quotactl *oqctl;
	struct client_obd *cli = &req->rq_export->exp_obd->u.cli;

	oqctl = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL);
	if (oqctl == NULL) {
		CERROR("Can't unpack obd_quotactl\n");
		return -EPROTO;
	}

	oqctl->qc_stat = ptlrpc_status_ntoh(oqctl->qc_stat);

	cli->cl_qchk_stat = oqctl->qc_stat;
	return 0;
}
/* TODO: handle requests in a similar way as MDT: see mdt_handle_common() */
static int ldlm_callback_handler(struct ptlrpc_request *req)
{
	struct ldlm_namespace *ns;
	struct ldlm_request *dlm_req;
	struct ldlm_lock *lock;
	int rc;

	/* Requests arrive in sender's byte order. The ptlrpc service
	 * handler has already checked and, if necessary, byte-swapped the
	 * incoming request message body, but I am responsible for the
	 * message buffers. */

	/* do nothing for sec context finalize */
	if (lustre_msg_get_opc(req->rq_reqmsg) == SEC_CTX_FINI)
		return 0;

	req_capsule_init(&req->rq_pill, req, RCL_SERVER);

	if (req->rq_export == NULL) {
		rc = ldlm_callback_reply(req, -ENOTCONN);
		ldlm_callback_errmsg(req, "Operate on unconnected server",
				     rc, NULL);
		return 0;
	}

	LASSERT(req->rq_export != NULL);
	LASSERT(req->rq_export->exp_obd != NULL);

	switch (lustre_msg_get_opc(req->rq_reqmsg)) {
	case LDLM_BL_CALLBACK:
		if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_BL_CALLBACK_NET))
			return 0;
		break;
	case LDLM_CP_CALLBACK:
		if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CP_CALLBACK_NET))
			return 0;
		break;
	case LDLM_GL_CALLBACK:
		if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_GL_CALLBACK_NET))
			return 0;
		break;
	case LDLM_SET_INFO:
		rc = ldlm_handle_setinfo(req);
		ldlm_callback_reply(req, rc);
		return 0;
	case OBD_QC_CALLBACK:
		req_capsule_set(&req->rq_pill, &RQF_QC_CALLBACK);
		if (OBD_FAIL_CHECK(OBD_FAIL_OBD_QC_CALLBACK_NET))
			return 0;
		rc = ldlm_handle_qc_callback(req);
		ldlm_callback_reply(req, rc);
		return 0;
	default:
		CERROR("unknown opcode %u\n",
		       lustre_msg_get_opc(req->rq_reqmsg));
		ldlm_callback_reply(req, -EPROTO);
		return 0;
	}

	ns = req->rq_export->exp_obd->obd_namespace;
	LASSERT(ns != NULL);

	req_capsule_set(&req->rq_pill, &RQF_LDLM_CALLBACK);

	dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
	if (dlm_req == NULL) {
		rc = ldlm_callback_reply(req, -EPROTO);
		ldlm_callback_errmsg(req, "Operate without parameter", rc,
				     NULL);
		return 0;
	}

	/* Force a known safe race, send a cancel to the server for a lock
	 * which the server has already started a blocking callback on. */
	if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_BL_CB_RACE) &&
	    lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK) {
		rc = ldlm_cli_cancel(&dlm_req->lock_handle[0], 0);
		if (rc < 0)
			CERROR("ldlm_cli_cancel: %d\n", rc);
	}

	lock = ldlm_handle2lock_long(&dlm_req->lock_handle[0], 0);
	if (!lock) {
		CDEBUG(D_DLMTRACE, "callback on lock %#llx - lock disappeared\n",
		       dlm_req->lock_handle[0].cookie);
		rc = ldlm_callback_reply(req, -EINVAL);
		ldlm_callback_errmsg(req, "Operate with invalid parameter", rc,
				     &dlm_req->lock_handle[0]);
		return 0;
	}

	if ((lock->l_flags & LDLM_FL_FAIL_LOC) &&
	    lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK)
		OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);

	/* Copy hints/flags (e.g. LDLM_FL_DISCARD_DATA) from AST. */
	lock_res_and_lock(lock);
	lock->l_flags |= ldlm_flags_from_wire(dlm_req->lock_flags &
					      LDLM_AST_FLAGS);
	if (lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK) {
		/* If somebody cancels lock and cache is already dropped,
		 * or lock is failed before cp_ast received on client,
		 * we can tell the server we have no lock. Otherwise, we
		 * should send cancel after dropping the cache. */
		if (((lock->l_flags & LDLM_FL_CANCELING) &&
		     (lock->l_flags & LDLM_FL_BL_DONE)) ||
		    (lock->l_flags & LDLM_FL_FAILED)) {
			LDLM_DEBUG(lock, "callback on lock %#llx - lock disappeared\n",
				   dlm_req->lock_handle[0].cookie);
			unlock_res_and_lock(lock);
			LDLM_LOCK_RELEASE(lock);
			rc = ldlm_callback_reply(req, -EINVAL);
			ldlm_callback_errmsg(req, "Operate on stale lock", rc,
					     &dlm_req->lock_handle[0]);
			return 0;
		}
		/* BL_AST locks are not needed in LRU.
		 * Let ldlm_cancel_lru() be fast. */
		ldlm_lock_remove_from_lru(lock);
		lock->l_flags |= LDLM_FL_BL_AST;
	}
	unlock_res_and_lock(lock);

	/* We want the ost thread to get this reply so that it can respond
	 * to ost requests (write cache writeback) that might be triggered
	 * in the deadlock case.
	 *
	 * But we'd also like to be able to indicate in the reply that we're
	 * cancelling right now, because it's unused, or have an intent result
	 * in the reply, so we might have to push the responsibility for
	 * sending the reply down into the AST handlers, alas. */

	switch (lustre_msg_get_opc(req->rq_reqmsg)) {
	case LDLM_BL_CALLBACK:
		CDEBUG(D_INODE, "blocking ast\n");
		req_capsule_extend(&req->rq_pill, &RQF_LDLM_BL_CALLBACK);
		if (!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK)) {
			rc = ldlm_callback_reply(req, 0);
			if (req->rq_no_reply || rc)
				ldlm_callback_errmsg(req, "Normal process", rc,
						     &dlm_req->lock_handle[0]);
		}
		if (ldlm_bl_to_thread_lock(ns, &dlm_req->lock_desc, lock))
			ldlm_handle_bl_callback(ns, &dlm_req->lock_desc, lock);
		break;
	case LDLM_CP_CALLBACK:
		CDEBUG(D_INODE, "completion ast\n");
		req_capsule_extend(&req->rq_pill, &RQF_LDLM_CP_CALLBACK);
		ldlm_callback_reply(req, 0);
		ldlm_handle_cp_callback(req, ns, dlm_req, lock);
		break;
	case LDLM_GL_CALLBACK:
		CDEBUG(D_INODE, "glimpse ast\n");
		req_capsule_extend(&req->rq_pill, &RQF_LDLM_GL_CALLBACK);
		ldlm_handle_gl_callback(req, ns, dlm_req, lock);
		break;
	default:
		LBUG(); /* checked above */
	}

	return 0;
}
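/*
 * Note the asymmetry above: for BL and CP ASTs the reply is sent before
 * the AST is actually processed (the server only needs to know the client
 * heard it), while GL ASTs reply from inside ldlm_handle_gl_callback(),
 * because the glimpse answer itself travels in the reply (hence the
 * rq_repmsg check there).
 */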
static struct ldlm_bl_work_item *ldlm_bl_get_work(struct ldlm_bl_pool *blp)
{
	struct ldlm_bl_work_item *blwi = NULL;
	static unsigned int num_bl;

	spin_lock(&blp->blp_lock);
	/* process a request from the blp_list at least every blp_num_threads */
	if (!list_empty(&blp->blp_list) &&
	    (list_empty(&blp->blp_prio_list) || num_bl == 0))
		blwi = list_entry(blp->blp_list.next,
				  struct ldlm_bl_work_item, blwi_entry);
	else if (!list_empty(&blp->blp_prio_list))
		blwi = list_entry(blp->blp_prio_list.next,
				  struct ldlm_bl_work_item,
				  blwi_entry);

	if (blwi) {
		if (++num_bl >= atomic_read(&blp->blp_num_threads))
			num_bl = 0;
		list_del(&blwi->blwi_entry);
	}
	spin_unlock(&blp->blp_lock);

	return blwi;
}
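/*
 * The num_bl counter implements a simple anti-starvation rule: even when
 * priority (LDLM_FL_DISCARD_DATA) work is pending, roughly one dequeue in
 * every blp_num_threads comes from the regular blp_list, so ordinary
 * callbacks cannot be starved indefinitely by a stream of priority items.
 */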
/* This only contains temporary data until the thread starts */
struct ldlm_bl_thread_data {
	char			bltd_name[CFS_CURPROC_COMM_MAX];
	struct ldlm_bl_pool	*bltd_blp;
	struct completion	bltd_comp;
	int			bltd_num;
};
static int ldlm_bl_thread_main(void *arg);

static int ldlm_bl_thread_start(struct ldlm_bl_pool *blp)
{
	struct ldlm_bl_thread_data bltd = { .bltd_blp = blp };
	struct task_struct *task;

	init_completion(&bltd.bltd_comp);
	bltd.bltd_num = atomic_read(&blp->blp_num_threads);
	snprintf(bltd.bltd_name, sizeof(bltd.bltd_name),
		 "ldlm_bl_%02d", bltd.bltd_num);
	task = kthread_run(ldlm_bl_thread_main, &bltd, "%s", bltd.bltd_name);
	if (IS_ERR(task)) {
		CERROR("cannot start LDLM thread ldlm_bl_%02d: rc %ld\n",
		       atomic_read(&blp->blp_num_threads), PTR_ERR(task));
		return PTR_ERR(task);
	}
	wait_for_completion(&bltd.bltd_comp);

	return 0;
}
/**
 * Main blocking requests processing thread.
 *
 * Callers put locks into its queue by calling ldlm_bl_to_thread.
 * This thread eventually makes the actual call to ->l_blocking_ast for
 * each queued lock.
 */
static int ldlm_bl_thread_main(void *arg)
{
	struct ldlm_bl_pool *blp;

	{
		struct ldlm_bl_thread_data *bltd = arg;

		blp = bltd->bltd_blp;

		atomic_inc(&blp->blp_num_threads);
		atomic_inc(&blp->blp_busy_threads);

		complete(&bltd->bltd_comp);
		/* cannot use bltd after this, it is only on caller's stack */
	}

	while (1) {
		struct l_wait_info lwi = { 0 };
		struct ldlm_bl_work_item *blwi = NULL;
		int busy;

		blwi = ldlm_bl_get_work(blp);

		if (blwi == NULL) {
			atomic_dec(&blp->blp_busy_threads);
			l_wait_event_exclusive(blp->blp_waitq,
					 (blwi = ldlm_bl_get_work(blp)) != NULL,
					 &lwi);
			busy = atomic_inc_return(&blp->blp_busy_threads);
		} else {
			busy = atomic_read(&blp->blp_busy_threads);
		}

		if (blwi->blwi_ns == NULL)
			/* added by ldlm_cleanup() */
			break;

		/* Not fatal if racy and have a few too many threads */
		if (unlikely(busy < blp->blp_max_threads &&
			     busy >= atomic_read(&blp->blp_num_threads) &&
			     !blwi->blwi_mem_pressure))
			/* discard the return value, we tried */
			ldlm_bl_thread_start(blp);

		if (blwi->blwi_mem_pressure)
			memory_pressure_set();

		if (blwi->blwi_count) {
			int count;

			/* Special case: locks canceled from the LRU
			 * asynchronously are passed in as a list; they have
			 * been marked LDLM_FL_CANCELING, but NOT canceled
			 * locally yet. */
			count = ldlm_cli_cancel_list_local(&blwi->blwi_head,
							   blwi->blwi_count,
							   LCF_BL_AST);
			ldlm_cli_cancel_list(&blwi->blwi_head, count, NULL,
					     blwi->blwi_flags);
		} else {
			ldlm_handle_bl_callback(blwi->blwi_ns, &blwi->blwi_ld,
						blwi->blwi_lock);
		}
		if (blwi->blwi_mem_pressure)
			memory_pressure_clr();

		if (blwi->blwi_flags & LCF_ASYNC)
			kfree(blwi);
		else
			complete(&blwi->blwi_comp);
	}

	atomic_dec(&blp->blp_busy_threads);
	atomic_dec(&blp->blp_num_threads);
	complete(&blp->blp_comp);
	return 0;
}
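/*
 * Thread-pool sizing note: a worker spawns a sibling only when every
 * existing thread is busy (busy >= blp_num_threads), the blp_max_threads
 * cap has not been reached, and we are not under memory pressure. The
 * check is racy by design; overshooting by a thread or two is harmless,
 * which is what the "discard the return value" above acknowledges.
 */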
static int ldlm_setup(void);
static int ldlm_cleanup(void);

int ldlm_get_ref(void)
{
	int rc = 0;

	mutex_lock(&ldlm_ref_mutex);
	if (++ldlm_refcount == 1) {
		rc = ldlm_setup();
		if (rc)
			ldlm_refcount--;
	}
	mutex_unlock(&ldlm_ref_mutex);

	return rc;
}
EXPORT_SYMBOL(ldlm_get_ref);

void ldlm_put_ref(void)
{
	mutex_lock(&ldlm_ref_mutex);
	if (ldlm_refcount == 1) {
		int rc = ldlm_cleanup();

		if (rc)
			CERROR("ldlm_cleanup failed: %d\n", rc);
		else
			ldlm_refcount--;
	} else {
		ldlm_refcount--;
	}
	mutex_unlock(&ldlm_ref_mutex);
}
EXPORT_SYMBOL(ldlm_put_ref);
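/*
 * Usage sketch (an assumption about callers, not code from this file):
 * each client OBD takes a reference at setup and drops it at cleanup, so
 * the first user triggers ldlm_setup() and the last one ldlm_cleanup():
 *
 *	rc = ldlm_get_ref();
 *	if (rc)
 *		return rc;
 *	...
 *	ldlm_put_ref();
 */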
/*
 * Export handle<->lock hash operations.
 */
static unsigned
ldlm_export_lock_hash(struct cfs_hash *hs, const void *key, unsigned mask)
{
	return cfs_hash_u64_hash(((struct lustre_handle *)key)->cookie, mask);
}

static void *
ldlm_export_lock_key(struct hlist_node *hnode)
{
	struct ldlm_lock *lock;

	lock = hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
	return &lock->l_remote_handle;
}

static void
ldlm_export_lock_keycpy(struct hlist_node *hnode, void *key)
{
	struct ldlm_lock *lock;

	lock = hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
	lock->l_remote_handle = *(struct lustre_handle *)key;
}

static int
ldlm_export_lock_keycmp(const void *key, struct hlist_node *hnode)
{
	return lustre_handle_equal(ldlm_export_lock_key(hnode), key);
}

static void *
ldlm_export_lock_object(struct hlist_node *hnode)
{
	return hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
}

static void
ldlm_export_lock_get(struct cfs_hash *hs, struct hlist_node *hnode)
{
	struct ldlm_lock *lock;

	lock = hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
	LDLM_LOCK_GET(lock);
}

static void
ldlm_export_lock_put(struct cfs_hash *hs, struct hlist_node *hnode)
{
	struct ldlm_lock *lock;

	lock = hlist_entry(hnode, struct ldlm_lock, l_exp_hash);
	LDLM_LOCK_RELEASE(lock);
}

static cfs_hash_ops_t ldlm_export_lock_ops = {
	.hs_hash	= ldlm_export_lock_hash,
	.hs_key		= ldlm_export_lock_key,
	.hs_keycmp	= ldlm_export_lock_keycmp,
	.hs_keycpy	= ldlm_export_lock_keycpy,
	.hs_object	= ldlm_export_lock_object,
	.hs_get		= ldlm_export_lock_get,
	.hs_put		= ldlm_export_lock_put,
	.hs_put_locked	= ldlm_export_lock_put,
};
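/*
 * These ops back the per-export exp_lock_hash created in ldlm_init_export()
 * below: the key is the server-assigned remote lock handle (a 64-bit
 * cookie), the object is the client-side ldlm_lock, and hs_get/hs_put keep
 * a reference on the lock for as long as it is reachable from the hash.
 */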
int ldlm_init_export(struct obd_export *exp)
{
	int rc;

	exp->exp_lock_hash =
		cfs_hash_create(obd_uuid2str(&exp->exp_client_uuid),
				HASH_EXP_LOCK_CUR_BITS,
				HASH_EXP_LOCK_MAX_BITS,
				HASH_EXP_LOCK_BKT_BITS, 0,
				CFS_HASH_MIN_THETA, CFS_HASH_MAX_THETA,
				&ldlm_export_lock_ops,
				CFS_HASH_DEFAULT | CFS_HASH_REHASH_KEY |
				CFS_HASH_NBLK_CHANGE);

	if (!exp->exp_lock_hash)
		return -ENOMEM;

	rc = ldlm_init_flock_export(exp);
	if (rc)
		goto err;

	return 0;
err:
	ldlm_destroy_export(exp);
	return rc;
}
EXPORT_SYMBOL(ldlm_init_export);

void ldlm_destroy_export(struct obd_export *exp)
{
	cfs_hash_putref(exp->exp_lock_hash);
	exp->exp_lock_hash = NULL;

	ldlm_destroy_flock_export(exp);
}
EXPORT_SYMBOL(ldlm_destroy_export);
extern unsigned int ldlm_cancel_unused_locks_before_replay;

static ssize_t cancel_unused_locks_before_replay_show(struct kobject *kobj,
						      struct attribute *attr,
						      char *buf)
{
	return sprintf(buf, "%d\n", ldlm_cancel_unused_locks_before_replay);
}

static ssize_t cancel_unused_locks_before_replay_store(struct kobject *kobj,
						       struct attribute *attr,
						       const char *buffer,
						       size_t count)
{
	int rc;
	unsigned long val;

	rc = kstrtoul(buffer, 10, &val);
	if (rc)
		return rc;

	ldlm_cancel_unused_locks_before_replay = val;

	return count;
}
LUSTRE_RW_ATTR(cancel_unused_locks_before_replay);
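/*
 * With the attribute group registered below, this knob appears under the
 * path named in the comment that follows, so from a shell (illustrative,
 * assuming a standard sysfs mount):
 *
 *	cat /sys/fs/lustre/ldlm/cancel_unused_locks_before_replay
 *	echo 0 > /sys/fs/lustre/ldlm/cancel_unused_locks_before_replay
 */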
/* These are for root of /sys/fs/lustre/ldlm */
static struct attribute *ldlm_attrs[] = {
	&lustre_attr_cancel_unused_locks_before_replay.attr,
	NULL,
};

static struct attribute_group ldlm_attr_group = {
	.attrs = ldlm_attrs,
};
static int ldlm_setup(void)
{
	static struct ptlrpc_service_conf conf;
	struct ldlm_bl_pool *blp = NULL;
	int rc = 0;
	int i;

	if (ldlm_state != NULL)
		return -EALREADY;

	ldlm_state = kzalloc(sizeof(*ldlm_state), GFP_NOFS);
	if (ldlm_state == NULL)
		return -ENOMEM;

	ldlm_kobj = kobject_create_and_add("ldlm", lustre_kobj);
	if (!ldlm_kobj) {
		rc = -ENOMEM;
		goto out;
	}

	rc = sysfs_create_group(ldlm_kobj, &ldlm_attr_group);
	if (rc)
		goto out;

	ldlm_ns_kset = kset_create_and_add("namespaces", NULL, ldlm_kobj);
	if (!ldlm_ns_kset) {
		rc = -ENOMEM;
		goto out;
	}

	ldlm_svc_kset = kset_create_and_add("services", NULL, ldlm_kobj);
	if (!ldlm_svc_kset) {
		rc = -ENOMEM;
		goto out;
	}

	rc = ldlm_debugfs_setup();
	if (rc != 0)
		goto out;

	memset(&conf, 0, sizeof(conf));
	conf = (typeof(conf)) {
		.psc_name		= "ldlm_cbd",
		.psc_watchdog_factor	= 2,
		.psc_buf		= {
			.bc_nbufs		= LDLM_CLIENT_NBUFS,
			.bc_buf_size		= LDLM_BUFSIZE,
			.bc_req_max_size	= LDLM_MAXREQSIZE,
			.bc_rep_max_size	= LDLM_MAXREPSIZE,
			.bc_req_portal		= LDLM_CB_REQUEST_PORTAL,
			.bc_rep_portal		= LDLM_CB_REPLY_PORTAL,
		},
		.psc_thr		= {
			.tc_thr_name		= "ldlm_cb",
			.tc_thr_factor		= LDLM_THR_FACTOR,
			.tc_nthrs_init		= LDLM_NTHRS_INIT,
			.tc_nthrs_base		= LDLM_NTHRS_BASE,
			.tc_nthrs_max		= LDLM_NTHRS_MAX,
			.tc_nthrs_user		= ldlm_num_threads,
			.tc_cpu_affinity	= 1,
			.tc_ctx_tags		= LCT_MD_THREAD | LCT_DT_THREAD,
		},
		.psc_cpt		= {
			.cc_pattern		= ldlm_cpts,
		},
		.psc_ops		= {
			.so_req_handler		= ldlm_callback_handler,
		},
	};
	ldlm_state->ldlm_cb_service =
			ptlrpc_register_service(&conf, ldlm_svc_kset,
						ldlm_svc_debugfs_dir);
	if (IS_ERR(ldlm_state->ldlm_cb_service)) {
		CERROR("failed to start service\n");
		rc = PTR_ERR(ldlm_state->ldlm_cb_service);
		ldlm_state->ldlm_cb_service = NULL;
		goto out;
	}

	blp = kzalloc(sizeof(*blp), GFP_NOFS);
	if (!blp) {
		rc = -ENOMEM;
		goto out;
	}
	ldlm_state->ldlm_bl_pool = blp;

	spin_lock_init(&blp->blp_lock);
	INIT_LIST_HEAD(&blp->blp_list);
	INIT_LIST_HEAD(&blp->blp_prio_list);
	init_waitqueue_head(&blp->blp_waitq);
	atomic_set(&blp->blp_num_threads, 0);
	atomic_set(&blp->blp_busy_threads, 0);

	if (ldlm_num_threads == 0) {
		blp->blp_min_threads = LDLM_NTHRS_INIT;
		blp->blp_max_threads = LDLM_NTHRS_MAX;
	} else {
		blp->blp_min_threads = blp->blp_max_threads =
			min_t(int, LDLM_NTHRS_MAX, max_t(int, LDLM_NTHRS_INIT,
							 ldlm_num_threads));
	}

	for (i = 0; i < blp->blp_min_threads; i++) {
		rc = ldlm_bl_thread_start(blp);
		if (rc < 0)
			goto out;
	}

	rc = ldlm_pools_init();
	if (rc) {
		CERROR("Failed to initialize LDLM pools: %d\n", rc);
		goto out;
	}
	return 0;

 out:
	ldlm_cleanup();
	return rc;
}
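/*
 * Worked example of the sizing logic above (constants are whatever the
 * headers define): with ldlm_num_threads == 0 the pool starts at
 * LDLM_NTHRS_INIT threads and may grow on demand up to LDLM_NTHRS_MAX;
 * any explicit ldlm_num_threads is clamped into the range
 * [LDLM_NTHRS_INIT, LDLM_NTHRS_MAX] and used as a fixed size, since
 * blp_min_threads == blp_max_threads disables the grow-on-demand check
 * in ldlm_bl_thread_main().
 */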
static int ldlm_cleanup(void)
{
	if (!list_empty(ldlm_namespace_list(LDLM_NAMESPACE_SERVER)) ||
	    !list_empty(ldlm_namespace_list(LDLM_NAMESPACE_CLIENT))) {
		CERROR("ldlm still has namespaces; clean these up first.\n");
		ldlm_dump_all_namespaces(LDLM_NAMESPACE_SERVER, D_DLMTRACE);
		ldlm_dump_all_namespaces(LDLM_NAMESPACE_CLIENT, D_DLMTRACE);
		return -EBUSY;
	}

	ldlm_pools_fini();

	if (ldlm_state->ldlm_bl_pool != NULL) {
		struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;

		while (atomic_read(&blp->blp_num_threads) > 0) {
			struct ldlm_bl_work_item blwi = { .blwi_ns = NULL };

			init_completion(&blp->blp_comp);

			spin_lock(&blp->blp_lock);
			list_add_tail(&blwi.blwi_entry, &blp->blp_list);
			wake_up(&blp->blp_waitq);
			spin_unlock(&blp->blp_lock);

			wait_for_completion(&blp->blp_comp);
		}

		kfree(blp);
	}

	if (ldlm_state->ldlm_cb_service != NULL)
		ptlrpc_unregister_service(ldlm_state->ldlm_cb_service);

	if (ldlm_ns_kset)
		kset_unregister(ldlm_ns_kset);
	if (ldlm_svc_kset)
		kset_unregister(ldlm_svc_kset);
	if (ldlm_kobj)
		kobject_put(ldlm_kobj);

	ldlm_debugfs_cleanup();

	kfree(ldlm_state);
	ldlm_state = NULL;

	return 0;
}
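/*
 * Shutdown protocol for the worker pool above: each loop iteration queues
 * one work item with blwi_ns == NULL (the "added by ldlm_cleanup()"
 * sentinel that ldlm_bl_thread_main() checks) and waits on blp_comp until
 * exactly one thread notices it and exits, repeating until
 * blp_num_threads reaches zero.
 */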
int ldlm_init(void)
{
	mutex_init(&ldlm_ref_mutex);
	mutex_init(ldlm_namespace_lock(LDLM_NAMESPACE_SERVER));
	mutex_init(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
	ldlm_resource_slab = kmem_cache_create("ldlm_resources",
					       sizeof(struct ldlm_resource), 0,
					       SLAB_HWCACHE_ALIGN, NULL);
	if (ldlm_resource_slab == NULL)
		return -ENOMEM;

	ldlm_lock_slab = kmem_cache_create("ldlm_locks",
					   sizeof(struct ldlm_lock), 0,
					   SLAB_HWCACHE_ALIGN |
					   SLAB_DESTROY_BY_RCU, NULL);
	if (ldlm_lock_slab == NULL) {
		kmem_cache_destroy(ldlm_resource_slab);
		return -ENOMEM;
	}

	ldlm_interval_slab = kmem_cache_create("interval_node",
					       sizeof(struct ldlm_interval),
					       0, SLAB_HWCACHE_ALIGN, NULL);
	if (ldlm_interval_slab == NULL) {
		kmem_cache_destroy(ldlm_resource_slab);
		kmem_cache_destroy(ldlm_lock_slab);
		return -ENOMEM;
	}
#if LUSTRE_TRACKS_LOCK_EXP_REFS
	class_export_dump_hook = ldlm_dump_export_locks;
#endif
	return 0;
}
void ldlm_exit(void)
{
	if (ldlm_refcount)
		CERROR("ldlm_refcount is %d in ldlm_exit!\n", ldlm_refcount);
	kmem_cache_destroy(ldlm_resource_slab);
	/* ldlm_lock_put() uses RCU to call ldlm_lock_free(), so we need to
	 * call synchronize_rcu() to wait for a grace period to elapse and
	 * give ldlm_lock_free() a chance to run. */
	synchronize_rcu();
	kmem_cache_destroy(ldlm_lock_slab);
	kmem_cache_destroy(ldlm_interval_slab);
}