#include <linux/ceph/ceph_debug.h>

#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/utsname.h>
#include <linux/ratelimit.h>

#include "mds_client.h"

#include <linux/ceph/ceph_features.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/pagelist.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/debugfs.h>
/*
 * A cluster of MDS (metadata server) daemons is responsible for
 * managing the file system namespace (the directory hierarchy and
 * inodes) and for coordinating shared access to storage.  Metadata is
 * partitioned hierarchically across a number of servers, and that
 * partition varies over time as the cluster adjusts the distribution
 * in order to balance load.
 *
 * The MDS client is primarily responsible for managing synchronous
 * metadata requests for operations like open, unlink, and so forth.
 * If there is an MDS failure, we find out about it when we (possibly
 * request and) receive a new MDS map, and can resubmit affected
 * requests.
 *
 * For the most part, though, we take advantage of a lossless
 * communications channel to the MDS, and do not need to worry about
 * timing out or resubmitting requests.
 *
 * We maintain a stateful "session" with each MDS we interact with.
 * Within each session, we send periodic heartbeat messages to ensure
 * any capabilities or leases we have been issued remain valid.  If
 * the session times out and goes stale, our leases and capabilities
 * are no longer valid.
 */
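/*
 * Rough request flow through this file (a sketch for orientation, not
 * an exhaustive contract): callers build a request with
 * ceph_mdsc_create_request() and submit it via ceph_mdsc_do_request(),
 * which registers it (__register_request), picks a server
 * (__choose_mds), builds and sends the message
 * (__prepare_send_request), and waits; the reply is processed in
 * handle_reply().
 */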
struct ceph_reconnect_state {
	int nr_caps;
	struct ceph_pagelist *pagelist;
	bool flock;
};

static void __wake_requests(struct ceph_mds_client *mdsc,
			    struct list_head *head);

static const struct ceph_connection_operations mds_con_ops;


/*
 * mds reply parsing
 */

/*
 * parse individual inode info
 */
static int parse_reply_info_in(void **p, void *end,
			       struct ceph_mds_reply_info_in *info,
			       u64 features)
{
	int err = -EIO;

	info->in = *p;
	*p += sizeof(struct ceph_mds_reply_inode) +
		sizeof(*info->in->fragtree.splits) *
		le32_to_cpu(info->in->fragtree.nsplits);

	ceph_decode_32_safe(p, end, info->symlink_len, bad);
	ceph_decode_need(p, end, info->symlink_len, bad);
	info->symlink = *p;
	*p += info->symlink_len;

	if (features & CEPH_FEATURE_DIRLAYOUTHASH)
		ceph_decode_copy_safe(p, end, &info->dir_layout,
				      sizeof(info->dir_layout), bad);
	else
		memset(&info->dir_layout, 0, sizeof(info->dir_layout));

	ceph_decode_32_safe(p, end, info->xattr_len, bad);
	ceph_decode_need(p, end, info->xattr_len, bad);
	info->xattr_data = *p;
	*p += info->xattr_len;

	if (features & CEPH_FEATURE_MDS_INLINE_DATA) {
		ceph_decode_64_safe(p, end, info->inline_version, bad);
		ceph_decode_32_safe(p, end, info->inline_len, bad);
		ceph_decode_need(p, end, info->inline_len, bad);
		info->inline_data = *p;
		*p += info->inline_len;
	} else
		info->inline_version = CEPH_INLINE_NONE;

	if (features & CEPH_FEATURE_FS_FILE_LAYOUT_V2) {
		ceph_decode_32_safe(p, end, info->pool_ns_len, bad);
		ceph_decode_need(p, end, info->pool_ns_len, bad);
		*p += info->pool_ns_len;
	} else {
		info->pool_ns_len = 0;
	}

	return 0;
bad:
	return err;
}
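/*
 * Note on the decode helpers used above (an illustrative sketch, not
 * code that is called anywhere): the ceph_decode_*_safe() macros
 * bounds-check the cursor against 'end' and jump to the supplied label
 * on truncated input, which is why every parser here ends in a 'bad:'
 * label.  A hypothetical parser for a pair of u32s would look like:
 *
 *	static int parse_u32_pair(void **p, void *end, u32 *a, u32 *b)
 *	{
 *		ceph_decode_32_safe(p, end, *a, bad);
 *		ceph_decode_32_safe(p, end, *b, bad);
 *		return 0;
 *	bad:
 *		return -EIO;
 *	}
 */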
/*
 * parse a normal reply, which may contain a (dir+)dentry and/or a
 * target inode.
 */
static int parse_reply_info_trace(void **p, void *end,
				  struct ceph_mds_reply_info_parsed *info,
				  u64 features)
{
	int err;

	if (info->head->is_dentry) {
		err = parse_reply_info_in(p, end, &info->diri, features);
		if (err < 0)
			goto out_bad;

		if (unlikely(*p + sizeof(*info->dirfrag) > end))
			goto bad;
		info->dirfrag = *p;
		*p += sizeof(*info->dirfrag) +
			sizeof(u32)*le32_to_cpu(info->dirfrag->ndist);
		if (unlikely(*p > end))
			goto bad;

		ceph_decode_32_safe(p, end, info->dname_len, bad);
		ceph_decode_need(p, end, info->dname_len, bad);
		info->dname = *p;
		*p += info->dname_len;
		info->dlease = *p;
		*p += sizeof(*info->dlease);
	}

	if (info->head->is_target) {
		err = parse_reply_info_in(p, end, &info->targeti, features);
		if (err < 0)
			goto out_bad;
	}

	if (unlikely(*p != end))
		goto bad;
	return 0;

bad:
	err = -EIO;
out_bad:
	pr_err("problem parsing mds trace %d\n", err);
	return err;
}
/*
 * parse readdir results
 */
static int parse_reply_info_dir(void **p, void *end,
				struct ceph_mds_reply_info_parsed *info,
				u64 features)
{
	u32 num, i = 0;
	int err;

	info->dir_dir = *p;
	if (*p + sizeof(*info->dir_dir) > end)
		goto bad;
	*p += sizeof(*info->dir_dir) +
		sizeof(u32)*le32_to_cpu(info->dir_dir->ndist);
	if (*p > end)
		goto bad;

	ceph_decode_need(p, end, sizeof(num) + 2, bad);
	num = ceph_decode_32(p);
	info->dir_end = ceph_decode_8(p);
	info->dir_complete = ceph_decode_8(p);
	if (num == 0)
		goto done;

	BUG_ON(!info->dir_in);
	info->dir_dname = (void *)(info->dir_in + num);
	info->dir_dname_len = (void *)(info->dir_dname + num);
	info->dir_dlease = (void *)(info->dir_dname_len + num);
	if ((unsigned long)(info->dir_dlease + num) >
	    (unsigned long)info->dir_in + info->dir_buf_size) {
		pr_err("dir contents are larger than expected\n");
		WARN_ON(1);
		goto bad;
	}

	info->dir_nr = num;
	while (num) {
		/* dentry */
		ceph_decode_need(p, end, sizeof(u32)*2, bad);
		info->dir_dname_len[i] = ceph_decode_32(p);
		ceph_decode_need(p, end, info->dir_dname_len[i], bad);
		info->dir_dname[i] = *p;
		*p += info->dir_dname_len[i];
		dout("parsed dir dname '%.*s'\n", info->dir_dname_len[i],
		     info->dir_dname[i]);
		info->dir_dlease[i] = *p;
		*p += sizeof(struct ceph_mds_reply_lease);

		/* inode */
		err = parse_reply_info_in(p, end, &info->dir_in[i], features);
		if (err < 0)
			goto out_bad;
		i++;
		num--;
	}

done:
	if (*p != end)
		goto bad;
	return 0;

bad:
	err = -EIO;
out_bad:
	pr_err("problem parsing dir contents %d\n", err);
	return err;
}
/*
 * parse fcntl F_GETLK results
 */
static int parse_reply_info_filelock(void **p, void *end,
				     struct ceph_mds_reply_info_parsed *info,
				     u64 features)
{
	if (*p + sizeof(*info->filelock_reply) > end)
		goto bad;

	info->filelock_reply = *p;
	*p += sizeof(*info->filelock_reply);

	if (unlikely(*p != end))
		goto bad;
	return 0;

bad:
	return -EIO;
}

/*
 * parse create results
 */
static int parse_reply_info_create(void **p, void *end,
				   struct ceph_mds_reply_info_parsed *info,
				   u64 features)
{
	if (features & CEPH_FEATURE_REPLY_CREATE_INODE) {
		if (*p == end) {
			info->has_create_ino = false;
		} else {
			info->has_create_ino = true;
			info->ino = ceph_decode_64(p);
		}
	}

	if (unlikely(*p != end))
		goto bad;
	return 0;

bad:
	return -EIO;
}

/*
 * parse extra results
 */
static int parse_reply_info_extra(void **p, void *end,
				  struct ceph_mds_reply_info_parsed *info,
				  u64 features)
{
	if (info->head->op == CEPH_MDS_OP_GETFILELOCK)
		return parse_reply_info_filelock(p, end, info, features);
	else if (info->head->op == CEPH_MDS_OP_READDIR ||
		 info->head->op == CEPH_MDS_OP_LSSNAP)
		return parse_reply_info_dir(p, end, info, features);
	else if (info->head->op == CEPH_MDS_OP_CREATE)
		return parse_reply_info_create(p, end, info, features);
	else
		return -EIO;
}
/*
 * parse entire mds reply
 */
static int parse_reply_info(struct ceph_msg *msg,
			    struct ceph_mds_reply_info_parsed *info,
			    u64 features)
{
	void *p, *end;
	u32 len;
	int err;

	info->head = msg->front.iov_base;
	p = msg->front.iov_base + sizeof(struct ceph_mds_reply_head);
	end = p + msg->front.iov_len - sizeof(struct ceph_mds_reply_head);

	/* trace */
	ceph_decode_32_safe(&p, end, len, bad);
	if (len > 0) {
		ceph_decode_need(&p, end, len, bad);
		err = parse_reply_info_trace(&p, p+len, info, features);
		if (err < 0)
			goto out_bad;
	}

	/* extra */
	ceph_decode_32_safe(&p, end, len, bad);
	if (len > 0) {
		ceph_decode_need(&p, end, len, bad);
		err = parse_reply_info_extra(&p, p+len, info, features);
		if (err < 0)
			goto out_bad;
	}

	/* snap blob */
	ceph_decode_32_safe(&p, end, len, bad);
	info->snapblob_len = len;
	info->snapblob = p;
	p += len;

	if (p != end)
		goto bad;
	return 0;

bad:
	err = -EIO;
out_bad:
	pr_err("mds parse_reply err %d\n", err);
	return err;
}

static void destroy_reply_info(struct ceph_mds_reply_info_parsed *info)
{
	if (!info->dir_in)
		return;
	free_pages((unsigned long)info->dir_in, get_order(info->dir_buf_size));
}
/*
 * sessions
 */
const char *ceph_session_state_name(int s)
{
	switch (s) {
	case CEPH_MDS_SESSION_NEW: return "new";
	case CEPH_MDS_SESSION_OPENING: return "opening";
	case CEPH_MDS_SESSION_OPEN: return "open";
	case CEPH_MDS_SESSION_HUNG: return "hung";
	case CEPH_MDS_SESSION_CLOSING: return "closing";
	case CEPH_MDS_SESSION_RESTARTING: return "restarting";
	case CEPH_MDS_SESSION_RECONNECTING: return "reconnecting";
	default: return "???";
	}
}
static struct ceph_mds_session *get_session(struct ceph_mds_session *s)
{
	if (atomic_inc_not_zero(&s->s_ref)) {
		dout("mdsc get_session %p %d -> %d\n", s,
		     atomic_read(&s->s_ref)-1, atomic_read(&s->s_ref));
		return s;
	} else {
		dout("mdsc get_session %p 0 -- FAIL", s);
		return NULL;
	}
}

void ceph_put_mds_session(struct ceph_mds_session *s)
{
	dout("mdsc put_session %p %d -> %d\n", s,
	     atomic_read(&s->s_ref), atomic_read(&s->s_ref)-1);
	if (atomic_dec_and_test(&s->s_ref)) {
		if (s->s_auth.authorizer)
			ceph_auth_destroy_authorizer(
				s->s_mdsc->fsc->client->monc.auth,
				s->s_auth.authorizer);
		kfree(s);
	}
}
/*
 * called under mdsc->mutex
 */
struct ceph_mds_session *__ceph_lookup_mds_session(struct ceph_mds_client *mdsc,
						   int mds)
{
	struct ceph_mds_session *session;

	if (mds >= mdsc->max_sessions || mdsc->sessions[mds] == NULL)
		return NULL;
	session = mdsc->sessions[mds];
	dout("lookup_mds_session %p %d\n", session,
	     atomic_read(&session->s_ref));
	get_session(session);
	return session;
}
static bool __have_session(struct ceph_mds_client *mdsc, int mds)
{
	if (mds >= mdsc->max_sessions)
		return false;
	return mdsc->sessions[mds];
}

static int __verify_registered_session(struct ceph_mds_client *mdsc,
				       struct ceph_mds_session *s)
{
	if (s->s_mds >= mdsc->max_sessions ||
	    mdsc->sessions[s->s_mds] != s)
		return -ENOENT;
	return 0;
}
/*
 * create+register a new session for given mds.
 * called under mdsc->mutex.
 */
static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
						 int mds)
{
	struct ceph_mds_session *s;

	if (mds >= mdsc->mdsmap->m_max_mds)
		return ERR_PTR(-EINVAL);

	s = kzalloc(sizeof(*s), GFP_NOFS);
	if (!s)
		return ERR_PTR(-ENOMEM);
	s->s_mdsc = mdsc;
	s->s_mds = mds;
	s->s_state = CEPH_MDS_SESSION_NEW;
	s->s_ttl = 0;
	s->s_seq = 0;
	mutex_init(&s->s_mutex);

	ceph_con_init(&s->s_con, s, &mds_con_ops, &mdsc->fsc->client->msgr);

	spin_lock_init(&s->s_gen_ttl_lock);
	s->s_cap_gen = 0;
	s->s_cap_ttl = jiffies - 1;

	spin_lock_init(&s->s_cap_lock);
	s->s_renew_requested = 0;
	s->s_renew_seq = 0;
	INIT_LIST_HEAD(&s->s_caps);
	s->s_nr_caps = 0;
	s->s_trim_caps = 0;
	atomic_set(&s->s_ref, 1);
	INIT_LIST_HEAD(&s->s_waiting);
	INIT_LIST_HEAD(&s->s_unsafe);
	s->s_num_cap_releases = 0;
	s->s_cap_reconnect = 0;
	s->s_cap_iterator = NULL;
	INIT_LIST_HEAD(&s->s_cap_releases);
	INIT_LIST_HEAD(&s->s_cap_flushing);
	INIT_LIST_HEAD(&s->s_cap_snaps_flushing);

	dout("register_session mds%d\n", mds);
	if (mds >= mdsc->max_sessions) {
		int newmax = 1 << get_count_order(mds+1);
		struct ceph_mds_session **sa;

		dout("register_session realloc to %d\n", newmax);
		sa = kcalloc(newmax, sizeof(void *), GFP_NOFS);
		if (sa == NULL)
			goto fail_realloc;
		if (mdsc->sessions) {
			memcpy(sa, mdsc->sessions,
			       mdsc->max_sessions * sizeof(void *));
			kfree(mdsc->sessions);
		}
		mdsc->sessions = sa;
		mdsc->max_sessions = newmax;
	}
	mdsc->sessions[mds] = s;
	atomic_inc(&mdsc->num_sessions);
	atomic_inc(&s->s_ref);  /* one ref to sessions[], one to caller */

	ceph_con_open(&s->s_con, CEPH_ENTITY_TYPE_MDS, mds,
		      ceph_mdsmap_get_addr(mdsc->mdsmap, mds));

	return s;

fail_realloc:
	kfree(s);
	return ERR_PTR(-ENOMEM);
}
/*
 * called under mdsc->mutex
 */
static void __unregister_session(struct ceph_mds_client *mdsc,
				 struct ceph_mds_session *s)
{
	dout("__unregister_session mds%d %p\n", s->s_mds, s);
	BUG_ON(mdsc->sessions[s->s_mds] != s);
	mdsc->sessions[s->s_mds] = NULL;
	ceph_con_close(&s->s_con);
	ceph_put_mds_session(s);
	atomic_dec(&mdsc->num_sessions);
}

/*
 * drop session refs in request.
 *
 * should be last request ref, or hold mdsc->mutex
 */
static void put_request_session(struct ceph_mds_request *req)
{
	if (req->r_session) {
		ceph_put_mds_session(req->r_session);
		req->r_session = NULL;
	}
}
void ceph_mdsc_release_request(struct kref *kref)
{
	struct ceph_mds_request *req = container_of(kref,
						    struct ceph_mds_request,
						    r_kref);
	destroy_reply_info(&req->r_reply_info);
	if (req->r_request)
		ceph_msg_put(req->r_request);
	if (req->r_reply)
		ceph_msg_put(req->r_reply);
	if (req->r_inode) {
		ceph_put_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
		iput(req->r_inode);
	}
	if (req->r_locked_dir)
		ceph_put_cap_refs(ceph_inode(req->r_locked_dir), CEPH_CAP_PIN);
	iput(req->r_target_inode);
	if (req->r_dentry)
		dput(req->r_dentry);
	if (req->r_old_dentry)
		dput(req->r_old_dentry);
	if (req->r_old_dentry_dir) {
		/*
		 * track (and drop pins for) r_old_dentry_dir
		 * separately, since r_old_dentry's d_parent may have
		 * changed between the dir mutex being dropped and
		 * this request being freed.
		 */
		ceph_put_cap_refs(ceph_inode(req->r_old_dentry_dir),
				  CEPH_CAP_PIN);
		iput(req->r_old_dentry_dir);
	}
	kfree(req->r_path1);
	kfree(req->r_path2);
	if (req->r_pagelist)
		ceph_pagelist_release(req->r_pagelist);
	put_request_session(req);
	ceph_unreserve_caps(req->r_mdsc, &req->r_caps_reservation);
	kfree(req);
}
/*
 * lookup request, bump ref if found.
 *
 * called under mdsc->mutex.
 */
static struct ceph_mds_request *__lookup_request(struct ceph_mds_client *mdsc,
						 u64 tid)
{
	struct ceph_mds_request *req;
	struct rb_node *n = mdsc->request_tree.rb_node;

	while (n) {
		req = rb_entry(n, struct ceph_mds_request, r_node);
		if (tid < req->r_tid)
			n = n->rb_left;
		else if (tid > req->r_tid)
			n = n->rb_right;
		else {
			ceph_mdsc_get_request(req);
			return req;
		}
	}
	return NULL;
}

static void __insert_request(struct ceph_mds_client *mdsc,
			     struct ceph_mds_request *new)
{
	struct rb_node **p = &mdsc->request_tree.rb_node;
	struct rb_node *parent = NULL;
	struct ceph_mds_request *req = NULL;

	while (*p) {
		parent = *p;
		req = rb_entry(parent, struct ceph_mds_request, r_node);
		if (new->r_tid < req->r_tid)
			p = &(*p)->rb_left;
		else if (new->r_tid > req->r_tid)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&new->r_node, parent, p);
	rb_insert_color(&new->r_node, &mdsc->request_tree);
}
/*
 * Register an in-flight request, and assign a tid.  Link to directory
 * we are modifying (if any).
 *
 * Called under mdsc->mutex.
 */
static void __register_request(struct ceph_mds_client *mdsc,
			       struct ceph_mds_request *req,
			       struct inode *dir)
{
	req->r_tid = ++mdsc->last_tid;
	if (req->r_num_caps)
		ceph_reserve_caps(mdsc, &req->r_caps_reservation,
				  req->r_num_caps);
	dout("__register_request %p tid %lld\n", req, req->r_tid);
	ceph_mdsc_get_request(req);
	__insert_request(mdsc, req);

	req->r_uid = current_fsuid();
	req->r_gid = current_fsgid();

	if (mdsc->oldest_tid == 0 && req->r_op != CEPH_MDS_OP_SETFILELOCK)
		mdsc->oldest_tid = req->r_tid;

	if (dir) {
		ihold(dir);
		req->r_unsafe_dir = dir;
	}
}
static void __unregister_request(struct ceph_mds_client *mdsc,
				 struct ceph_mds_request *req)
{
	dout("__unregister_request %p tid %lld\n", req, req->r_tid);

	if (req->r_tid == mdsc->oldest_tid) {
		struct rb_node *p = rb_next(&req->r_node);
		mdsc->oldest_tid = 0;
		while (p) {
			struct ceph_mds_request *next_req =
				rb_entry(p, struct ceph_mds_request, r_node);
			if (next_req->r_op != CEPH_MDS_OP_SETFILELOCK) {
				mdsc->oldest_tid = next_req->r_tid;
				break;
			}
			p = rb_next(p);
		}
	}

	rb_erase(&req->r_node, &mdsc->request_tree);
	RB_CLEAR_NODE(&req->r_node);

	if (req->r_unsafe_dir && req->r_got_unsafe) {
		struct ceph_inode_info *ci = ceph_inode(req->r_unsafe_dir);
		spin_lock(&ci->i_unsafe_lock);
		list_del_init(&req->r_unsafe_dir_item);
		spin_unlock(&ci->i_unsafe_lock);
	}
	if (req->r_target_inode && req->r_got_unsafe) {
		struct ceph_inode_info *ci = ceph_inode(req->r_target_inode);
		spin_lock(&ci->i_unsafe_lock);
		list_del_init(&req->r_unsafe_target_item);
		spin_unlock(&ci->i_unsafe_lock);
	}

	if (req->r_unsafe_dir) {
		iput(req->r_unsafe_dir);
		req->r_unsafe_dir = NULL;
	}

	complete_all(&req->r_safe_completion);

	ceph_mdsc_put_request(req);
}
/*
 * Choose mds to send request to next.  If there is a hint set in the
 * request (e.g., due to a prior forward hint from the mds), use that.
 * Otherwise, consult frag tree and/or caps to identify the
 * appropriate mds.  If all else fails, choose randomly.
 *
 * Called under mdsc->mutex.
 */
static struct dentry *get_nonsnap_parent(struct dentry *dentry)
{
	/*
	 * we don't need to worry about protecting the d_parent access
	 * here because we never rename inside the snapped namespace
	 * except to resplice to another snapdir, and either the old or new
	 * result is a valid result.
	 */
	while (!IS_ROOT(dentry) && ceph_snap(d_inode(dentry)) != CEPH_NOSNAP)
		dentry = dentry->d_parent;
	return dentry;
}
static int __choose_mds(struct ceph_mds_client *mdsc,
			struct ceph_mds_request *req)
{
	struct inode *inode;
	struct ceph_inode_info *ci;
	struct ceph_cap *cap;
	int mode = req->r_direct_mode;
	int mds = -1;
	u32 hash = req->r_direct_hash;
	bool is_hash = req->r_direct_is_hash;

	/*
	 * is there a specific mds we should try?  ignore hint if we have
	 * no session and the mds is not up (active or recovering).
	 */
	if (req->r_resend_mds >= 0 &&
	    (__have_session(mdsc, req->r_resend_mds) ||
	     ceph_mdsmap_get_state(mdsc->mdsmap, req->r_resend_mds) > 0)) {
		dout("choose_mds using resend_mds mds%d\n",
		     req->r_resend_mds);
		return req->r_resend_mds;
	}

	if (mode == USE_RANDOM_MDS)
		goto random;

	inode = NULL;
	if (req->r_inode) {
		inode = req->r_inode;
	} else if (req->r_dentry) {
		/* ignore race with rename; old or new d_parent is okay */
		struct dentry *parent = req->r_dentry->d_parent;
		struct inode *dir = d_inode(parent);

		if (dir->i_sb != mdsc->fsc->sb) {
			/* not this fs! */
			inode = d_inode(req->r_dentry);
		} else if (ceph_snap(dir) != CEPH_NOSNAP) {
			/* direct snapped/virtual snapdir requests
			 * based on parent dir inode */
			struct dentry *dn = get_nonsnap_parent(parent);
			inode = d_inode(dn);
			dout("__choose_mds using nonsnap parent %p\n", inode);
		} else {
			/* dentry target */
			inode = d_inode(req->r_dentry);
			if (!inode || mode == USE_AUTH_MDS) {
				/* dir + name */
				inode = dir;
				hash = ceph_dentry_hash(dir, req->r_dentry);
				is_hash = true;
			}
		}
	}

	dout("__choose_mds %p is_hash=%d (%d) mode %d\n", inode, (int)is_hash,
	     (int)hash, mode);
	if (!inode)
		goto random;
	ci = ceph_inode(inode);

	if (is_hash && S_ISDIR(inode->i_mode)) {
		struct ceph_inode_frag frag;
		int found;

		ceph_choose_frag(ci, hash, &frag, &found);
		if (found) {
			if (mode == USE_ANY_MDS && frag.ndist > 0) {
				u8 r;

				/* choose a random replica */
				get_random_bytes(&r, 1);
				r %= frag.ndist;
				mds = frag.dist[r];
				dout("choose_mds %p %llx.%llx "
				     "frag %u mds%d (%d/%d)\n",
				     inode, ceph_vinop(inode),
				     frag.frag, mds,
				     (int)r, frag.ndist);
				if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
				    CEPH_MDS_STATE_ACTIVE)
					return mds;
			}

			/* since this file/dir wasn't known to be
			 * replicated, we want to look for the
			 * authoritative mds. */
			mode = USE_AUTH_MDS;
			if (frag.mds >= 0) {
				/* choose auth mds */
				mds = frag.mds;
				dout("choose_mds %p %llx.%llx "
				     "frag %u mds%d (auth)\n",
				     inode, ceph_vinop(inode), frag.frag, mds);
				if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
				    CEPH_MDS_STATE_ACTIVE)
					return mds;
			}
		}
	}

	spin_lock(&ci->i_ceph_lock);
	cap = NULL;
	if (mode == USE_AUTH_MDS)
		cap = ci->i_auth_cap;
	if (!cap && !RB_EMPTY_ROOT(&ci->i_caps))
		cap = rb_entry(rb_first(&ci->i_caps), struct ceph_cap, ci_node);
	if (!cap) {
		spin_unlock(&ci->i_ceph_lock);
		goto random;
	}
	mds = cap->session->s_mds;
	dout("choose_mds %p %llx.%llx mds%d (%scap %p)\n",
	     inode, ceph_vinop(inode), mds,
	     cap == ci->i_auth_cap ? "auth " : "", cap);
	spin_unlock(&ci->i_ceph_lock);
	return mds;

random:
	mds = ceph_mdsmap_get_random_mds(mdsc->mdsmap);
	dout("choose_mds chose random mds%d\n", mds);
	return mds;
}
/*
 * session messages
 */
static struct ceph_msg *create_session_msg(u32 op, u64 seq)
{
	struct ceph_msg *msg;
	struct ceph_mds_session_head *h;

	msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h), GFP_NOFS,
			   false);
	if (!msg) {
		pr_err("create_session_msg ENOMEM creating msg\n");
		return NULL;
	}
	h = msg->front.iov_base;
	h->op = cpu_to_le32(op);
	h->seq = cpu_to_le64(seq);

	return msg;
}
/*
 * session message, specialization for CEPH_SESSION_REQUEST_OPEN
 * to include additional client metadata fields.
 */
static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u64 seq)
{
	struct ceph_msg *msg;
	struct ceph_mds_session_head *h;
	int i = -1;
	int metadata_bytes = 0;
	int metadata_key_count = 0;
	struct ceph_options *opt = mdsc->fsc->client->options;
	void *p;

	const char* metadata[][2] = {
		{"hostname", utsname()->nodename},
		{"kernel_version", utsname()->release},
		{"entity_id", opt->name ? opt->name : ""},
		{NULL, NULL}
	};

	/* Calculate serialized length of metadata */
	metadata_bytes = 4;  /* map length */
	for (i = 0; metadata[i][0] != NULL; ++i) {
		metadata_bytes += 8 + strlen(metadata[i][0]) +
			strlen(metadata[i][1]);
		metadata_key_count++;
	}

	/* Allocate the message */
	msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h) + metadata_bytes,
			   GFP_NOFS, false);
	if (!msg) {
		pr_err("create_session_msg ENOMEM creating msg\n");
		return NULL;
	}
	h = msg->front.iov_base;
	h->op = cpu_to_le32(CEPH_SESSION_REQUEST_OPEN);
	h->seq = cpu_to_le64(seq);

	/*
	 * Serialize client metadata into waiting buffer space, using
	 * the format that userspace expects for map<string, string>
	 *
	 * ClientSession messages with metadata are v2
	 */
	msg->hdr.version = cpu_to_le16(2);
	msg->hdr.compat_version = cpu_to_le16(1);

	/* The write pointer, following the session_head structure */
	p = msg->front.iov_base + sizeof(*h);

	/* Number of entries in the map */
	ceph_encode_32(&p, metadata_key_count);

	/* Two length-prefixed strings for each entry in the map */
	for (i = 0; metadata[i][0] != NULL; ++i) {
		size_t const key_len = strlen(metadata[i][0]);
		size_t const val_len = strlen(metadata[i][1]);

		ceph_encode_32(&p, key_len);
		memcpy(p, metadata[i][0], key_len);
		p += key_len;
		ceph_encode_32(&p, val_len);
		memcpy(p, metadata[i][1], val_len);
		p += val_len;
	}

	return msg;
}
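/*
 * On the wire (as encoded above) the metadata map is laid out as:
 *
 *	u32 count;			number of map entries
 *	for each entry:
 *		u32 key_len;  char key[key_len];
 *		u32 val_len;  char val[val_len];
 *
 * so a "hostname" -> "foo" entry costs 8 bytes of lengths plus 11
 * bytes of string payload, matching the 8 + strlen(k) + strlen(v)
 * accounting in the sizing loop.
 */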
/*
 * send session open request.
 *
 * called under mdsc->mutex
 */
static int __open_session(struct ceph_mds_client *mdsc,
			  struct ceph_mds_session *session)
{
	struct ceph_msg *msg;
	int mstate;
	int mds = session->s_mds;

	/* wait for mds to go active? */
	mstate = ceph_mdsmap_get_state(mdsc->mdsmap, mds);
	dout("open_session to mds%d (%s)\n", mds,
	     ceph_mds_state_name(mstate));
	session->s_state = CEPH_MDS_SESSION_OPENING;
	session->s_renew_requested = jiffies;

	/* send connect message */
	msg = create_session_open_msg(mdsc, session->s_seq);
	if (!msg)
		return -ENOMEM;
	ceph_con_send(&session->s_con, msg);
	return 0;
}
/*
 * open sessions for any export targets for the given mds
 *
 * called under mdsc->mutex
 */
static struct ceph_mds_session *
__open_export_target_session(struct ceph_mds_client *mdsc, int target)
{
	struct ceph_mds_session *session;

	session = __ceph_lookup_mds_session(mdsc, target);
	if (!session) {
		session = register_session(mdsc, target);
		if (IS_ERR(session))
			return session;
	}
	if (session->s_state == CEPH_MDS_SESSION_NEW ||
	    session->s_state == CEPH_MDS_SESSION_CLOSING)
		__open_session(mdsc, session);

	return session;
}

struct ceph_mds_session *
ceph_mdsc_open_export_target_session(struct ceph_mds_client *mdsc, int target)
{
	struct ceph_mds_session *session;

	dout("open_export_target_session to mds%d\n", target);

	mutex_lock(&mdsc->mutex);
	session = __open_export_target_session(mdsc, target);
	mutex_unlock(&mdsc->mutex);

	return session;
}

static void __open_export_target_sessions(struct ceph_mds_client *mdsc,
					  struct ceph_mds_session *session)
{
	struct ceph_mds_info *mi;
	struct ceph_mds_session *ts;
	int i, mds = session->s_mds;

	if (mds >= mdsc->mdsmap->m_max_mds)
		return;

	mi = &mdsc->mdsmap->m_info[mds];
	dout("open_export_target_sessions for mds%d (%d targets)\n",
	     session->s_mds, mi->num_export_targets);

	for (i = 0; i < mi->num_export_targets; i++) {
		ts = __open_export_target_session(mdsc, mi->export_targets[i]);
		if (!IS_ERR(ts))
			ceph_put_mds_session(ts);
	}
}

void ceph_mdsc_open_export_target_sessions(struct ceph_mds_client *mdsc,
					   struct ceph_mds_session *session)
{
	mutex_lock(&mdsc->mutex);
	__open_export_target_sessions(mdsc, session);
	mutex_unlock(&mdsc->mutex);
}
/*
 * session caps
 */

/* caller holds s_cap_lock, we drop it */
static void cleanup_cap_releases(struct ceph_mds_client *mdsc,
				 struct ceph_mds_session *session)
	__releases(session->s_cap_lock)
{
	LIST_HEAD(tmp_list);
	list_splice_init(&session->s_cap_releases, &tmp_list);
	session->s_num_cap_releases = 0;
	spin_unlock(&session->s_cap_lock);

	dout("cleanup_cap_releases mds%d\n", session->s_mds);
	while (!list_empty(&tmp_list)) {
		struct ceph_cap *cap;
		/* zero out the in-progress message */
		cap = list_first_entry(&tmp_list,
				       struct ceph_cap, session_caps);
		list_del(&cap->session_caps);
		ceph_put_cap(mdsc, cap);
	}
}
static void cleanup_session_requests(struct ceph_mds_client *mdsc,
				     struct ceph_mds_session *session)
{
	struct ceph_mds_request *req;
	struct rb_node *p;

	dout("cleanup_session_requests mds%d\n", session->s_mds);
	mutex_lock(&mdsc->mutex);
	while (!list_empty(&session->s_unsafe)) {
		req = list_first_entry(&session->s_unsafe,
				       struct ceph_mds_request, r_unsafe_item);
		list_del_init(&req->r_unsafe_item);
		pr_warn_ratelimited(" dropping unsafe request %llu\n",
				    req->r_tid);
		__unregister_request(mdsc, req);
	}
	/* zero r_attempts, so kick_requests() will re-send requests */
	p = rb_first(&mdsc->request_tree);
	while (p) {
		req = rb_entry(p, struct ceph_mds_request, r_node);
		p = rb_next(p);
		if (req->r_session &&
		    req->r_session->s_mds == session->s_mds)
			req->r_attempts = 0;
	}
	mutex_unlock(&mdsc->mutex);
}
/*
 * Helper to safely iterate over all caps associated with a session, with
 * special care taken to handle a racing __ceph_remove_cap().
 *
 * Caller must hold session s_mutex.
 */
static int iterate_session_caps(struct ceph_mds_session *session,
				int (*cb)(struct inode *, struct ceph_cap *,
					  void *), void *arg)
{
	struct list_head *p;
	struct ceph_cap *cap;
	struct inode *inode, *last_inode = NULL;
	struct ceph_cap *old_cap = NULL;
	int ret;

	dout("iterate_session_caps %p mds%d\n", session, session->s_mds);
	spin_lock(&session->s_cap_lock);
	p = session->s_caps.next;
	while (p != &session->s_caps) {
		cap = list_entry(p, struct ceph_cap, session_caps);
		inode = igrab(&cap->ci->vfs_inode);
		if (!inode) {
			p = p->next;
			continue;
		}
		session->s_cap_iterator = cap;
		spin_unlock(&session->s_cap_lock);

		if (last_inode) {
			iput(last_inode);
			last_inode = NULL;
		}
		if (old_cap) {
			ceph_put_cap(session->s_mdsc, old_cap);
			old_cap = NULL;
		}

		ret = cb(inode, cap, arg);
		last_inode = inode;

		spin_lock(&session->s_cap_lock);
		p = p->next;
		if (cap->ci == NULL) {
			dout("iterate_session_caps finishing cap %p removal\n",
			     cap);
			BUG_ON(cap->session != session);
			cap->session = NULL;
			list_del_init(&cap->session_caps);
			session->s_nr_caps--;
			if (cap->queue_release) {
				list_add_tail(&cap->session_caps,
					      &session->s_cap_releases);
				session->s_num_cap_releases++;
			} else {
				old_cap = cap;  /* put_cap it w/o locks held */
			}
		}
		if (ret < 0)
			goto out;
	}
	ret = 0;
out:
	session->s_cap_iterator = NULL;
	spin_unlock(&session->s_cap_lock);

	iput(last_inode);
	if (old_cap)
		ceph_put_cap(session->s_mdsc, old_cap);

	return ret;
}
static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
				  void *arg)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	LIST_HEAD(to_remove);
	int drop = 0;

	dout("removing cap %p, ci is %p, inode is %p\n",
	     cap, ci, &ci->vfs_inode);
	spin_lock(&ci->i_ceph_lock);
	__ceph_remove_cap(cap, false);
	if (!ci->i_auth_cap) {
		struct ceph_cap_flush *cf;
		struct ceph_mds_client *mdsc =
			ceph_sb_to_client(inode->i_sb)->mdsc;

		while (true) {
			struct rb_node *n = rb_first(&ci->i_cap_flush_tree);
			if (!n)
				break;
			cf = rb_entry(n, struct ceph_cap_flush, i_node);
			rb_erase(&cf->i_node, &ci->i_cap_flush_tree);
			list_add(&cf->list, &to_remove);
		}

		spin_lock(&mdsc->cap_dirty_lock);

		list_for_each_entry(cf, &to_remove, list)
			rb_erase(&cf->g_node, &mdsc->cap_flush_tree);

		if (!list_empty(&ci->i_dirty_item)) {
			pr_warn_ratelimited(
				" dropping dirty %s state for %p %lld\n",
				ceph_cap_string(ci->i_dirty_caps),
				inode, ceph_ino(inode));
			ci->i_dirty_caps = 0;
			list_del_init(&ci->i_dirty_item);
			drop = 1;
		}
		if (!list_empty(&ci->i_flushing_item)) {
			pr_warn_ratelimited(
				" dropping dirty+flushing %s state for %p %lld\n",
				ceph_cap_string(ci->i_flushing_caps),
				inode, ceph_ino(inode));
			ci->i_flushing_caps = 0;
			list_del_init(&ci->i_flushing_item);
			mdsc->num_cap_flushing--;
			drop = 1;
		}
		spin_unlock(&mdsc->cap_dirty_lock);

		if (!ci->i_dirty_caps && ci->i_prealloc_cap_flush) {
			list_add(&ci->i_prealloc_cap_flush->list, &to_remove);
			ci->i_prealloc_cap_flush = NULL;
		}
	}
	spin_unlock(&ci->i_ceph_lock);
	while (!list_empty(&to_remove)) {
		struct ceph_cap_flush *cf;
		cf = list_first_entry(&to_remove,
				      struct ceph_cap_flush, list);
		list_del(&cf->list);
		ceph_free_cap_flush(cf);
	}
	while (drop--)
		iput(inode);
	return 0;
}
/*
 * caller must hold session s_mutex
 */
static void remove_session_caps(struct ceph_mds_session *session)
{
	dout("remove_session_caps on %p\n", session);
	iterate_session_caps(session, remove_session_caps_cb, NULL);

	spin_lock(&session->s_cap_lock);
	if (session->s_nr_caps > 0) {
		struct super_block *sb = session->s_mdsc->fsc->sb;
		struct inode *inode;
		struct ceph_cap *cap, *prev = NULL;
		struct ceph_vino vino;
		/*
		 * iterate_session_caps() skips inodes that are being
		 * deleted, we need to wait until deletions are complete.
		 * __wait_on_freeing_inode() is designed for the job,
		 * but it is not exported, so use lookup inode function
		 * to access it.
		 */
		while (!list_empty(&session->s_caps)) {
			cap = list_entry(session->s_caps.next,
					 struct ceph_cap, session_caps);
			if (cap == prev)
				break;
			prev = cap;
			vino = cap->ci->i_vino;
			spin_unlock(&session->s_cap_lock);

			inode = ceph_find_inode(sb, vino);
			iput(inode);

			spin_lock(&session->s_cap_lock);
		}
	}

	// drop cap expires and unlock s_cap_lock
	cleanup_cap_releases(session->s_mdsc, session);

	BUG_ON(session->s_nr_caps > 0);
	BUG_ON(!list_empty(&session->s_cap_flushing));
}
/*
 * wake up any threads waiting on this session's caps.  if the cap is
 * old (didn't get renewed on the client reconnect), remove it now.
 *
 * caller must hold s_mutex.
 */
static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap,
			      void *arg)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	wake_up_all(&ci->i_cap_wq);
	if (arg) {
		spin_lock(&ci->i_ceph_lock);
		ci->i_wanted_max_size = 0;
		ci->i_requested_max_size = 0;
		spin_unlock(&ci->i_ceph_lock);
	}
	return 0;
}

static void wake_up_session_caps(struct ceph_mds_session *session,
				 int reconnect)
{
	dout("wake_up_session_caps %p mds%d\n", session, session->s_mds);
	iterate_session_caps(session, wake_up_session_cb,
			     (void *)(unsigned long)reconnect);
}
/*
 * Send periodic message to MDS renewing all currently held caps.  The
 * ack will reset the expiration for all caps from this session.
 *
 * caller holds s_mutex
 */
static int send_renew_caps(struct ceph_mds_client *mdsc,
			   struct ceph_mds_session *session)
{
	struct ceph_msg *msg;
	int state;

	if (time_after_eq(jiffies, session->s_cap_ttl) &&
	    time_after_eq(session->s_cap_ttl, session->s_renew_requested))
		pr_info("mds%d caps stale\n", session->s_mds);
	session->s_renew_requested = jiffies;

	/* do not try to renew caps until a recovering mds has reconnected
	 * with its clients. */
	state = ceph_mdsmap_get_state(mdsc->mdsmap, session->s_mds);
	if (state < CEPH_MDS_STATE_RECONNECT) {
		dout("send_renew_caps ignoring mds%d (%s)\n",
		     session->s_mds, ceph_mds_state_name(state));
		return 0;
	}

	dout("send_renew_caps to mds%d (%s)\n", session->s_mds,
	     ceph_mds_state_name(state));
	msg = create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS,
				 ++session->s_renew_seq);
	if (!msg)
		return -ENOMEM;
	ceph_con_send(&session->s_con, msg);
	return 0;
}
static int send_flushmsg_ack(struct ceph_mds_client *mdsc,
			     struct ceph_mds_session *session, u64 seq)
{
	struct ceph_msg *msg;

	dout("send_flushmsg_ack to mds%d (%s) seq %lld\n",
	     session->s_mds, ceph_session_state_name(session->s_state), seq);
	msg = create_session_msg(CEPH_SESSION_FLUSHMSG_ACK, seq);
	if (!msg)
		return -ENOMEM;
	ceph_con_send(&session->s_con, msg);
	return 0;
}
/*
 * Note new cap ttl, and any transition from stale -> not stale (fresh?).
 *
 * Called under session->s_mutex
 */
static void renewed_caps(struct ceph_mds_client *mdsc,
			 struct ceph_mds_session *session, int is_renew)
{
	int was_stale;
	int wake = 0;

	spin_lock(&session->s_cap_lock);
	was_stale = is_renew && time_after_eq(jiffies, session->s_cap_ttl);

	session->s_cap_ttl = session->s_renew_requested +
		mdsc->mdsmap->m_session_timeout*HZ;

	if (was_stale) {
		if (time_before(jiffies, session->s_cap_ttl)) {
			pr_info("mds%d caps renewed\n", session->s_mds);
			wake = 1;
		} else {
			pr_info("mds%d caps still stale\n", session->s_mds);
		}
	}
	dout("renewed_caps mds%d ttl now %lu, was %s, now %s\n",
	     session->s_mds, session->s_cap_ttl, was_stale ? "stale" : "fresh",
	     time_before(jiffies, session->s_cap_ttl) ? "fresh" : "stale");
	spin_unlock(&session->s_cap_lock);

	if (wake)
		wake_up_session_caps(session, 0);
}
/*
 * send a session close request
 */
static int request_close_session(struct ceph_mds_client *mdsc,
				 struct ceph_mds_session *session)
{
	struct ceph_msg *msg;

	dout("request_close_session mds%d state %s seq %lld\n",
	     session->s_mds, ceph_session_state_name(session->s_state),
	     session->s_seq);
	msg = create_session_msg(CEPH_SESSION_REQUEST_CLOSE, session->s_seq);
	if (!msg)
		return -ENOMEM;
	ceph_con_send(&session->s_con, msg);
	return 1;
}

/*
 * Called with s_mutex held.
 */
static int __close_session(struct ceph_mds_client *mdsc,
			   struct ceph_mds_session *session)
{
	if (session->s_state >= CEPH_MDS_SESSION_CLOSING)
		return 0;
	session->s_state = CEPH_MDS_SESSION_CLOSING;
	return request_close_session(mdsc, session);
}
/*
 * Trim old(er) caps.
 *
 * Because we can't cache an inode without one or more caps, we do
 * this indirectly: if a cap is unused, we prune its aliases, at which
 * point the inode will hopefully get dropped too.
 *
 * Yes, this is a bit sloppy.  Our only real goal here is to respond to
 * memory pressure from the MDS, though, so it needn't be perfect.
 */
static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
{
	struct ceph_mds_session *session = arg;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int used, wanted, oissued, mine;

	if (session->s_trim_caps <= 0)
		return -1;

	spin_lock(&ci->i_ceph_lock);
	mine = cap->issued | cap->implemented;
	used = __ceph_caps_used(ci);
	wanted = __ceph_caps_file_wanted(ci);
	oissued = __ceph_caps_issued_other(ci, cap);

	dout("trim_caps_cb %p cap %p mine %s oissued %s used %s wanted %s\n",
	     inode, cap, ceph_cap_string(mine), ceph_cap_string(oissued),
	     ceph_cap_string(used), ceph_cap_string(wanted));
	if (cap == ci->i_auth_cap) {
		if (ci->i_dirty_caps || ci->i_flushing_caps ||
		    !list_empty(&ci->i_cap_snaps))
			goto out;
		if ((used | wanted) & CEPH_CAP_ANY_WR)
			goto out;
	}
	/* The inode has cached pages, but it's no longer used.
	 * we can safely drop it */
	if (wanted == 0 && used == CEPH_CAP_FILE_CACHE &&
	    !(oissued & CEPH_CAP_FILE_CACHE)) {
		used = 0;
		oissued = 0;
	}
	if ((used | wanted) & ~oissued & mine)
		goto out;   /* we need these caps */

	session->s_trim_caps--;
	if (oissued) {
		/* we aren't the only cap.. just remove us */
		__ceph_remove_cap(cap, true);
	} else {
		/* try dropping referring dentries */
		spin_unlock(&ci->i_ceph_lock);
		d_prune_aliases(inode);
		dout("trim_caps_cb %p cap %p pruned, count now %d\n",
		     inode, cap, atomic_read(&inode->i_count));
		return 0;
	}

out:
	spin_unlock(&ci->i_ceph_lock);
	return 0;
}
/*
 * Trim session cap count down to some max number.
 */
static int trim_caps(struct ceph_mds_client *mdsc,
		     struct ceph_mds_session *session,
		     int max_caps)
{
	int trim_caps = session->s_nr_caps - max_caps;

	dout("trim_caps mds%d start: %d / %d, trim %d\n",
	     session->s_mds, session->s_nr_caps, max_caps, trim_caps);
	if (trim_caps > 0) {
		session->s_trim_caps = trim_caps;
		iterate_session_caps(session, trim_caps_cb, session);
		dout("trim_caps mds%d done: %d / %d, trimmed %d\n",
		     session->s_mds, session->s_nr_caps, max_caps,
		     trim_caps - session->s_trim_caps);
		session->s_trim_caps = 0;
	}

	ceph_send_cap_releases(mdsc, session);
	return 0;
}
static int check_capsnap_flush(struct ceph_inode_info *ci,
			       u64 want_snap_seq)
{
	int ret = 1;
	spin_lock(&ci->i_ceph_lock);
	if (want_snap_seq > 0 && !list_empty(&ci->i_cap_snaps)) {
		struct ceph_cap_snap *capsnap =
			list_first_entry(&ci->i_cap_snaps,
					 struct ceph_cap_snap, ci_item);
		ret = capsnap->follows >= want_snap_seq;
	}
	spin_unlock(&ci->i_ceph_lock);
	return ret;
}

static int check_caps_flush(struct ceph_mds_client *mdsc,
			    u64 want_flush_tid)
{
	struct rb_node *n;
	struct ceph_cap_flush *cf;
	int ret = 1;

	spin_lock(&mdsc->cap_dirty_lock);
	n = rb_first(&mdsc->cap_flush_tree);
	cf = n ? rb_entry(n, struct ceph_cap_flush, g_node) : NULL;
	if (cf && cf->tid <= want_flush_tid) {
		dout("check_caps_flush still flushing tid %llu <= %llu\n",
		     cf->tid, want_flush_tid);
		ret = 0;
	}
	spin_unlock(&mdsc->cap_dirty_lock);
	return ret;
}
/*
 * flush all dirty inode data to disk.
 *
 * waits until we've flushed through want_flush_tid
 */
static void wait_caps_flush(struct ceph_mds_client *mdsc,
			    u64 want_flush_tid, u64 want_snap_seq)
{
	int mds;

	dout("check_caps_flush want %llu snap want %llu\n",
	     want_flush_tid, want_snap_seq);
	mutex_lock(&mdsc->mutex);
	for (mds = 0; mds < mdsc->max_sessions; ) {
		struct ceph_mds_session *session = mdsc->sessions[mds];
		struct inode *inode = NULL;

		if (!session) {
			mds++;
			continue;
		}
		get_session(session);
		mutex_unlock(&mdsc->mutex);

		mutex_lock(&session->s_mutex);
		if (!list_empty(&session->s_cap_snaps_flushing)) {
			struct ceph_cap_snap *capsnap =
				list_first_entry(&session->s_cap_snaps_flushing,
						 struct ceph_cap_snap,
						 ci_item);
			struct ceph_inode_info *ci = capsnap->ci;
			if (!check_capsnap_flush(ci, want_snap_seq)) {
				dout("check_cap_flush still flushing snap %p "
				     "follows %lld <= %lld to mds%d\n",
				     &ci->vfs_inode, capsnap->follows,
				     want_snap_seq, mds);
				inode = igrab(&ci->vfs_inode);
			}
		}
		mutex_unlock(&session->s_mutex);
		ceph_put_mds_session(session);

		if (inode) {
			wait_event(mdsc->cap_flushing_wq,
				   check_capsnap_flush(ceph_inode(inode),
						       want_snap_seq));
			iput(inode);
		} else {
			mds++;
		}

		mutex_lock(&mdsc->mutex);
	}
	mutex_unlock(&mdsc->mutex);

	wait_event(mdsc->cap_flushing_wq,
		   check_caps_flush(mdsc, want_flush_tid));

	dout("check_caps_flush ok, flushed thru %llu\n", want_flush_tid);
}
/*
 * called under s_mutex
 */
void ceph_send_cap_releases(struct ceph_mds_client *mdsc,
			    struct ceph_mds_session *session)
{
	struct ceph_msg *msg = NULL;
	struct ceph_mds_cap_release *head;
	struct ceph_mds_cap_item *item;
	struct ceph_cap *cap;
	LIST_HEAD(tmp_list);
	int num_cap_releases;

	spin_lock(&session->s_cap_lock);
again:
	list_splice_init(&session->s_cap_releases, &tmp_list);
	num_cap_releases = session->s_num_cap_releases;
	session->s_num_cap_releases = 0;
	spin_unlock(&session->s_cap_lock);

	while (!list_empty(&tmp_list)) {
		if (!msg) {
			msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE,
					   PAGE_CACHE_SIZE, GFP_NOFS, false);
			if (!msg)
				goto out_err;
			head = msg->front.iov_base;
			head->num = cpu_to_le32(0);
			msg->front.iov_len = sizeof(*head);
		}
		cap = list_first_entry(&tmp_list, struct ceph_cap,
				       session_caps);
		list_del(&cap->session_caps);
		num_cap_releases--;

		head = msg->front.iov_base;
		le32_add_cpu(&head->num, 1);
		item = msg->front.iov_base + msg->front.iov_len;
		item->ino = cpu_to_le64(cap->cap_ino);
		item->cap_id = cpu_to_le64(cap->cap_id);
		item->migrate_seq = cpu_to_le32(cap->mseq);
		item->seq = cpu_to_le32(cap->issue_seq);
		msg->front.iov_len += sizeof(*item);

		ceph_put_cap(mdsc, cap);

		if (le32_to_cpu(head->num) == CEPH_CAPS_PER_RELEASE) {
			msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
			dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
			ceph_con_send(&session->s_con, msg);
			msg = NULL;
		}
	}

	BUG_ON(num_cap_releases != 0);

	spin_lock(&session->s_cap_lock);
	if (!list_empty(&session->s_cap_releases))
		goto again;
	spin_unlock(&session->s_cap_lock);

	if (msg) {
		msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
		dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
		ceph_con_send(&session->s_con, msg);
	}
	return;
out_err:
	pr_err("send_cap_releases mds%d, failed to allocate message\n",
	       session->s_mds);
	spin_lock(&session->s_cap_lock);
	list_splice(&tmp_list, &session->s_cap_releases);
	session->s_num_cap_releases += num_cap_releases;
	spin_unlock(&session->s_cap_lock);
}
/*
 * requests
 */

int ceph_alloc_readdir_reply_buffer(struct ceph_mds_request *req,
				    struct inode *dir)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	struct ceph_mount_options *opt = req->r_mdsc->fsc->mount_options;
	size_t size = sizeof(*rinfo->dir_in) + sizeof(*rinfo->dir_dname_len) +
		      sizeof(*rinfo->dir_dname) + sizeof(*rinfo->dir_dlease);
	int order, num_entries;

	spin_lock(&ci->i_ceph_lock);
	num_entries = ci->i_files + ci->i_subdirs;
	spin_unlock(&ci->i_ceph_lock);
	num_entries = max(num_entries, 1);
	num_entries = min(num_entries, opt->max_readdir);

	order = get_order(size * num_entries);
	while (order >= 0) {
		rinfo->dir_in = (void*)__get_free_pages(GFP_KERNEL |
							__GFP_NOWARN,
							order);
		if (rinfo->dir_in)
			break;
		order--;
	}
	if (!rinfo->dir_in)
		return -ENOMEM;

	num_entries = (PAGE_SIZE << order) / size;
	num_entries = min(num_entries, opt->max_readdir);

	rinfo->dir_buf_size = PAGE_SIZE << order;
	req->r_num_caps = num_entries + 1;
	req->r_args.readdir.max_entries = cpu_to_le32(num_entries);
	req->r_args.readdir.max_bytes = cpu_to_le32(opt->max_readdir_bytes);
	return 0;
}
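/*
 * Illustrative sizing (the numbers are hypothetical, not taken from
 * the code): if the four per-entry structures added up to size = 64
 * bytes and the directory claimed 1000 entries, size * num_entries is
 * 64000 bytes, so get_order() requests an order-4 (16-page, 64 KiB on
 * 4 KiB pages) allocation, falling back to smaller orders and fewer
 * entries if that allocation fails.
 */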
/*
 * Create an mds request.
 */
struct ceph_mds_request *
ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
{
	struct ceph_mds_request *req = kzalloc(sizeof(*req), GFP_NOFS);

	if (!req)
		return ERR_PTR(-ENOMEM);

	mutex_init(&req->r_fill_mutex);
	req->r_mdsc = mdsc;
	req->r_started = jiffies;
	req->r_resend_mds = -1;
	INIT_LIST_HEAD(&req->r_unsafe_dir_item);
	INIT_LIST_HEAD(&req->r_unsafe_target_item);
	req->r_fmode = -1;
	kref_init(&req->r_kref);
	INIT_LIST_HEAD(&req->r_wait);
	init_completion(&req->r_completion);
	init_completion(&req->r_safe_completion);
	INIT_LIST_HEAD(&req->r_unsafe_item);

	req->r_stamp = current_fs_time(mdsc->fsc->sb);

	req->r_op = op;
	req->r_direct_mode = mode;
	return req;
}
/*
 * return oldest (lowest) request, tid in request tree, 0 if none.
 *
 * called under mdsc->mutex.
 */
static struct ceph_mds_request *__get_oldest_req(struct ceph_mds_client *mdsc)
{
	if (RB_EMPTY_ROOT(&mdsc->request_tree))
		return NULL;
	return rb_entry(rb_first(&mdsc->request_tree),
			struct ceph_mds_request, r_node);
}

static inline u64 __get_oldest_tid(struct ceph_mds_client *mdsc)
{
	return mdsc->oldest_tid;
}
/*
 * Build a dentry's path.  Allocate on heap; caller must kfree.  Based
 * on build_path_from_dentry in fs/cifs/dir.c.
 *
 * If @stop_on_nosnap, generate path relative to the first non-snapped
 * inode.
 *
 * Encode hidden .snap dirs as a double /, i.e.
 *   foo/.snap/bar -> foo//bar
 */
char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base,
			   int stop_on_nosnap)
{
	struct dentry *temp;
	char *path;
	int len, pos;
	unsigned seq;

	if (dentry == NULL)
		return ERR_PTR(-EINVAL);

retry:
	len = 0;
	seq = read_seqbegin(&rename_lock);
	rcu_read_lock();
	for (temp = dentry; !IS_ROOT(temp);) {
		struct inode *inode = d_inode(temp);
		if (inode && ceph_snap(inode) == CEPH_SNAPDIR)
			len++;  /* slash only */
		else if (stop_on_nosnap && inode &&
			 ceph_snap(inode) == CEPH_NOSNAP)
			break;
		else
			len += 1 + temp->d_name.len;
		temp = temp->d_parent;
	}
	rcu_read_unlock();
	if (len)
		len--;  /* no leading '/' */

	path = kmalloc(len+1, GFP_NOFS);
	if (path == NULL)
		return ERR_PTR(-ENOMEM);
	pos = len;
	path[pos] = 0;	/* trailing null */
	rcu_read_lock();
	for (temp = dentry; !IS_ROOT(temp) && pos != 0; ) {
		struct inode *inode;

		spin_lock(&temp->d_lock);
		inode = d_inode(temp);
		if (inode && ceph_snap(inode) == CEPH_SNAPDIR) {
			dout("build_path path+%d: %p SNAPDIR\n",
			     pos, temp);
		} else if (stop_on_nosnap && inode &&
			   ceph_snap(inode) == CEPH_NOSNAP) {
			spin_unlock(&temp->d_lock);
			break;
		} else {
			pos -= temp->d_name.len;
			if (pos < 0) {
				spin_unlock(&temp->d_lock);
				break;
			}
			strncpy(path + pos, temp->d_name.name,
				temp->d_name.len);
		}
		spin_unlock(&temp->d_lock);
		if (pos)
			path[--pos] = '/';
		temp = temp->d_parent;
	}
	rcu_read_unlock();
	if (pos != 0 || read_seqretry(&rename_lock, seq)) {
		pr_err("build_path did not end path lookup where "
		       "expected, namelen is %d, pos is %d\n", len, pos);
		/* presumably this is only possible if racing with a
		   rename of one of the parent directories (we can not
		   lock the dentries above us to prevent this, but
		   retrying should be harmless) */
		kfree(path);
		goto retry;
	}

	*base = ceph_ino(d_inode(temp));
	*plen = len;
	dout("build_path on %p %d built %llx '%.*s'\n",
	     dentry, d_count(dentry), *base, len, path);
	return path;
}
static int build_dentry_path(struct dentry *dentry,
			     const char **ppath, int *ppathlen, u64 *pino,
			     int *pfreepath)
{
	char *path;

	if (ceph_snap(d_inode(dentry->d_parent)) == CEPH_NOSNAP) {
		*pino = ceph_ino(d_inode(dentry->d_parent));
		*ppath = dentry->d_name.name;
		*ppathlen = dentry->d_name.len;
		return 0;
	}
	path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
	if (IS_ERR(path))
		return PTR_ERR(path);
	*ppath = path;
	*pfreepath = 1;
	return 0;
}

static int build_inode_path(struct inode *inode,
			    const char **ppath, int *ppathlen, u64 *pino,
			    int *pfreepath)
{
	struct dentry *dentry;
	char *path;

	if (ceph_snap(inode) == CEPH_NOSNAP) {
		*pino = ceph_ino(inode);
		*ppathlen = 0;
		return 0;
	}
	dentry = d_find_alias(inode);
	path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
	dput(dentry);
	if (IS_ERR(path))
		return PTR_ERR(path);
	*ppath = path;
	*pfreepath = 1;
	return 0;
}
/*
 * request arguments may be specified via an inode *, a dentry *, or
 * an explicit ino+path.
 */
static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
				 const char *rpath, u64 rino,
				 const char **ppath, int *pathlen,
				 u64 *ino, int *freepath)
{
	int r = 0;

	if (rinode) {
		r = build_inode_path(rinode, ppath, pathlen, ino, freepath);
		dout(" inode %p %llx.%llx\n", rinode, ceph_ino(rinode),
		     ceph_snap(rinode));
	} else if (rdentry) {
		r = build_dentry_path(rdentry, ppath, pathlen, ino, freepath);
		dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen,
		     *ppath);
	} else if (rpath || rino) {
		*ino = rino;
		*ppath = rpath;
		*pathlen = rpath ? strlen(rpath) : 0;
		dout(" path %.*s\n", *pathlen, rpath);
	}

	return r;
}
/*
 * called under mdsc->mutex
 */
static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
					       struct ceph_mds_request *req,
					       int mds, bool drop_cap_releases)
{
	struct ceph_msg *msg;
	struct ceph_mds_request_head *head;
	const char *path1 = NULL;
	const char *path2 = NULL;
	u64 ino1 = 0, ino2 = 0;
	int pathlen1 = 0, pathlen2 = 0;
	int freepath1 = 0, freepath2 = 0;
	int len;
	u16 releases;
	void *p, *end;
	int ret;

	ret = set_request_path_attr(req->r_inode, req->r_dentry,
				    req->r_path1, req->r_ino1.ino,
				    &path1, &pathlen1, &ino1, &freepath1);
	if (ret < 0) {
		msg = ERR_PTR(ret);
		goto out;
	}

	ret = set_request_path_attr(NULL, req->r_old_dentry,
				    req->r_path2, req->r_ino2.ino,
				    &path2, &pathlen2, &ino2, &freepath2);
	if (ret < 0) {
		msg = ERR_PTR(ret);
		goto out_free1;
	}

	len = sizeof(*head) +
		pathlen1 + pathlen2 + 2*(1 + sizeof(u32) + sizeof(u64)) +
		sizeof(struct ceph_timespec);

	/* calculate (max) length for cap releases */
	len += sizeof(struct ceph_mds_request_release) *
		(!!req->r_inode_drop + !!req->r_dentry_drop +
		 !!req->r_old_inode_drop + !!req->r_old_dentry_drop);
	if (req->r_dentry_drop)
		len += req->r_dentry->d_name.len;
	if (req->r_old_dentry_drop)
		len += req->r_old_dentry->d_name.len;

	msg = ceph_msg_new(CEPH_MSG_CLIENT_REQUEST, len, GFP_NOFS, false);
	if (!msg) {
		msg = ERR_PTR(-ENOMEM);
		goto out_free2;
	}

	msg->hdr.version = cpu_to_le16(2);
	msg->hdr.tid = cpu_to_le64(req->r_tid);

	head = msg->front.iov_base;
	p = msg->front.iov_base + sizeof(*head);
	end = msg->front.iov_base + msg->front.iov_len;

	head->mdsmap_epoch = cpu_to_le32(mdsc->mdsmap->m_epoch);
	head->op = cpu_to_le32(req->r_op);
	head->caller_uid = cpu_to_le32(from_kuid(&init_user_ns, req->r_uid));
	head->caller_gid = cpu_to_le32(from_kgid(&init_user_ns, req->r_gid));
	head->args = req->r_args;

	ceph_encode_filepath(&p, end, ino1, path1);
	ceph_encode_filepath(&p, end, ino2, path2);

	/* make note of release offset, in case we need to replay */
	req->r_request_release_offset = p - msg->front.iov_base;

	/* cap releases */
	releases = 0;
	if (req->r_inode_drop)
		releases += ceph_encode_inode_release(&p,
		      req->r_inode ? req->r_inode : d_inode(req->r_dentry),
		      mds, req->r_inode_drop, req->r_inode_unless, 0);
	if (req->r_dentry_drop)
		releases += ceph_encode_dentry_release(&p, req->r_dentry,
		       mds, req->r_dentry_drop, req->r_dentry_unless);
	if (req->r_old_dentry_drop)
		releases += ceph_encode_dentry_release(&p, req->r_old_dentry,
		       mds, req->r_old_dentry_drop, req->r_old_dentry_unless);
	if (req->r_old_inode_drop)
		releases += ceph_encode_inode_release(&p,
		      d_inode(req->r_old_dentry),
		      mds, req->r_old_inode_drop, req->r_old_inode_unless, 0);

	if (drop_cap_releases) {
		releases = 0;
		p = msg->front.iov_base + req->r_request_release_offset;
	}

	head->num_releases = cpu_to_le16(releases);

	/* time stamp */
	{
		struct ceph_timespec ts;
		ceph_encode_timespec(&ts, &req->r_stamp);
		ceph_encode_copy(&p, &ts, sizeof(ts));
	}

	BUG_ON(p > end);
	msg->front.iov_len = p - msg->front.iov_base;
	msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);

	if (req->r_pagelist) {
		struct ceph_pagelist *pagelist = req->r_pagelist;
		atomic_inc(&pagelist->refcnt);
		ceph_msg_data_add_pagelist(msg, pagelist);
		msg->hdr.data_len = cpu_to_le32(pagelist->length);
	} else {
		msg->hdr.data_len = 0;
	}

	msg->hdr.data_off = cpu_to_le16(0);

out_free2:
	if (freepath2)
		kfree((char *)path2);
out_free1:
	if (freepath1)
		kfree((char *)path1);
out:
	return msg;
}
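/*
 * A sketch of the resulting message front, as assembled above (field
 * widths follow the encode calls and the length formula, not a formal
 * spec):
 *
 *	struct ceph_mds_request_head
 *	filepath1  (u8 version, u64 ino, u32 len, path bytes)
 *	filepath2  (u8 version, u64 ino, u32 len, path bytes)
 *	cap releases (num_releases entries; rewound if dropped)
 *	struct ceph_timespec (r_stamp)
 */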
/*
 * called under mdsc->mutex if error, under no mutex if
 * success.
 */
static void complete_request(struct ceph_mds_client *mdsc,
			     struct ceph_mds_request *req)
{
	if (req->r_callback)
		req->r_callback(mdsc, req);
	else
		complete_all(&req->r_completion);
}
/*
 * called under mdsc->mutex
 */
static int __prepare_send_request(struct ceph_mds_client *mdsc,
				  struct ceph_mds_request *req,
				  int mds, bool drop_cap_releases)
{
	struct ceph_mds_request_head *rhead;
	struct ceph_msg *msg;
	int flags = 0;

	req->r_attempts++;
	if (req->r_inode) {
		struct ceph_cap *cap =
			ceph_get_cap_for_mds(ceph_inode(req->r_inode), mds);

		if (cap)
			req->r_sent_on_mseq = cap->mseq;
		else
			req->r_sent_on_mseq = -1;
	}
	dout("prepare_send_request %p tid %lld %s (attempt %d)\n", req,
	     req->r_tid, ceph_mds_op_name(req->r_op), req->r_attempts);

	if (req->r_got_unsafe) {
		void *p;
		/*
		 * Replay.  Do not regenerate message (and rebuild
		 * paths, etc.); just use the original message.
		 * Rebuilding paths will break for renames because
		 * d_move mangles the src name.
		 */
		msg = req->r_request;
		rhead = msg->front.iov_base;

		flags = le32_to_cpu(rhead->flags);
		flags |= CEPH_MDS_FLAG_REPLAY;
		rhead->flags = cpu_to_le32(flags);

		if (req->r_target_inode)
			rhead->ino = cpu_to_le64(ceph_ino(req->r_target_inode));

		rhead->num_retry = req->r_attempts - 1;

		/* remove cap/dentry releases from message */
		rhead->num_releases = 0;

		/* time stamp */
		p = msg->front.iov_base + req->r_request_release_offset;
		{
			struct ceph_timespec ts;
			ceph_encode_timespec(&ts, &req->r_stamp);
			ceph_encode_copy(&p, &ts, sizeof(ts));
		}

		msg->front.iov_len = p - msg->front.iov_base;
		msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
		return 0;
	}

	if (req->r_request) {
		ceph_msg_put(req->r_request);
		req->r_request = NULL;
	}
	msg = create_request_message(mdsc, req, mds, drop_cap_releases);
	if (IS_ERR(msg)) {
		req->r_err = PTR_ERR(msg);
		return PTR_ERR(msg);
	}
	req->r_request = msg;

	rhead = msg->front.iov_base;
	rhead->oldest_client_tid = cpu_to_le64(__get_oldest_tid(mdsc));
	if (req->r_got_unsafe)
		flags |= CEPH_MDS_FLAG_REPLAY;
	if (req->r_locked_dir)
		flags |= CEPH_MDS_FLAG_WANT_DENTRY;
	rhead->flags = cpu_to_le32(flags);
	rhead->num_fwd = req->r_num_fwd;
	rhead->num_retry = req->r_attempts - 1;
	rhead->ino = 0;

	dout(" r_locked_dir = %p\n", req->r_locked_dir);
	return 0;
}
/*
 * send request, or put it on the appropriate wait list.
 */
static int __do_request(struct ceph_mds_client *mdsc,
			struct ceph_mds_request *req)
{
	struct ceph_mds_session *session = NULL;
	int mds = -1;
	int err = 0;

	if (req->r_err || req->r_got_result) {
		if (req->r_aborted)
			__unregister_request(mdsc, req);
		goto out;
	}

	if (req->r_timeout &&
	    time_after_eq(jiffies, req->r_started + req->r_timeout)) {
		dout("do_request timed out\n");
		err = -EIO;
		goto finish;
	}
	if (ACCESS_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
		dout("do_request forced umount\n");
		err = -EIO;
		goto finish;
	}

	put_request_session(req);

	mds = __choose_mds(mdsc, req);
	if (mds < 0 ||
	    ceph_mdsmap_get_state(mdsc->mdsmap, mds) < CEPH_MDS_STATE_ACTIVE) {
		dout("do_request no mds or not active, waiting for map\n");
		list_add(&req->r_wait, &mdsc->waiting_for_map);
		goto out;
	}

	/* get, open session */
	session = __ceph_lookup_mds_session(mdsc, mds);
	if (!session) {
		session = register_session(mdsc, mds);
		if (IS_ERR(session)) {
			err = PTR_ERR(session);
			goto finish;
		}
	}
	req->r_session = get_session(session);

	dout("do_request mds%d session %p state %s\n", mds, session,
	     ceph_session_state_name(session->s_state));
	if (session->s_state != CEPH_MDS_SESSION_OPEN &&
	    session->s_state != CEPH_MDS_SESSION_HUNG) {
		if (session->s_state == CEPH_MDS_SESSION_NEW ||
		    session->s_state == CEPH_MDS_SESSION_CLOSING)
			__open_session(mdsc, session);
		list_add(&req->r_wait, &session->s_waiting);
		goto out_session;
	}

	/* send request */
	req->r_resend_mds = -1;   /* forget any previous mds hint */

	if (req->r_request_started == 0)   /* note request start time */
		req->r_request_started = jiffies;

	err = __prepare_send_request(mdsc, req, mds, false);
	if (!err) {
		ceph_msg_get(req->r_request);
		ceph_con_send(&session->s_con, req->r_request);
	}

out_session:
	ceph_put_mds_session(session);
finish:
	if (err) {
		dout("__do_request early error %d\n", err);
		req->r_err = err;
		complete_request(mdsc, req);
		__unregister_request(mdsc, req);
	}
out:
	return err;
}
/*
 * called under mdsc->mutex
 */
static void __wake_requests(struct ceph_mds_client *mdsc,
			    struct list_head *head)
{
	struct ceph_mds_request *req;
	LIST_HEAD(tmp_list);

	list_splice_init(head, &tmp_list);

	while (!list_empty(&tmp_list)) {
		req = list_entry(tmp_list.next,
				 struct ceph_mds_request, r_wait);
		list_del_init(&req->r_wait);
		dout(" wake request %p tid %llu\n", req, req->r_tid);
		__do_request(mdsc, req);
	}
}

/*
 * Wake up threads with requests pending for @mds, so that they can
 * resubmit their requests to a possibly different mds.
 */
static void kick_requests(struct ceph_mds_client *mdsc, int mds)
{
	struct ceph_mds_request *req;
	struct rb_node *p = rb_first(&mdsc->request_tree);

	dout("kick_requests mds%d\n", mds);
	while (p) {
		req = rb_entry(p, struct ceph_mds_request, r_node);
		p = rb_next(p);
		if (req->r_got_unsafe)
			continue;
		if (req->r_attempts > 0)
			continue; /* only new requests */
		if (req->r_session &&
		    req->r_session->s_mds == mds) {
			dout(" kicking tid %llu\n", req->r_tid);
			list_del_init(&req->r_wait);
			__do_request(mdsc, req);
		}
	}
}
void ceph_mdsc_submit_request(struct ceph_mds_client *mdsc,
			      struct ceph_mds_request *req)
{
	dout("submit_request on %p\n", req);
	mutex_lock(&mdsc->mutex);
	__register_request(mdsc, req, NULL);
	__do_request(mdsc, req);
	mutex_unlock(&mdsc->mutex);
}
/*
 * Synchronously perform an mds request.  Take care of all of the
 * session setup, forwarding, retry details.
 */
int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
			 struct inode *dir,
			 struct ceph_mds_request *req)
{
	int err;

	dout("do_request on %p\n", req);

	/* take CAP_PIN refs for r_inode, r_locked_dir, r_old_dentry */
	if (req->r_inode)
		ceph_get_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
	if (req->r_locked_dir)
		ceph_get_cap_refs(ceph_inode(req->r_locked_dir), CEPH_CAP_PIN);
	if (req->r_old_dentry_dir)
		ceph_get_cap_refs(ceph_inode(req->r_old_dentry_dir),
				  CEPH_CAP_PIN);

	/* deny access to directories with pool_ns layouts */
	if (req->r_inode && S_ISDIR(req->r_inode->i_mode) &&
	    ceph_inode(req->r_inode)->i_pool_ns_len)
		return -EIO;
	if (req->r_locked_dir &&
	    ceph_inode(req->r_locked_dir)->i_pool_ns_len)
		return -EIO;

	/* issue */
	mutex_lock(&mdsc->mutex);
	__register_request(mdsc, req, dir);
	__do_request(mdsc, req);

	if (req->r_err) {
		err = req->r_err;
		goto out;
	}

	/* wait */
	mutex_unlock(&mdsc->mutex);
	dout("do_request waiting\n");
	if (!req->r_timeout && req->r_wait_for_completion) {
		err = req->r_wait_for_completion(mdsc, req);
	} else {
		long timeleft = wait_for_completion_killable_timeout(
					&req->r_completion,
					ceph_timeout_jiffies(req->r_timeout));
		if (timeleft > 0)
			err = 0;
		else if (!timeleft)
			err = -EIO;  /* timed out */
		else
			err = timeleft;  /* killed */
	}
	dout("do_request waited, got %d\n", err);
	mutex_lock(&mdsc->mutex);

	/* only abort if we didn't race with a real reply */
	if (req->r_got_result) {
		err = le32_to_cpu(req->r_reply_info.head->result);
	} else if (err < 0) {
		dout("aborted request %lld with %d\n", req->r_tid, err);

		/*
		 * ensure we aren't running concurrently with
		 * ceph_fill_trace or ceph_readdir_prepopulate, which
		 * rely on locks (dir mutex) held by our caller.
		 */
		mutex_lock(&req->r_fill_mutex);
		req->r_err = err;
		req->r_aborted = true;
		mutex_unlock(&req->r_fill_mutex);

		if (req->r_locked_dir &&
		    (req->r_op & CEPH_MDS_OP_WRITE))
			ceph_invalidate_dir_request(req);
	} else {
		err = req->r_err;
	}

out:
	mutex_unlock(&mdsc->mutex);
	dout("do_request %p done, result %d\n", req, err);
	return err;
}
/*
 * Invalidate dir's completeness, dentry lease state on an aborted MDS
 * namespace request.
 */
void ceph_invalidate_dir_request(struct ceph_mds_request *req)
{
	struct inode *inode = req->r_locked_dir;

	dout("invalidate_dir_request %p (complete, lease(s))\n", inode);

	ceph_dir_clear_complete(inode);
	if (req->r_dentry)
		ceph_invalidate_dentry_lease(req->r_dentry);
	if (req->r_old_dentry)
		ceph_invalidate_dentry_lease(req->r_old_dentry);
}
/*
 * Handle mds reply.
 *
 * We take the session mutex and parse and process the reply immediately.
 * This preserves the logical ordering of replies, capabilities, etc., sent
 * by the MDS as they are applied to our local cache.
 */
static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
{
	struct ceph_mds_client *mdsc = session->s_mdsc;
	struct ceph_mds_request *req;
	struct ceph_mds_reply_head *head = msg->front.iov_base;
	struct ceph_mds_reply_info_parsed *rinfo;  /* parsed reply info */
	struct ceph_snap_realm *realm;
	u64 tid;
	int err, result;
	int mds = session->s_mds;

	if (msg->front.iov_len < sizeof(*head)) {
		pr_err("mdsc_handle_reply got corrupt (short) reply\n");
		ceph_msg_dump(msg);
		return;
	}

	/* get request, session */
	tid = le64_to_cpu(msg->hdr.tid);
	mutex_lock(&mdsc->mutex);
	req = __lookup_request(mdsc, tid);
	if (!req) {
		dout("handle_reply on unknown tid %llu\n", tid);
		mutex_unlock(&mdsc->mutex);
		return;
	}
	dout("handle_reply %p\n", req);

	/* correct session? */
	if (req->r_session != session) {
		pr_err("mdsc_handle_reply got %llu on session mds%d"
		       " not mds%d\n", tid, session->s_mds,
		       req->r_session ? req->r_session->s_mds : -1);
		mutex_unlock(&mdsc->mutex);
		goto out;
	}

	/* dup? */
	if ((req->r_got_unsafe && !head->safe) ||
	    (req->r_got_safe && head->safe)) {
		pr_warn("got a dup %s reply on %llu from mds%d\n",
			head->safe ? "safe" : "unsafe", tid, mds);
		mutex_unlock(&mdsc->mutex);
		goto out;
	}
	if (req->r_got_safe) {
		pr_warn("got unsafe after safe on %llu from mds%d\n",
			tid, mds);
		mutex_unlock(&mdsc->mutex);
		goto out;
	}

	result = le32_to_cpu(head->result);

	/*
	 * Handle an ESTALE
	 * if we're not talking to the authority, send to them
	 * if the authority has changed while we weren't looking,
	 * send to new authority
	 * Otherwise we just have to return an ESTALE
	 */
	if (result == -ESTALE) {
		dout("got ESTALE on request %llu", req->r_tid);
		req->r_resend_mds = -1;
		if (req->r_direct_mode != USE_AUTH_MDS) {
			dout("not using auth, setting for that now");
			req->r_direct_mode = USE_AUTH_MDS;
			__do_request(mdsc, req);
			mutex_unlock(&mdsc->mutex);
			goto out;
		} else {
			int mds = __choose_mds(mdsc, req);
			if (mds >= 0 && mds != req->r_session->s_mds) {
				dout("but auth changed, so resending");
				__do_request(mdsc, req);
				mutex_unlock(&mdsc->mutex);
				goto out;
			}
		}
		dout("have to return ESTALE on request %llu", req->r_tid);
	}

	if (head->safe) {
		req->r_got_safe = true;
		__unregister_request(mdsc, req);

		if (req->r_got_unsafe) {
			/*
			 * We already handled the unsafe response, now do the
			 * cleanup.  No need to examine the response; the MDS
			 * doesn't include any result info in the safe
			 * response.  And even if it did, there is nothing
			 * useful we could do with a revised return value.
			 */
			dout("got safe reply %llu, mds%d\n", tid, mds);
			list_del_init(&req->r_unsafe_item);

			/* last unsafe request during umount? */
			if (mdsc->stopping && !__get_oldest_req(mdsc))
				complete_all(&mdsc->safe_umount_waiters);
			mutex_unlock(&mdsc->mutex);
			goto out;
		}
	} else {
		req->r_got_unsafe = true;
		list_add_tail(&req->r_unsafe_item, &req->r_session->s_unsafe);
		if (req->r_unsafe_dir) {
			struct ceph_inode_info *ci =
					ceph_inode(req->r_unsafe_dir);
			spin_lock(&ci->i_unsafe_lock);
			list_add_tail(&req->r_unsafe_dir_item,
				      &ci->i_unsafe_dirops);
			spin_unlock(&ci->i_unsafe_lock);
		}
	}

	dout("handle_reply tid %lld result %d\n", tid, result);
	rinfo = &req->r_reply_info;
	err = parse_reply_info(msg, rinfo, session->s_con.peer_features);
	mutex_unlock(&mdsc->mutex);

	mutex_lock(&session->s_mutex);
	if (err < 0) {
		pr_err("mdsc_handle_reply got corrupt reply mds%d(tid:%lld)\n", mds, tid);
		ceph_msg_dump(msg);
		goto out_err;
	}

	/* snap trace */
	realm = NULL;
	if (rinfo->snapblob_len) {
		down_write(&mdsc->snap_rwsem);
		ceph_update_snap_trace(mdsc, rinfo->snapblob,
				rinfo->snapblob + rinfo->snapblob_len,
				le32_to_cpu(head->op) == CEPH_MDS_OP_RMSNAP,
				&realm);
		downgrade_write(&mdsc->snap_rwsem);
	} else {
		down_read(&mdsc->snap_rwsem);
	}

	/* insert trace into our cache */
	mutex_lock(&req->r_fill_mutex);
	err = ceph_fill_trace(mdsc->fsc->sb, req, req->r_session);
	if (err == 0) {
		if (result == 0 && (req->r_op == CEPH_MDS_OP_READDIR ||
				    req->r_op == CEPH_MDS_OP_LSSNAP))
			ceph_readdir_prepopulate(req, req->r_session);
		ceph_unreserve_caps(mdsc, &req->r_caps_reservation);
	}
	mutex_unlock(&req->r_fill_mutex);

	up_read(&mdsc->snap_rwsem);
	if (realm)
		ceph_put_snap_realm(mdsc, realm);

	if (err == 0 && req->r_got_unsafe && req->r_target_inode) {
		struct ceph_inode_info *ci = ceph_inode(req->r_target_inode);
		spin_lock(&ci->i_unsafe_lock);
		list_add_tail(&req->r_unsafe_target_item, &ci->i_unsafe_iops);
		spin_unlock(&ci->i_unsafe_lock);
	}
out_err:
	mutex_lock(&mdsc->mutex);
	if (!req->r_aborted) {
		if (err) {
			req->r_err = err;
		} else {
			req->r_reply = ceph_msg_get(msg);
			req->r_got_result = true;
		}
	} else {
		dout("reply arrived after request %lld was aborted\n", tid);
	}
	mutex_unlock(&mdsc->mutex);

	mutex_unlock(&session->s_mutex);

	/* kick calling process */
	complete_request(mdsc, req);
out:
	ceph_mdsc_put_request(req);
	return;
}

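/*
 * For reference, the reply handling above plays out in two phases for
 * modifying operations:
 *
 *   client                          mds
 *     ---- request ---------------->
 *     <--- unsafe reply ----------      applied in MDS memory; we set
 *                                       r_got_unsafe and keep the request
 *                                       on the session's s_unsafe list
 *     <--- safe reply -------------     committed to disk; we set
 *                                       r_got_safe and unregister
 *
 * Read-only operations get a single safe reply.
 */
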
/*
 * handle mds notification that our request has been forwarded.
 */
static void handle_forward(struct ceph_mds_client *mdsc,
			   struct ceph_mds_session *session,
			   struct ceph_msg *msg)
{
	struct ceph_mds_request *req;
	u64 tid = le64_to_cpu(msg->hdr.tid);
	u32 next_mds;
	u32 fwd_seq;
	int err = -EINVAL;
	void *p = msg->front.iov_base;
	void *end = p + msg->front.iov_len;

	ceph_decode_need(&p, end, 2*sizeof(u32), bad);
	next_mds = ceph_decode_32(&p);
	fwd_seq = ceph_decode_32(&p);

	mutex_lock(&mdsc->mutex);
	req = __lookup_request(mdsc, tid);
	if (!req) {
		dout("forward tid %llu to mds%d - req dne\n", tid, next_mds);
		goto out;  /* dup reply? */
	}

	if (req->r_aborted) {
		dout("forward tid %llu aborted, unregistering\n", tid);
		__unregister_request(mdsc, req);
	} else if (fwd_seq <= req->r_num_fwd) {
		dout("forward tid %llu to mds%d - old seq %d <= %d\n",
		     tid, next_mds, req->r_num_fwd, fwd_seq);
	} else {
		/* resend. forward race not possible; mds would drop */
		dout("forward tid %llu to mds%d (we resend)\n", tid, next_mds);
		BUG_ON(req->r_err);
		BUG_ON(req->r_got_result);
		req->r_attempts = 0;
		req->r_num_fwd = fwd_seq;
		req->r_resend_mds = next_mds;
		put_request_session(req);
		__do_request(mdsc, req);
	}
	ceph_mdsc_put_request(req);
out:
	mutex_unlock(&mdsc->mutex);
	return;

bad:
	pr_err("mdsc_handle_forward decode error err=%d\n", err);
}

/*
 * handle a mds session control message
 */
static void handle_session(struct ceph_mds_session *session,
			   struct ceph_msg *msg)
{
	struct ceph_mds_client *mdsc = session->s_mdsc;
	u32 op;
	u64 seq;
	int mds = session->s_mds;
	struct ceph_mds_session_head *h = msg->front.iov_base;
	int wake = 0;

	/* decode */
	if (msg->front.iov_len != sizeof(*h))
		goto bad;
	op = le32_to_cpu(h->op);
	seq = le64_to_cpu(h->seq);

	mutex_lock(&mdsc->mutex);
	if (op == CEPH_SESSION_CLOSE)
		__unregister_session(mdsc, session);
	/* FIXME: this ttl calculation is generous */
	session->s_ttl = jiffies + HZ*mdsc->mdsmap->m_session_autoclose;
	mutex_unlock(&mdsc->mutex);

	mutex_lock(&session->s_mutex);

	dout("handle_session mds%d %s %p state %s seq %llu\n",
	     mds, ceph_session_op_name(op), session,
	     ceph_session_state_name(session->s_state), seq);

	if (session->s_state == CEPH_MDS_SESSION_HUNG) {
		session->s_state = CEPH_MDS_SESSION_OPEN;
		pr_info("mds%d came back\n", session->s_mds);
	}

	switch (op) {
	case CEPH_SESSION_OPEN:
		if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
			pr_info("mds%d reconnect success\n", session->s_mds);
		session->s_state = CEPH_MDS_SESSION_OPEN;
		renewed_caps(mdsc, session, 0);
		wake = 1;
		if (mdsc->stopping)
			__close_session(mdsc, session);
		break;

	case CEPH_SESSION_RENEWCAPS:
		if (session->s_renew_seq == seq)
			renewed_caps(mdsc, session, 1);
		break;

	case CEPH_SESSION_CLOSE:
		if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
			pr_info("mds%d reconnect denied\n", session->s_mds);
		cleanup_session_requests(mdsc, session);
		remove_session_caps(session);
		wake = 2; /* for good measure */
		wake_up_all(&mdsc->session_close_wq);
		break;

	case CEPH_SESSION_STALE:
		pr_info("mds%d caps went stale, renewing\n",
			session->s_mds);
		spin_lock(&session->s_gen_ttl_lock);
		session->s_cap_gen++;
		session->s_cap_ttl = jiffies - 1;
		spin_unlock(&session->s_gen_ttl_lock);
		send_renew_caps(mdsc, session);
		break;

	case CEPH_SESSION_RECALL_STATE:
		trim_caps(mdsc, session, le32_to_cpu(h->max_caps));
		break;

	case CEPH_SESSION_FLUSHMSG:
		send_flushmsg_ack(mdsc, session, seq);
		break;

	case CEPH_SESSION_FORCE_RO:
		dout("force_session_readonly %p\n", session);
		spin_lock(&session->s_cap_lock);
		session->s_readonly = true;
		spin_unlock(&session->s_cap_lock);
		wake_up_session_caps(session, 0);
		break;

	default:
		pr_err("mdsc_handle_session bad op %d mds%d\n", op, mds);
		WARN_ON(1);
	}

	mutex_unlock(&session->s_mutex);
	if (wake) {
		mutex_lock(&mdsc->mutex);
		__wake_requests(mdsc, &session->s_waiting);
		if (wake == 2)
			kick_requests(mdsc, mds);
		mutex_unlock(&mdsc->mutex);
	}
	return;

bad:
	pr_err("mdsc_handle_session corrupt message mds%d len %d\n", mds,
	       (int)msg->front.iov_len);
	ceph_msg_dump(msg);
	return;
}

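/*
 * Summary of the session ops handled above:
 *
 *   OPEN          session established; renew caps, wake waiting requests
 *   RENEWCAPS     cap/lease renewal acknowledged (if the seq matches)
 *   CLOSE         session torn down; drop requests and caps, wake umount
 *   STALE         our caps expired; bump s_cap_gen and renew immediately
 *   RECALL_STATE  MDS asks us to trim down to h->max_caps
 *   FLUSHMSG      acknowledge a message-flush barrier
 *   FORCE_RO      mark the session read-only
 */
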
/*
 * called under session->mutex.
 */
static void replay_unsafe_requests(struct ceph_mds_client *mdsc,
				   struct ceph_mds_session *session)
{
	struct ceph_mds_request *req, *nreq;
	struct rb_node *p;
	int err;

	dout("replay_unsafe_requests mds%d\n", session->s_mds);

	mutex_lock(&mdsc->mutex);
	list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item) {
		err = __prepare_send_request(mdsc, req, session->s_mds, true);
		if (!err) {
			ceph_msg_get(req->r_request);
			ceph_con_send(&session->s_con, req->r_request);
		}
	}

	/*
	 * also re-send old requests when the MDS enters the reconnect stage,
	 * so that the MDS can process completed requests in its clientreplay
	 * stage.
	 */
	p = rb_first(&mdsc->request_tree);
	while (p) {
		req = rb_entry(p, struct ceph_mds_request, r_node);
		p = rb_next(p);
		if (req->r_got_unsafe)
			continue;
		if (req->r_attempts == 0)
			continue; /* only old requests */
		if (req->r_session &&
		    req->r_session->s_mds == session->s_mds) {
			err = __prepare_send_request(mdsc, req,
						     session->s_mds, true);
			if (!err) {
				ceph_msg_get(req->r_request);
				ceph_con_send(&session->s_con, req->r_request);
			}
		}
	}
	mutex_unlock(&mdsc->mutex);
}

/*
 * Encode information about a cap for a reconnect with the MDS.
 */
static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
			  void *arg)
{
	union {
		struct ceph_mds_cap_reconnect v2;
		struct ceph_mds_cap_reconnect_v1 v1;
	} rec;
	size_t reclen;
	struct ceph_inode_info *ci;
	struct ceph_reconnect_state *recon_state = arg;
	struct ceph_pagelist *pagelist = recon_state->pagelist;
	char *path;
	int pathlen, err;
	u64 pathbase;
	struct dentry *dentry;

	ci = cap->ci;

	dout(" adding %p ino %llx.%llx cap %p %lld %s\n",
	     inode, ceph_vinop(inode), cap, cap->cap_id,
	     ceph_cap_string(cap->issued));
	err = ceph_pagelist_encode_64(pagelist, ceph_ino(inode));
	if (err)
		return err;

	dentry = d_find_alias(inode);
	if (dentry) {
		path = ceph_mdsc_build_path(dentry, &pathlen, &pathbase, 0);
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			goto out_dput;
		}
	} else {
		path = NULL;
		pathlen = 0;
	}
	err = ceph_pagelist_encode_string(pagelist, path, pathlen);
	if (err)
		goto out_free;

	spin_lock(&ci->i_ceph_lock);
	cap->seq = 0;        /* reset cap seq */
	cap->issue_seq = 0;  /* and issue_seq */
	cap->mseq = 0;       /* and migrate_seq */
	cap->cap_gen = cap->session->s_cap_gen;

	if (recon_state->flock) {
		rec.v2.cap_id = cpu_to_le64(cap->cap_id);
		rec.v2.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
		rec.v2.issued = cpu_to_le32(cap->issued);
		rec.v2.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
		rec.v2.pathbase = cpu_to_le64(pathbase);
		rec.v2.flock_len = 0;
		reclen = sizeof(rec.v2);
	} else {
		rec.v1.cap_id = cpu_to_le64(cap->cap_id);
		rec.v1.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
		rec.v1.issued = cpu_to_le32(cap->issued);
		rec.v1.size = cpu_to_le64(inode->i_size);
		ceph_encode_timespec(&rec.v1.mtime, &inode->i_mtime);
		ceph_encode_timespec(&rec.v1.atime, &inode->i_atime);
		rec.v1.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
		rec.v1.pathbase = cpu_to_le64(pathbase);
		reclen = sizeof(rec.v1);
	}
	spin_unlock(&ci->i_ceph_lock);

	if (recon_state->flock) {
		int num_fcntl_locks, num_flock_locks;
		struct ceph_filelock *flocks;

encode_again:
		ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks);
		flocks = kmalloc((num_fcntl_locks+num_flock_locks) *
				 sizeof(struct ceph_filelock), GFP_NOFS);
		if (!flocks) {
			err = -ENOMEM;
			goto out_free;
		}
		err = ceph_encode_locks_to_buffer(inode, flocks,
						  num_fcntl_locks,
						  num_flock_locks);
		if (err) {
			kfree(flocks);
			if (err == -ENOSPC)
				goto encode_again;
			goto out_free;
		}
		/*
		 * number of encoded locks is stable, so copy to pagelist
		 */
		rec.v2.flock_len = cpu_to_le32(2*sizeof(u32) +
				    (num_fcntl_locks+num_flock_locks) *
				    sizeof(struct ceph_filelock));
		err = ceph_pagelist_append(pagelist, &rec, reclen);
		if (!err)
			err = ceph_locks_to_pagelist(flocks, pagelist,
						     num_fcntl_locks,
						     num_flock_locks);
		kfree(flocks);
	} else {
		err = ceph_pagelist_append(pagelist, &rec, reclen);
	}

	recon_state->nr_caps++;
out_free:
	kfree(path);
out_dput:
	dput(dentry);
	return err;
}

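/*
 * Per-cap reconnect record emitted above, as it lands in the pagelist
 * (integers little-endian):
 *
 *   u64    ino
 *   string path       (u32 length + bytes, relative to pathbase)
 *   v2:    cap_id, wanted, issued, snaprealm, pathbase, flock_len,
 *          followed by the encoded fcntl/flock lock records
 *   v1:    cap_id, wanted, issued, size, mtime, atime, snaprealm, pathbase
 *
 * The v2 form is used when the peer advertises CEPH_FEATURE_FLOCK.
 */
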
/*
 * If an MDS fails and recovers, clients need to reconnect in order to
 * reestablish shared state.  This includes all caps issued through
 * this session _and_ the snap_realm hierarchy.  Because it's not
 * clear which snap realms the mds cares about, we send everything we
 * know about; that ensures we'll then get any new info the
 * recovering MDS might have.
 *
 * This is a relatively heavyweight operation, but it's rare.
 *
 * called with mdsc->mutex held.
 */
static void send_mds_reconnect(struct ceph_mds_client *mdsc,
			       struct ceph_mds_session *session)
{
	struct ceph_msg *reply;
	struct rb_node *p;
	int mds = session->s_mds;
	int err = -ENOMEM;
	int s_nr_caps;
	struct ceph_pagelist *pagelist;
	struct ceph_reconnect_state recon_state;

	pr_info("mds%d reconnect start\n", mds);

	pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS);
	if (!pagelist)
		goto fail_nopagelist;
	ceph_pagelist_init(pagelist);

	reply = ceph_msg_new(CEPH_MSG_CLIENT_RECONNECT, 0, GFP_NOFS, false);
	if (!reply)
		goto fail_nomsg;

	mutex_lock(&session->s_mutex);
	session->s_state = CEPH_MDS_SESSION_RECONNECTING;
	session->s_seq = 0;

	dout("session %p state %s\n", session,
	     ceph_session_state_name(session->s_state));

	spin_lock(&session->s_gen_ttl_lock);
	session->s_cap_gen++;
	spin_unlock(&session->s_gen_ttl_lock);

	spin_lock(&session->s_cap_lock);
	/* don't know if session is readonly */
	session->s_readonly = 0;
	/*
	 * notify __ceph_remove_cap() that we are composing cap reconnect.
	 * If a cap gets released before being added to the cap reconnect,
	 * __ceph_remove_cap() should skip queuing cap release.
	 */
	session->s_cap_reconnect = 1;
	/* drop old cap expires; we're about to reestablish that state */
	cleanup_cap_releases(mdsc, session);
	spin_unlock(&session->s_cap_lock);

	/* trim unused caps to reduce MDS's cache rejoin time */
	if (mdsc->fsc->sb->s_root)
		shrink_dcache_parent(mdsc->fsc->sb->s_root);

	ceph_con_close(&session->s_con);
	ceph_con_open(&session->s_con,
		      CEPH_ENTITY_TYPE_MDS, mds,
		      ceph_mdsmap_get_addr(mdsc->mdsmap, mds));

	/* replay unsafe requests */
	replay_unsafe_requests(mdsc, session);

	down_read(&mdsc->snap_rwsem);

	/* traverse this session's caps */
	s_nr_caps = session->s_nr_caps;
	err = ceph_pagelist_encode_32(pagelist, s_nr_caps);
	if (err)
		goto fail;

	recon_state.nr_caps = 0;
	recon_state.pagelist = pagelist;
	recon_state.flock = session->s_con.peer_features & CEPH_FEATURE_FLOCK;
	err = iterate_session_caps(session, encode_caps_cb, &recon_state);
	if (err < 0)
		goto fail;

	spin_lock(&session->s_cap_lock);
	session->s_cap_reconnect = 0;
	spin_unlock(&session->s_cap_lock);

	/*
	 * snaprealms.  we provide mds with the ino, seq (version), and
	 * parent for all of our realms.  If the mds has any newer info,
	 * it will tell us.
	 */
	for (p = rb_first(&mdsc->snap_realms); p; p = rb_next(p)) {
		struct ceph_snap_realm *realm =
			rb_entry(p, struct ceph_snap_realm, node);
		struct ceph_mds_snaprealm_reconnect sr_rec;

		dout(" adding snap realm %llx seq %lld parent %llx\n",
		     realm->ino, realm->seq, realm->parent_ino);
		sr_rec.ino = cpu_to_le64(realm->ino);
		sr_rec.seq = cpu_to_le64(realm->seq);
		sr_rec.parent = cpu_to_le64(realm->parent_ino);
		err = ceph_pagelist_append(pagelist, &sr_rec, sizeof(sr_rec));
		if (err)
			goto fail;
	}

	if (recon_state.flock)
		reply->hdr.version = cpu_to_le16(2);

	/* raced with cap release? */
	if (s_nr_caps != recon_state.nr_caps) {
		struct page *page = list_first_entry(&pagelist->head,
						     struct page, lru);
		__le32 *addr = kmap_atomic(page);
		*addr = cpu_to_le32(recon_state.nr_caps);
		kunmap_atomic(addr);
	}

	reply->hdr.data_len = cpu_to_le32(pagelist->length);
	ceph_msg_data_add_pagelist(reply, pagelist);

	ceph_early_kick_flushing_caps(mdsc, session);

	ceph_con_send(&session->s_con, reply);

	mutex_unlock(&session->s_mutex);

	mutex_lock(&mdsc->mutex);
	__wake_requests(mdsc, &session->s_waiting);
	mutex_unlock(&mdsc->mutex);

	up_read(&mdsc->snap_rwsem);
	return;

fail:
	ceph_msg_put(reply);
	up_read(&mdsc->snap_rwsem);
	mutex_unlock(&session->s_mutex);
fail_nomsg:
	ceph_pagelist_release(pagelist);
fail_nopagelist:
	pr_err("error %d preparing reconnect for mds%d\n", err, mds);
	return;
}

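/*
 * The reconnect message assembled above carries, in its data pagelist:
 *
 *   u32 nr_caps                  (patched in place if caps raced away)
 *   nr_caps x cap record         (see encode_caps_cb)
 *   one ceph_mds_snaprealm_reconnect {ino, seq, parent} per known realm
 *
 * hdr.version is bumped to 2 when flock records are included.
 */
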
/*
 * compare old and new mdsmaps, kicking requests
 * and closing out old connections as necessary
 *
 * called under mdsc->mutex.
 */
static void check_new_map(struct ceph_mds_client *mdsc,
			  struct ceph_mdsmap *newmap,
			  struct ceph_mdsmap *oldmap)
{
	int i;
	int oldstate, newstate;
	struct ceph_mds_session *s;

	dout("check_new_map new %u old %u\n",
	     newmap->m_epoch, oldmap->m_epoch);

	for (i = 0; i < oldmap->m_max_mds && i < mdsc->max_sessions; i++) {
		if (mdsc->sessions[i] == NULL)
			continue;
		s = mdsc->sessions[i];
		oldstate = ceph_mdsmap_get_state(oldmap, i);
		newstate = ceph_mdsmap_get_state(newmap, i);

		dout("check_new_map mds%d state %s%s -> %s%s (session %s)\n",
		     i, ceph_mds_state_name(oldstate),
		     ceph_mdsmap_is_laggy(oldmap, i) ? " (laggy)" : "",
		     ceph_mds_state_name(newstate),
		     ceph_mdsmap_is_laggy(newmap, i) ? " (laggy)" : "",
		     ceph_session_state_name(s->s_state));

		if (i >= newmap->m_max_mds ||
		    memcmp(ceph_mdsmap_get_addr(oldmap, i),
			   ceph_mdsmap_get_addr(newmap, i),
			   sizeof(struct ceph_entity_addr))) {
			if (s->s_state == CEPH_MDS_SESSION_OPENING) {
				/* the session never opened, just close it
				 * out now */
				__wake_requests(mdsc, &s->s_waiting);
				__unregister_session(mdsc, s);
			} else {
				/* just close it */
				mutex_unlock(&mdsc->mutex);
				mutex_lock(&s->s_mutex);
				mutex_lock(&mdsc->mutex);
				ceph_con_close(&s->s_con);
				mutex_unlock(&s->s_mutex);
				s->s_state = CEPH_MDS_SESSION_RESTARTING;
			}
		} else if (oldstate == newstate) {
			continue;  /* nothing new with this mds */
		}

		/*
		 * send reconnect?
		 */
		if (s->s_state == CEPH_MDS_SESSION_RESTARTING &&
		    newstate >= CEPH_MDS_STATE_RECONNECT) {
			mutex_unlock(&mdsc->mutex);
			send_mds_reconnect(mdsc, s);
			mutex_lock(&mdsc->mutex);
		}

		/*
		 * kick requests on any mds that has gone active.
		 */
		if (oldstate < CEPH_MDS_STATE_ACTIVE &&
		    newstate >= CEPH_MDS_STATE_ACTIVE) {
			if (oldstate != CEPH_MDS_STATE_CREATING &&
			    oldstate != CEPH_MDS_STATE_STARTING)
				pr_info("mds%d recovery completed\n", s->s_mds);
			kick_requests(mdsc, i);
			ceph_kick_flushing_caps(mdsc, s);
			wake_up_session_caps(s, 1);
		}
	}

	for (i = 0; i < newmap->m_max_mds && i < mdsc->max_sessions; i++) {
		s = mdsc->sessions[i];
		if (!s)
			continue;
		if (!ceph_mdsmap_is_laggy(newmap, i))
			continue;
		if (s->s_state == CEPH_MDS_SESSION_OPEN ||
		    s->s_state == CEPH_MDS_SESSION_HUNG ||
		    s->s_state == CEPH_MDS_SESSION_CLOSING) {
			dout(" connecting to export targets of laggy mds%d\n",
			     i);
			__open_export_target_sessions(mdsc, s);
		}
	}
}

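/*
 * The interesting transitions above: an address change (or the mds
 * dropping out of the map) moves an open session to RESTARTING and closes
 * its connection; a RESTARTING session whose mds reaches RECONNECT state
 * triggers send_mds_reconnect(); and any mds newly reaching ACTIVE gets
 * its pending requests and flushing caps kicked.
 */
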
/*
 * leases
 */

/*
 * caller must hold session s_mutex, dentry->d_lock
 */
void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);

	ceph_put_mds_session(di->lease_session);
	di->lease_session = NULL;
}

static void handle_lease(struct ceph_mds_client *mdsc,
			 struct ceph_mds_session *session,
			 struct ceph_msg *msg)
{
	struct super_block *sb = mdsc->fsc->sb;
	struct inode *inode;
	struct dentry *parent, *dentry;
	struct ceph_dentry_info *di;
	int mds = session->s_mds;
	struct ceph_mds_lease *h = msg->front.iov_base;
	u32 seq;
	struct ceph_vino vino;
	struct qstr dname;
	int release = 0;

	dout("handle_lease from mds%d\n", mds);

	/* decode */
	if (msg->front.iov_len < sizeof(*h) + sizeof(u32))
		goto bad;
	vino.ino = le64_to_cpu(h->ino);
	vino.snap = CEPH_NOSNAP;
	seq = le32_to_cpu(h->seq);
	dname.name = (void *)h + sizeof(*h) + sizeof(u32);
	dname.len = msg->front.iov_len - sizeof(*h) - sizeof(u32);
	if (dname.len != get_unaligned_le32(h+1))
		goto bad;

	/* lookup inode */
	inode = ceph_find_inode(sb, vino);
	dout("handle_lease %s, ino %llx %p %.*s\n",
	     ceph_lease_op_name(h->action), vino.ino, inode,
	     dname.len, dname.name);

	mutex_lock(&session->s_mutex);
	session->s_seq++;

	if (inode == NULL) {
		dout("handle_lease no inode %llx\n", vino.ino);
		goto release;
	}

	/* dentry */
	parent = d_find_alias(inode);
	if (!parent) {
		dout("no parent dentry on inode %p\n", inode);
		WARN_ON(1);
		goto release;  /* hrm... */
	}
	dname.hash = full_name_hash(dname.name, dname.len);
	dentry = d_lookup(parent, &dname);
	dput(parent);
	if (!dentry)
		goto release;

	spin_lock(&dentry->d_lock);
	di = ceph_dentry(dentry);
	switch (h->action) {
	case CEPH_MDS_LEASE_REVOKE:
		if (di->lease_session == session) {
			if (ceph_seq_cmp(di->lease_seq, seq) > 0)
				h->seq = cpu_to_le32(di->lease_seq);
			__ceph_mdsc_drop_dentry_lease(dentry);
		}
		release = 1;
		break;

	case CEPH_MDS_LEASE_RENEW:
		if (di->lease_session == session &&
		    di->lease_gen == session->s_cap_gen &&
		    di->lease_renew_from &&
		    di->lease_renew_after == 0) {
			unsigned long duration =
				msecs_to_jiffies(le32_to_cpu(h->duration_ms));

			di->lease_seq = seq;
			dentry->d_time = di->lease_renew_from + duration;
			di->lease_renew_after = di->lease_renew_from +
				(duration >> 1);
			di->lease_renew_from = 0;
		}
		break;
	}
	spin_unlock(&dentry->d_lock);
	dput(dentry);

	if (!release)
		goto out;

release:
	/* let's just reuse the same message */
	h->action = CEPH_MDS_LEASE_REVOKE_ACK;
	ceph_msg_get(msg);
	ceph_con_send(&session->s_con, msg);

out:
	iput(inode);
	mutex_unlock(&session->s_mutex);
	return;

bad:
	pr_err("corrupt lease message\n");
	ceph_msg_dump(msg);
}

void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
			      struct inode *inode,
			      struct dentry *dentry, char action,
			      u32 seq)
{
	struct ceph_msg *msg;
	struct ceph_mds_lease *lease;
	int len = sizeof(*lease) + sizeof(u32);
	int dnamelen = 0;

	dout("lease_send_msg inode %p dentry %p %s to mds%d\n",
	     inode, dentry, ceph_lease_op_name(action), session->s_mds);
	dnamelen = dentry->d_name.len;
	len += dnamelen;

	msg = ceph_msg_new(CEPH_MSG_CLIENT_LEASE, len, GFP_NOFS, false);
	if (!msg)
		return;
	lease = msg->front.iov_base;
	lease->action = action;
	lease->ino = cpu_to_le64(ceph_vino(inode).ino);
	lease->first = lease->last = cpu_to_le64(ceph_vino(inode).snap);
	lease->seq = cpu_to_le32(seq);
	put_unaligned_le32(dnamelen, lease + 1);
	memcpy((void *)(lease + 1) + 4, dentry->d_name.name, dnamelen);

	/*
	 * if this is a preemptive lease RELEASE, no need to
	 * flush request stream, since the actual request will
	 * soon follow.
	 */
	msg->more_to_follow = (action == CEPH_MDS_LEASE_RELEASE);

	ceph_con_send(&session->s_con, msg);
}

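/*
 * On the wire, a CEPH_MSG_CLIENT_LEASE front is simply:
 *
 *   struct ceph_mds_lease   (action, ino, first/last snap, seq, duration)
 *   u32    dname length
 *   dname bytes
 *
 * which is why handle_lease() above can reuse the incoming message for
 * its REVOKE_ACK by rewriting h->action alone.
 */
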
/*
 * Preemptively release a lease we expect to invalidate anyway.
 * Pass @inode always, @dentry is optional.
 */
void ceph_mdsc_lease_release(struct ceph_mds_client *mdsc, struct inode *inode,
			     struct dentry *dentry)
{
	struct ceph_dentry_info *di;
	struct ceph_mds_session *session;
	u32 seq;

	BUG_ON(inode == NULL);
	BUG_ON(dentry == NULL);

	/* is dentry lease valid? */
	spin_lock(&dentry->d_lock);
	di = ceph_dentry(dentry);
	if (!di || !di->lease_session ||
	    di->lease_session->s_mds < 0 ||
	    di->lease_gen != di->lease_session->s_cap_gen ||
	    !time_before(jiffies, dentry->d_time)) {
		dout("lease_release inode %p dentry %p -- "
		     "no lease\n",
		     inode, dentry);
		spin_unlock(&dentry->d_lock);
		return;
	}

	/* we do have a lease on this dentry; note mds and seq */
	session = ceph_get_mds_session(di->lease_session);
	seq = di->lease_seq;
	__ceph_mdsc_drop_dentry_lease(dentry);
	spin_unlock(&dentry->d_lock);

	dout("lease_release inode %p dentry %p to mds%d\n",
	     inode, dentry, session->s_mds);
	ceph_mdsc_lease_send_msg(session, inode, dentry,
				 CEPH_MDS_LEASE_RELEASE, seq);
	ceph_put_mds_session(session);
}

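/*
 * Illustrative only (hypothetical caller): the lease is typically released
 * right before issuing the namespace operation that would invalidate it,
 * e.g.
 *
 *	ceph_mdsc_lease_release(mdsc, dir, dentry);
 *	err = ceph_mdsc_do_request(mdsc, dir, req);
 *
 * The more_to_follow hint set in lease_send_msg() keeps the RELEASE from
 * forcing a flush of the request stream ahead of that request.
 */
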
/*
 * drop all leases (and dentry refs) in preparation for umount
 */
static void drop_leases(struct ceph_mds_client *mdsc)
{
	int i;

	dout("drop_leases\n");
	mutex_lock(&mdsc->mutex);
	for (i = 0; i < mdsc->max_sessions; i++) {
		struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
		if (!s)
			continue;
		mutex_unlock(&mdsc->mutex);
		mutex_lock(&s->s_mutex);
		mutex_unlock(&s->s_mutex);
		ceph_put_mds_session(s);
		mutex_lock(&mdsc->mutex);
	}
	mutex_unlock(&mdsc->mutex);
}

/*
 * delayed work -- periodically trim expired leases, renew caps with mds
 */
static void schedule_delayed(struct ceph_mds_client *mdsc)
{
	int delay = 5;
	unsigned hz = round_jiffies_relative(HZ * delay);
	schedule_delayed_work(&mdsc->delayed_work, hz);
}

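/*
 * With delay = 5, round_jiffies_relative(5 * HZ) rounds the ~5 second
 * interval to the nearest whole-second jiffy boundary, so periodic work
 * across the system tends to batch its wakeups rather than firing at
 * arbitrary offsets.
 */
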
static void delayed_work(struct work_struct *work)
{
	int i;
	struct ceph_mds_client *mdsc =
		container_of(work, struct ceph_mds_client, delayed_work.work);
	int renew_interval;
	int renew_caps;

	dout("mdsc delayed_work\n");
	ceph_check_delayed_caps(mdsc);

	mutex_lock(&mdsc->mutex);
	renew_interval = mdsc->mdsmap->m_session_timeout >> 2;
	renew_caps = time_after_eq(jiffies, HZ*renew_interval +
				   mdsc->last_renew_caps);
	if (renew_caps)
		mdsc->last_renew_caps = jiffies;

	for (i = 0; i < mdsc->max_sessions; i++) {
		struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
		if (s == NULL)
			continue;
		if (s->s_state == CEPH_MDS_SESSION_CLOSING) {
			dout("resending session close request for mds%d\n",
			     s->s_mds);
			request_close_session(mdsc, s);
			ceph_put_mds_session(s);
			continue;
		}
		if (s->s_ttl && time_after(jiffies, s->s_ttl)) {
			if (s->s_state == CEPH_MDS_SESSION_OPEN) {
				s->s_state = CEPH_MDS_SESSION_HUNG;
				pr_info("mds%d hung\n", s->s_mds);
			}
		}
		if (s->s_state < CEPH_MDS_SESSION_OPEN) {
			/* this mds is failed or recovering, just wait */
			ceph_put_mds_session(s);
			continue;
		}
		mutex_unlock(&mdsc->mutex);

		mutex_lock(&s->s_mutex);
		if (renew_caps)
			send_renew_caps(mdsc, s);
		else
			ceph_con_keepalive(&s->s_con);
		if (s->s_state == CEPH_MDS_SESSION_OPEN ||
		    s->s_state == CEPH_MDS_SESSION_HUNG)
			ceph_send_cap_releases(mdsc, s);
		mutex_unlock(&s->s_mutex);
		ceph_put_mds_session(s);

		mutex_lock(&mdsc->mutex);
	}
	mutex_unlock(&mdsc->mutex);

	schedule_delayed(mdsc);
}

int ceph_mdsc_init(struct ceph_fs_client *fsc)
{
	struct ceph_mds_client *mdsc;

	mdsc = kzalloc(sizeof(struct ceph_mds_client), GFP_NOFS);
	if (!mdsc)
		return -ENOMEM;
	mdsc->fsc = fsc;
	fsc->mdsc = mdsc;
	mutex_init(&mdsc->mutex);
	mdsc->mdsmap = kzalloc(sizeof(*mdsc->mdsmap), GFP_NOFS);
	if (mdsc->mdsmap == NULL) {
		kfree(mdsc);
		return -ENOMEM;
	}

	init_completion(&mdsc->safe_umount_waiters);
	init_waitqueue_head(&mdsc->session_close_wq);
	INIT_LIST_HEAD(&mdsc->waiting_for_map);
	mdsc->sessions = NULL;
	atomic_set(&mdsc->num_sessions, 0);
	mdsc->max_sessions = 0;
	mdsc->stopping = 0;
	mdsc->last_snap_seq = 0;
	init_rwsem(&mdsc->snap_rwsem);
	mdsc->snap_realms = RB_ROOT;
	INIT_LIST_HEAD(&mdsc->snap_empty);
	spin_lock_init(&mdsc->snap_empty_lock);
	mdsc->last_tid = 0;
	mdsc->oldest_tid = 0;
	mdsc->request_tree = RB_ROOT;
	INIT_DELAYED_WORK(&mdsc->delayed_work, delayed_work);
	mdsc->last_renew_caps = jiffies;
	INIT_LIST_HEAD(&mdsc->cap_delay_list);
	spin_lock_init(&mdsc->cap_delay_lock);
	INIT_LIST_HEAD(&mdsc->snap_flush_list);
	spin_lock_init(&mdsc->snap_flush_lock);
	mdsc->last_cap_flush_tid = 1;
	mdsc->cap_flush_tree = RB_ROOT;
	INIT_LIST_HEAD(&mdsc->cap_dirty);
	INIT_LIST_HEAD(&mdsc->cap_dirty_migrating);
	mdsc->num_cap_flushing = 0;
	spin_lock_init(&mdsc->cap_dirty_lock);
	init_waitqueue_head(&mdsc->cap_flushing_wq);
	spin_lock_init(&mdsc->dentry_lru_lock);
	INIT_LIST_HEAD(&mdsc->dentry_lru);

	ceph_caps_init(mdsc);
	ceph_adjust_min_caps(mdsc, fsc->min_caps);

	init_rwsem(&mdsc->pool_perm_rwsem);
	mdsc->pool_perm_tree = RB_ROOT;

	return 0;
}

/*
 * Wait for safe replies on open mds requests.  If we time out, drop
 * all requests from the tree to avoid dangling dentry refs.
 */
static void wait_requests(struct ceph_mds_client *mdsc)
{
	struct ceph_options *opts = mdsc->fsc->client->options;
	struct ceph_mds_request *req;

	mutex_lock(&mdsc->mutex);
	if (__get_oldest_req(mdsc)) {
		mutex_unlock(&mdsc->mutex);

		dout("wait_requests waiting for requests\n");
		wait_for_completion_timeout(&mdsc->safe_umount_waiters,
				    ceph_timeout_jiffies(opts->mount_timeout));

		/* tear down remaining requests */
		mutex_lock(&mdsc->mutex);
		while ((req = __get_oldest_req(mdsc))) {
			dout("wait_requests timed out on tid %llu\n",
			     req->r_tid);
			__unregister_request(mdsc, req);
		}
	}
	mutex_unlock(&mdsc->mutex);
	dout("wait_requests done\n");
}

/*
 * called before mount is ro, and before dentries are torn down.
 * (hmm, does this still race with new lookups?)
 */
void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc)
{
	dout("pre_umount\n");
	mdsc->stopping = 1;

	drop_leases(mdsc);
	ceph_flush_dirty_caps(mdsc);
	wait_requests(mdsc);

	/*
	 * wait for reply handlers to drop their request refs and
	 * their inode/dcache refs
	 */
	ceph_msgr_flush();
}

/*
 * wait for all write mds requests to flush.
 */
static void wait_unsafe_requests(struct ceph_mds_client *mdsc, u64 want_tid)
{
	struct ceph_mds_request *req = NULL, *nextreq;
	struct rb_node *n;

	mutex_lock(&mdsc->mutex);
	dout("wait_unsafe_requests want %lld\n", want_tid);
restart:
	req = __get_oldest_req(mdsc);
	while (req && req->r_tid <= want_tid) {
		/* find next request */
		n = rb_next(&req->r_node);
		if (n)
			nextreq = rb_entry(n, struct ceph_mds_request, r_node);
		else
			nextreq = NULL;
		if (req->r_op != CEPH_MDS_OP_SETFILELOCK &&
		    (req->r_op & CEPH_MDS_OP_WRITE)) {
			/* write op */
			ceph_mdsc_get_request(req);
			if (nextreq)
				ceph_mdsc_get_request(nextreq);
			mutex_unlock(&mdsc->mutex);
			dout("wait_unsafe_requests wait on %llu (want %llu)\n",
			     req->r_tid, want_tid);
			wait_for_completion(&req->r_safe_completion);
			mutex_lock(&mdsc->mutex);
			ceph_mdsc_put_request(req);
			if (!nextreq)
				break;  /* next dne before, so we're done! */
			if (RB_EMPTY_NODE(&nextreq->r_node)) {
				/* next request was removed from tree */
				ceph_mdsc_put_request(nextreq);
				goto restart;
			}
			ceph_mdsc_put_request(nextreq);  /* won't go away */
		}
		req = nextreq;
	}
	mutex_unlock(&mdsc->mutex);
	dout("wait_unsafe_requests done\n");
}

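/*
 * Note the iteration pattern above: because mdsc->mutex is dropped while
 * waiting, both req and nextreq are pinned with ceph_mdsc_get_request()
 * first.  If nextreq was unlinked from the tree in the meantime (its
 * rb_node is empty), the walk restarts from the oldest request rather
 * than following a stale pointer.
 */
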
void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
{
	u64 want_tid, want_flush, want_snap;

	if (ACCESS_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN)
		return;

	dout("sync\n");
	mutex_lock(&mdsc->mutex);
	want_tid = mdsc->last_tid;
	mutex_unlock(&mdsc->mutex);

	ceph_flush_dirty_caps(mdsc);
	spin_lock(&mdsc->cap_dirty_lock);
	want_flush = mdsc->last_cap_flush_tid;
	spin_unlock(&mdsc->cap_dirty_lock);

	down_read(&mdsc->snap_rwsem);
	want_snap = mdsc->last_snap_seq;
	up_read(&mdsc->snap_rwsem);

	dout("sync want tid %lld flush_seq %lld snap_seq %lld\n",
	     want_tid, want_flush, want_snap);

	wait_unsafe_requests(mdsc, want_tid);
	wait_caps_flush(mdsc, want_flush, want_snap);
}

/*
 * true if all sessions are closed, or we force unmount
 */
static bool done_closing_sessions(struct ceph_mds_client *mdsc)
{
	if (ACCESS_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN)
		return true;
	return atomic_read(&mdsc->num_sessions) == 0;
}

/*
 * called after sb is ro.
 */
void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
{
	struct ceph_options *opts = mdsc->fsc->client->options;
	struct ceph_mds_session *session;
	int i;

	dout("close_sessions\n");

	/* close sessions */
	mutex_lock(&mdsc->mutex);
	for (i = 0; i < mdsc->max_sessions; i++) {
		session = __ceph_lookup_mds_session(mdsc, i);
		if (!session)
			continue;
		mutex_unlock(&mdsc->mutex);
		mutex_lock(&session->s_mutex);
		__close_session(mdsc, session);
		mutex_unlock(&session->s_mutex);
		ceph_put_mds_session(session);
		mutex_lock(&mdsc->mutex);
	}
	mutex_unlock(&mdsc->mutex);

	dout("waiting for sessions to close\n");
	wait_event_timeout(mdsc->session_close_wq, done_closing_sessions(mdsc),
			   ceph_timeout_jiffies(opts->mount_timeout));

	/* tear down remaining sessions */
	mutex_lock(&mdsc->mutex);
	for (i = 0; i < mdsc->max_sessions; i++) {
		if (mdsc->sessions[i]) {
			session = get_session(mdsc->sessions[i]);
			__unregister_session(mdsc, session);
			mutex_unlock(&mdsc->mutex);
			mutex_lock(&session->s_mutex);
			remove_session_caps(session);
			mutex_unlock(&session->s_mutex);
			ceph_put_mds_session(session);
			mutex_lock(&mdsc->mutex);
		}
	}
	WARN_ON(!list_empty(&mdsc->cap_delay_list));
	mutex_unlock(&mdsc->mutex);

	ceph_cleanup_empty_realms(mdsc);

	cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */

	dout("stopped\n");
}

void ceph_mdsc_force_umount(struct ceph_mds_client *mdsc)
{
	struct ceph_mds_session *session;
	int mds;

	dout("force umount\n");

	mutex_lock(&mdsc->mutex);
	for (mds = 0; mds < mdsc->max_sessions; mds++) {
		session = __ceph_lookup_mds_session(mdsc, mds);
		if (!session)
			continue;
		mutex_unlock(&mdsc->mutex);
		mutex_lock(&session->s_mutex);
		__close_session(mdsc, session);
		if (session->s_state == CEPH_MDS_SESSION_CLOSING) {
			cleanup_session_requests(mdsc, session);
			remove_session_caps(session);
		}
		mutex_unlock(&session->s_mutex);
		ceph_put_mds_session(session);
		mutex_lock(&mdsc->mutex);
		kick_requests(mdsc, mds);
	}
	__wake_requests(mdsc, &mdsc->waiting_for_map);
	mutex_unlock(&mdsc->mutex);
}

static void ceph_mdsc_stop(struct ceph_mds_client *mdsc)
{
	dout("stop\n");
	cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */
	if (mdsc->mdsmap)
		ceph_mdsmap_destroy(mdsc->mdsmap);
	kfree(mdsc->sessions);
	ceph_caps_finalize(mdsc);
	ceph_pool_perm_destroy(mdsc);
}

void ceph_mdsc_destroy(struct ceph_fs_client *fsc)
{
	struct ceph_mds_client *mdsc = fsc->mdsc;

	dout("mdsc_destroy %p\n", mdsc);
	ceph_mdsc_stop(mdsc);

	/* flush out any connection work with references to us */
	ceph_msgr_flush();

	fsc->mdsc = NULL;
	kfree(mdsc);
	dout("mdsc_destroy %p done\n", mdsc);
}

/*
 * handle mds map update.
 */
void ceph_mdsc_handle_map(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
{
	u32 epoch;
	u32 maplen;
	void *p = msg->front.iov_base;
	void *end = p + msg->front.iov_len;
	struct ceph_mdsmap *newmap, *oldmap;
	struct ceph_fsid fsid;
	int err = -EINVAL;

	ceph_decode_need(&p, end, sizeof(fsid)+2*sizeof(u32), bad);
	ceph_decode_copy(&p, &fsid, sizeof(fsid));
	if (ceph_check_fsid(mdsc->fsc->client, &fsid) < 0)
		return;
	epoch = ceph_decode_32(&p);
	maplen = ceph_decode_32(&p);
	dout("handle_map epoch %u len %d\n", epoch, (int)maplen);

	/* do we need it? */
	mutex_lock(&mdsc->mutex);
	if (mdsc->mdsmap && epoch <= mdsc->mdsmap->m_epoch) {
		dout("handle_map epoch %u <= our %u\n",
		     epoch, mdsc->mdsmap->m_epoch);
		mutex_unlock(&mdsc->mutex);
		return;
	}

	newmap = ceph_mdsmap_decode(&p, end);
	if (IS_ERR(newmap)) {
		err = PTR_ERR(newmap);
		goto bad_unlock;
	}

	/* swap into place */
	if (mdsc->mdsmap) {
		oldmap = mdsc->mdsmap;
		mdsc->mdsmap = newmap;
		check_new_map(mdsc, newmap, oldmap);
		ceph_mdsmap_destroy(oldmap);
	} else {
		mdsc->mdsmap = newmap;  /* first mds map */
	}
	mdsc->fsc->sb->s_maxbytes = mdsc->mdsmap->m_max_file_size;

	__wake_requests(mdsc, &mdsc->waiting_for_map);
	ceph_monc_got_map(&mdsc->fsc->client->monc, CEPH_SUB_MDSMAP,
			  mdsc->mdsmap->m_epoch);

	mutex_unlock(&mdsc->mutex);
	schedule_delayed(mdsc);
	return;

bad_unlock:
	mutex_unlock(&mdsc->mutex);
bad:
	pr_err("error decoding mdsmap %d\n", err);
	return;
}

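/*
 * Front section of the mdsmap message decoded above:
 *
 *   ceph_fsid   fsid      (must match our cluster)
 *   u32         epoch
 *   u32         maplen
 *   mdsmap payload        (decoded by ceph_mdsmap_decode)
 *
 * Maps with epoch <= the one we already hold are ignored.
 */
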
static struct ceph_connection *con_get(struct ceph_connection *con)
{
	struct ceph_mds_session *s = con->private;

	if (get_session(s)) {
		dout("mdsc con_get %p ok (%d)\n", s, atomic_read(&s->s_ref));
		return con;
	}
	dout("mdsc con_get %p FAIL\n", s);
	return NULL;
}

static void con_put(struct ceph_connection *con)
{
	struct ceph_mds_session *s = con->private;

	dout("mdsc con_put %p (%d)\n", s, atomic_read(&s->s_ref) - 1);
	ceph_put_mds_session(s);
}

/*
 * if the client is unresponsive for long enough, the mds will kill
 * the session entirely.
 */
static void peer_reset(struct ceph_connection *con)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;

	pr_warn("mds%d closed our session\n", s->s_mds);
	send_mds_reconnect(mdsc, s);
}

static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;
	int type = le16_to_cpu(msg->hdr.type);

	mutex_lock(&mdsc->mutex);
	if (__verify_registered_session(mdsc, s) < 0) {
		mutex_unlock(&mdsc->mutex);
		goto out;
	}
	mutex_unlock(&mdsc->mutex);

	switch (type) {
	case CEPH_MSG_MDS_MAP:
		ceph_mdsc_handle_map(mdsc, msg);
		break;
	case CEPH_MSG_CLIENT_SESSION:
		handle_session(s, msg);
		break;
	case CEPH_MSG_CLIENT_REPLY:
		handle_reply(s, msg);
		break;
	case CEPH_MSG_CLIENT_REQUEST_FORWARD:
		handle_forward(mdsc, s, msg);
		break;
	case CEPH_MSG_CLIENT_CAPS:
		ceph_handle_caps(s, msg);
		break;
	case CEPH_MSG_CLIENT_SNAP:
		ceph_handle_snap(mdsc, s, msg);
		break;
	case CEPH_MSG_CLIENT_LEASE:
		handle_lease(mdsc, s, msg);
		break;

	default:
		pr_err("received unknown message type %d %s\n", type,
		       ceph_msg_type_name(type));
	}
out:
	ceph_msg_put(msg);
}

/*
 * authentication
 */

/*
 * Note: returned pointer is the address of a structure that's
 * managed separately.  Caller must *not* attempt to free it.
 */
static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
					int *proto, int force_new)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;
	struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
	struct ceph_auth_handshake *auth = &s->s_auth;

	if (force_new && auth->authorizer) {
		ceph_auth_destroy_authorizer(ac, auth->authorizer);
		auth->authorizer = NULL;
	}
	if (!auth->authorizer) {
		int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_MDS,
						      auth);
		if (ret)
			return ERR_PTR(ret);
	} else {
		int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_MDS,
						      auth);
		if (ret)
			return ERR_PTR(ret);
	}
	*proto = ac->protocol;

	return auth;
}

static int verify_authorizer_reply(struct ceph_connection *con, int len)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;
	struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;

	return ceph_auth_verify_authorizer_reply(ac, s->s_auth.authorizer, len);
}

static int invalidate_authorizer(struct ceph_connection *con)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;
	struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;

	ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_MDS);

	return ceph_monc_validate_auth(&mdsc->fsc->client->monc);
}

static struct ceph_msg *mds_alloc_msg(struct ceph_connection *con,
				struct ceph_msg_header *hdr, int *skip)
{
	struct ceph_msg *msg;
	int type = (int) le16_to_cpu(hdr->type);
	int front_len = (int) le32_to_cpu(hdr->front_len);

	if (con->in_msg)
		return con->in_msg;

	msg = ceph_msg_new(type, front_len, GFP_NOFS, false);
	if (!msg) {
		pr_err("unable to allocate msg type %d len %d\n",
		       type, front_len);
		return NULL;
	}

	return msg;
}

static int mds_sign_message(struct ceph_msg *msg)
{
	struct ceph_mds_session *s = msg->con->private;
	struct ceph_auth_handshake *auth = &s->s_auth;

	return ceph_auth_sign_message(auth, msg);
}

static int mds_check_message_signature(struct ceph_msg *msg)
{
	struct ceph_mds_session *s = msg->con->private;
	struct ceph_auth_handshake *auth = &s->s_auth;

	return ceph_auth_check_message_signature(auth, msg);
}

static const struct ceph_connection_operations mds_con_ops = {
	.get = con_get,
	.put = con_put,
	.dispatch = dispatch,
	.get_authorizer = get_authorizer,
	.verify_authorizer_reply = verify_authorizer_reply,
	.invalidate_authorizer = invalidate_authorizer,
	.peer_reset = peer_reset,
	.alloc_msg = mds_alloc_msg,
	.sign_message = mds_sign_message,
	.check_message_signature = mds_check_message_signature,
};

/* eof */