fs/ceph/mds_client.c
#include <linux/ceph/ceph_debug.h>

#include <linux/fs.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include "super.h"
#include "mds_client.h"

#include <linux/ceph/ceph_features.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/pagelist.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/debugfs.h>

/*
 * A cluster of MDS (metadata server) daemons is responsible for
 * managing the file system namespace (the directory hierarchy and
 * inodes) and for coordinating shared access to storage.  Metadata is
 * partitioned hierarchically across a number of servers, and that
 * partition varies over time as the cluster adjusts the distribution
 * in order to balance load.
 *
 * The MDS client is primarily responsible for managing synchronous
 * metadata requests for operations like open, unlink, and so forth.
 * If there is an MDS failure, we find out about it when we (possibly
 * request and) receive a new MDS map, and can resubmit affected
 * requests.
 *
 * For the most part, though, we take advantage of a lossless
 * communications channel to the MDS, and do not need to worry about
 * timing out or resubmitting requests.
 *
 * We maintain a stateful "session" with each MDS we interact with.
 * Within each session, we send periodic heartbeat messages to ensure
 * any capabilities or leases we have been issued remain valid.  If
 * the session times out and goes stale, our leases and capabilities
 * are no longer valid.
 */
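
/*
 * Illustrative only (a usage sketch, not part of this file's code;
 * ceph_mdsc_do_request() and ceph_mdsc_put_request() live elsewhere
 * in this file and its header):
 *
 *      req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LOOKUP,
 *                                     USE_ANY_MDS);
 *      ...fill in r_dentry/r_path1, r_num_caps, etc...
 *      err = ceph_mdsc_do_request(mdsc, dir, req);
 *      ceph_mdsc_put_request(req);
 *
 * ceph_mdsc_do_request() picks an MDS (see __choose_mds() below),
 * opens a session if needed, and blocks until the reply arrives or
 * the request is resubmitted after an MDS failure.
 */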

struct ceph_reconnect_state {
        int nr_caps;
        struct ceph_pagelist *pagelist;
        bool flock;
};

static void __wake_requests(struct ceph_mds_client *mdsc,
                            struct list_head *head);

static const struct ceph_connection_operations mds_con_ops;


/*
 * mds reply parsing
 */

/*
 * parse individual inode info
 */
static int parse_reply_info_in(void **p, void *end,
                               struct ceph_mds_reply_info_in *info,
                               u64 features)
{
        int err = -EIO;

        info->in = *p;
        *p += sizeof(struct ceph_mds_reply_inode) +
                sizeof(*info->in->fragtree.splits) *
                le32_to_cpu(info->in->fragtree.nsplits);

        ceph_decode_32_safe(p, end, info->symlink_len, bad);
        ceph_decode_need(p, end, info->symlink_len, bad);
        info->symlink = *p;
        *p += info->symlink_len;

        if (features & CEPH_FEATURE_DIRLAYOUTHASH)
                ceph_decode_copy_safe(p, end, &info->dir_layout,
                                      sizeof(info->dir_layout), bad);
        else
                memset(&info->dir_layout, 0, sizeof(info->dir_layout));

        ceph_decode_32_safe(p, end, info->xattr_len, bad);
        ceph_decode_need(p, end, info->xattr_len, bad);
        info->xattr_data = *p;
        *p += info->xattr_len;
        return 0;
bad:
        return err;
}
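
/*
 * A note on the decode idiom above (informational only): the
 * ceph_decode_*_safe() helpers from <linux/ceph/decode.h> advance *p
 * through the [*p, end) buffer and jump to the given label when fewer
 * bytes remain than requested, e.g.
 *
 *      u32 len;
 *      ceph_decode_32_safe(p, end, len, bad);   bounds-checked read
 *      ceph_decode_need(p, end, len, bad);      require len more bytes
 *
 * so every parser here funnels short buffers to a single "bad" exit
 * that returns -EIO.
 */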

/*
 * parse a normal reply, which may contain a (dir+)dentry and/or a
 * target inode.
 */
static int parse_reply_info_trace(void **p, void *end,
                                  struct ceph_mds_reply_info_parsed *info,
                                  u64 features)
{
        int err;

        if (info->head->is_dentry) {
                err = parse_reply_info_in(p, end, &info->diri, features);
                if (err < 0)
                        goto out_bad;

                if (unlikely(*p + sizeof(*info->dirfrag) > end))
                        goto bad;
                info->dirfrag = *p;
                *p += sizeof(*info->dirfrag) +
                        sizeof(u32)*le32_to_cpu(info->dirfrag->ndist);
                if (unlikely(*p > end))
                        goto bad;

                ceph_decode_32_safe(p, end, info->dname_len, bad);
                ceph_decode_need(p, end, info->dname_len, bad);
                info->dname = *p;
                *p += info->dname_len;
                info->dlease = *p;
                *p += sizeof(*info->dlease);
        }

        if (info->head->is_target) {
                err = parse_reply_info_in(p, end, &info->targeti, features);
                if (err < 0)
                        goto out_bad;
        }

        if (unlikely(*p != end))
                goto bad;
        return 0;

bad:
        err = -EIO;
out_bad:
        pr_err("problem parsing mds trace %d\n", err);
        return err;
}

/*
 * parse readdir results
 */
static int parse_reply_info_dir(void **p, void *end,
                                struct ceph_mds_reply_info_parsed *info,
                                u64 features)
{
        u32 num, i = 0;
        int err;

        info->dir_dir = *p;
        if (*p + sizeof(*info->dir_dir) > end)
                goto bad;
        *p += sizeof(*info->dir_dir) +
                sizeof(u32)*le32_to_cpu(info->dir_dir->ndist);
        if (*p > end)
                goto bad;

        ceph_decode_need(p, end, sizeof(num) + 2, bad);
        num = ceph_decode_32(p);
        info->dir_end = ceph_decode_8(p);
        info->dir_complete = ceph_decode_8(p);
        if (num == 0)
                goto done;

        BUG_ON(!info->dir_in);
        info->dir_dname = (void *)(info->dir_in + num);
        info->dir_dname_len = (void *)(info->dir_dname + num);
        info->dir_dlease = (void *)(info->dir_dname_len + num);
        if ((unsigned long)(info->dir_dlease + num) >
            (unsigned long)info->dir_in + info->dir_buf_size) {
                pr_err("dir contents are larger than expected\n");
                WARN_ON(1);
                goto bad;
        }

        info->dir_nr = num;
        while (num) {
                /* dentry */
                ceph_decode_need(p, end, sizeof(u32)*2, bad);
                info->dir_dname_len[i] = ceph_decode_32(p);
                ceph_decode_need(p, end, info->dir_dname_len[i], bad);
                info->dir_dname[i] = *p;
                *p += info->dir_dname_len[i];
                dout("parsed dir dname '%.*s'\n", info->dir_dname_len[i],
                     info->dir_dname[i]);
                info->dir_dlease[i] = *p;
                *p += sizeof(struct ceph_mds_reply_lease);

                /* inode */
                err = parse_reply_info_in(p, end, &info->dir_in[i], features);
                if (err < 0)
                        goto out_bad;
                i++;
                num--;
        }

done:
        if (*p != end)
                goto bad;
        return 0;

bad:
        err = -EIO;
out_bad:
        pr_err("problem parsing dir contents %d\n", err);
        return err;
}
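
/*
 * Layout sketch (inferred from the pointer arithmetic above, not a
 * wire format definition): the readdir reply buffer allocated by
 * ceph_alloc_readdir_reply_buffer() is carved into four parallel
 * arrays,
 *
 *      dir_in[0..num-1]         struct ceph_mds_reply_info_in
 *      dir_dname[0..num-1]      char *   (points into the message)
 *      dir_dname_len[0..num-1]  u32
 *      dir_dlease[0..num-1]     struct ceph_mds_reply_lease *
 *
 * which is why the bounds check compares dir_dlease + num against
 * dir_in + dir_buf_size.
 */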

/*
 * parse fcntl F_GETLK results
 */
static int parse_reply_info_filelock(void **p, void *end,
                                     struct ceph_mds_reply_info_parsed *info,
                                     u64 features)
{
        if (*p + sizeof(*info->filelock_reply) > end)
                goto bad;

        info->filelock_reply = *p;
        *p += sizeof(*info->filelock_reply);

        if (unlikely(*p != end))
                goto bad;
        return 0;

bad:
        return -EIO;
}

/*
 * parse create results
 */
static int parse_reply_info_create(void **p, void *end,
                                  struct ceph_mds_reply_info_parsed *info,
                                  u64 features)
{
        if (features & CEPH_FEATURE_REPLY_CREATE_INODE) {
                if (*p == end) {
                        info->has_create_ino = false;
                } else {
                        info->has_create_ino = true;
                        info->ino = ceph_decode_64(p);
                }
        }

        if (unlikely(*p != end))
                goto bad;
        return 0;

bad:
        return -EIO;
}

/*
 * parse extra results
 */
static int parse_reply_info_extra(void **p, void *end,
                                  struct ceph_mds_reply_info_parsed *info,
                                  u64 features)
{
        if (info->head->op == CEPH_MDS_OP_GETFILELOCK)
                return parse_reply_info_filelock(p, end, info, features);
        else if (info->head->op == CEPH_MDS_OP_READDIR ||
                 info->head->op == CEPH_MDS_OP_LSSNAP)
                return parse_reply_info_dir(p, end, info, features);
        else if (info->head->op == CEPH_MDS_OP_CREATE)
                return parse_reply_info_create(p, end, info, features);
        else
                return -EIO;
}

/*
 * parse entire mds reply
 */
static int parse_reply_info(struct ceph_msg *msg,
                            struct ceph_mds_reply_info_parsed *info,
                            u64 features)
{
        void *p, *end;
        u32 len;
        int err;

        info->head = msg->front.iov_base;
        p = msg->front.iov_base + sizeof(struct ceph_mds_reply_head);
        end = p + msg->front.iov_len - sizeof(struct ceph_mds_reply_head);

        /* trace */
        ceph_decode_32_safe(&p, end, len, bad);
        if (len > 0) {
                ceph_decode_need(&p, end, len, bad);
                err = parse_reply_info_trace(&p, p+len, info, features);
                if (err < 0)
                        goto out_bad;
        }

        /* extra */
        ceph_decode_32_safe(&p, end, len, bad);
        if (len > 0) {
                ceph_decode_need(&p, end, len, bad);
                err = parse_reply_info_extra(&p, p+len, info, features);
                if (err < 0)
                        goto out_bad;
        }

        /* snap blob */
        ceph_decode_32_safe(&p, end, len, bad);
        info->snapblob_len = len;
        info->snapblob = p;
        p += len;

        if (p != end)
                goto bad;
        return 0;

bad:
        err = -EIO;
out_bad:
        pr_err("mds parse_reply err %d\n", err);
        return err;
}
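
/*
 * Reply framing, as implied by the parser above: after the fixed
 * ceph_mds_reply_head, the front of the message carries three
 * length-prefixed blocks in order,
 *
 *      u32 len | trace bytes   (dentry/inode metadata)
 *      u32 len | extra bytes   (op-specific: readdir, filelock, create)
 *      u32 len | snap blob
 *
 * and the parser insists the final block ends exactly at the end of
 * the front payload.
 */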

static void destroy_reply_info(struct ceph_mds_reply_info_parsed *info)
{
        if (!info->dir_in)
                return;
        free_pages((unsigned long)info->dir_in, get_order(info->dir_buf_size));
}


/*
 * sessions
 */
static const char *session_state_name(int s)
{
        switch (s) {
        case CEPH_MDS_SESSION_NEW: return "new";
        case CEPH_MDS_SESSION_OPENING: return "opening";
        case CEPH_MDS_SESSION_OPEN: return "open";
        case CEPH_MDS_SESSION_HUNG: return "hung";
        case CEPH_MDS_SESSION_CLOSING: return "closing";
        case CEPH_MDS_SESSION_RESTARTING: return "restarting";
        case CEPH_MDS_SESSION_RECONNECTING: return "reconnecting";
        default: return "???";
        }
}

static struct ceph_mds_session *get_session(struct ceph_mds_session *s)
{
        if (atomic_inc_not_zero(&s->s_ref)) {
                dout("mdsc get_session %p %d -> %d\n", s,
                     atomic_read(&s->s_ref)-1, atomic_read(&s->s_ref));
                return s;
        } else {
                dout("mdsc get_session %p 0 -- FAIL", s);
                return NULL;
        }
}

void ceph_put_mds_session(struct ceph_mds_session *s)
{
        dout("mdsc put_session %p %d -> %d\n", s,
             atomic_read(&s->s_ref), atomic_read(&s->s_ref)-1);
        if (atomic_dec_and_test(&s->s_ref)) {
                if (s->s_auth.authorizer)
                        ceph_auth_destroy_authorizer(
                                s->s_mdsc->fsc->client->monc.auth,
                                s->s_auth.authorizer);
                kfree(s);
        }
}

/*
 * called under mdsc->mutex
 */
struct ceph_mds_session *__ceph_lookup_mds_session(struct ceph_mds_client *mdsc,
                                                   int mds)
{
        struct ceph_mds_session *session;

        if (mds >= mdsc->max_sessions || mdsc->sessions[mds] == NULL)
                return NULL;
        session = mdsc->sessions[mds];
        dout("lookup_mds_session %p %d\n", session,
             atomic_read(&session->s_ref));
        get_session(session);
        return session;
}

static bool __have_session(struct ceph_mds_client *mdsc, int mds)
{
        if (mds >= mdsc->max_sessions)
                return false;
        return mdsc->sessions[mds];
}

static int __verify_registered_session(struct ceph_mds_client *mdsc,
                                       struct ceph_mds_session *s)
{
        if (s->s_mds >= mdsc->max_sessions ||
            mdsc->sessions[s->s_mds] != s)
                return -ENOENT;
        return 0;
}

/*
 * create+register a new session for given mds.
 * called under mdsc->mutex.
 */
static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
                                                 int mds)
{
        struct ceph_mds_session *s;

        if (mds >= mdsc->mdsmap->m_max_mds)
                return ERR_PTR(-EINVAL);

        s = kzalloc(sizeof(*s), GFP_NOFS);
        if (!s)
                return ERR_PTR(-ENOMEM);
        s->s_mdsc = mdsc;
        s->s_mds = mds;
        s->s_state = CEPH_MDS_SESSION_NEW;
        s->s_ttl = 0;
        s->s_seq = 0;
        mutex_init(&s->s_mutex);

        ceph_con_init(&s->s_con, s, &mds_con_ops, &mdsc->fsc->client->msgr);

        spin_lock_init(&s->s_gen_ttl_lock);
        s->s_cap_gen = 0;
        s->s_cap_ttl = jiffies - 1;

        spin_lock_init(&s->s_cap_lock);
        s->s_renew_requested = 0;
        s->s_renew_seq = 0;
        INIT_LIST_HEAD(&s->s_caps);
        s->s_nr_caps = 0;
        s->s_trim_caps = 0;
        atomic_set(&s->s_ref, 1);
        INIT_LIST_HEAD(&s->s_waiting);
        INIT_LIST_HEAD(&s->s_unsafe);
        s->s_num_cap_releases = 0;
        s->s_cap_reconnect = 0;
        s->s_cap_iterator = NULL;
        INIT_LIST_HEAD(&s->s_cap_releases);
        INIT_LIST_HEAD(&s->s_cap_releases_done);
        INIT_LIST_HEAD(&s->s_cap_flushing);
        INIT_LIST_HEAD(&s->s_cap_snaps_flushing);

        dout("register_session mds%d\n", mds);
        if (mds >= mdsc->max_sessions) {
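                /*
                 * Grow the array to the next power of two that fits
                 * this rank; e.g. mds 5 gives newmax 8, since
                 * get_count_order(6) is 3 (illustrative arithmetic,
                 * not from the original source).
                 */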
                int newmax = 1 << get_count_order(mds+1);
                struct ceph_mds_session **sa;

                dout("register_session realloc to %d\n", newmax);
                sa = kcalloc(newmax, sizeof(void *), GFP_NOFS);
                if (sa == NULL)
                        goto fail_realloc;
                if (mdsc->sessions) {
                        memcpy(sa, mdsc->sessions,
                               mdsc->max_sessions * sizeof(void *));
                        kfree(mdsc->sessions);
                }
                mdsc->sessions = sa;
                mdsc->max_sessions = newmax;
        }
        mdsc->sessions[mds] = s;
        atomic_inc(&s->s_ref);  /* one ref to sessions[], one to caller */

        ceph_con_open(&s->s_con, CEPH_ENTITY_TYPE_MDS, mds,
                      ceph_mdsmap_get_addr(mdsc->mdsmap, mds));

        return s;

fail_realloc:
        kfree(s);
        return ERR_PTR(-ENOMEM);
}

/*
 * called under mdsc->mutex
 */
static void __unregister_session(struct ceph_mds_client *mdsc,
                               struct ceph_mds_session *s)
{
        dout("__unregister_session mds%d %p\n", s->s_mds, s);
        BUG_ON(mdsc->sessions[s->s_mds] != s);
        mdsc->sessions[s->s_mds] = NULL;
        ceph_con_close(&s->s_con);
        ceph_put_mds_session(s);
}

/*
 * drop session refs in request.
 *
 * should be last request ref, or hold mdsc->mutex
 */
static void put_request_session(struct ceph_mds_request *req)
{
        if (req->r_session) {
                ceph_put_mds_session(req->r_session);
                req->r_session = NULL;
        }
}

void ceph_mdsc_release_request(struct kref *kref)
{
        struct ceph_mds_request *req = container_of(kref,
                                                    struct ceph_mds_request,
                                                    r_kref);
        destroy_reply_info(&req->r_reply_info);
        if (req->r_request)
                ceph_msg_put(req->r_request);
        if (req->r_reply)
                ceph_msg_put(req->r_reply);
        if (req->r_inode) {
                ceph_put_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
                iput(req->r_inode);
        }
        if (req->r_locked_dir)
                ceph_put_cap_refs(ceph_inode(req->r_locked_dir), CEPH_CAP_PIN);
        if (req->r_target_inode)
                iput(req->r_target_inode);
        if (req->r_dentry)
                dput(req->r_dentry);
        if (req->r_old_dentry)
                dput(req->r_old_dentry);
        if (req->r_old_dentry_dir) {
                /*
                 * track (and drop pins for) r_old_dentry_dir
                 * separately, since r_old_dentry's d_parent may have
                 * changed between the dir mutex being dropped and
                 * this request being freed.
                 */
                ceph_put_cap_refs(ceph_inode(req->r_old_dentry_dir),
                                  CEPH_CAP_PIN);
                iput(req->r_old_dentry_dir);
        }
        kfree(req->r_path1);
        kfree(req->r_path2);
        put_request_session(req);
        ceph_unreserve_caps(req->r_mdsc, &req->r_caps_reservation);
        kfree(req);
}

/*
 * lookup request, bump ref if found.
 *
 * called under mdsc->mutex.
 */
static struct ceph_mds_request *__lookup_request(struct ceph_mds_client *mdsc,
                                                 u64 tid)
{
        struct ceph_mds_request *req;
        struct rb_node *n = mdsc->request_tree.rb_node;

        while (n) {
                req = rb_entry(n, struct ceph_mds_request, r_node);
                if (tid < req->r_tid)
                        n = n->rb_left;
                else if (tid > req->r_tid)
                        n = n->rb_right;
                else {
                        ceph_mdsc_get_request(req);
                        return req;
                }
        }
        return NULL;
}

static void __insert_request(struct ceph_mds_client *mdsc,
                             struct ceph_mds_request *new)
{
        struct rb_node **p = &mdsc->request_tree.rb_node;
        struct rb_node *parent = NULL;
        struct ceph_mds_request *req = NULL;

        while (*p) {
                parent = *p;
                req = rb_entry(parent, struct ceph_mds_request, r_node);
                if (new->r_tid < req->r_tid)
                        p = &(*p)->rb_left;
                else if (new->r_tid > req->r_tid)
                        p = &(*p)->rb_right;
                else
                        BUG();
        }

        rb_link_node(&new->r_node, parent, p);
        rb_insert_color(&new->r_node, &mdsc->request_tree);
}

/*
 * Register an in-flight request, and assign a tid.  Link to the
 * directory we are modifying (if any).
 *
 * Called under mdsc->mutex.
 */
static void __register_request(struct ceph_mds_client *mdsc,
                               struct ceph_mds_request *req,
                               struct inode *dir)
{
        req->r_tid = ++mdsc->last_tid;
        if (req->r_num_caps)
                ceph_reserve_caps(mdsc, &req->r_caps_reservation,
                                  req->r_num_caps);
        dout("__register_request %p tid %lld\n", req, req->r_tid);
        ceph_mdsc_get_request(req);
        __insert_request(mdsc, req);

        req->r_uid = current_fsuid();
        req->r_gid = current_fsgid();

        if (dir) {
                struct ceph_inode_info *ci = ceph_inode(dir);

                ihold(dir);
                spin_lock(&ci->i_unsafe_lock);
                req->r_unsafe_dir = dir;
                list_add_tail(&req->r_unsafe_dir_item, &ci->i_unsafe_dirops);
                spin_unlock(&ci->i_unsafe_lock);
        }
}

static void __unregister_request(struct ceph_mds_client *mdsc,
                                 struct ceph_mds_request *req)
{
        dout("__unregister_request %p tid %lld\n", req, req->r_tid);
        rb_erase(&req->r_node, &mdsc->request_tree);
        RB_CLEAR_NODE(&req->r_node);

        if (req->r_unsafe_dir) {
                struct ceph_inode_info *ci = ceph_inode(req->r_unsafe_dir);

                spin_lock(&ci->i_unsafe_lock);
                list_del_init(&req->r_unsafe_dir_item);
                spin_unlock(&ci->i_unsafe_lock);

                iput(req->r_unsafe_dir);
                req->r_unsafe_dir = NULL;
        }

        complete_all(&req->r_safe_completion);

        ceph_mdsc_put_request(req);
}

/*
 * Choose mds to send request to next.  If there is a hint set in the
 * request (e.g., due to a prior forward hint from the mds), use that.
 * Otherwise, consult frag tree and/or caps to identify the
 * appropriate mds.  If all else fails, choose randomly.
 *
 * Called under mdsc->mutex.
 */
static struct dentry *get_nonsnap_parent(struct dentry *dentry)
{
        /*
         * We don't need to worry about protecting the d_parent access
         * here because we never rename inside the snapped namespace
         * except to resplice to another snapdir, and either the old or
         * new result is a valid result.
         */
        while (!IS_ROOT(dentry) && ceph_snap(dentry->d_inode) != CEPH_NOSNAP)
                dentry = dentry->d_parent;
        return dentry;
}

static int __choose_mds(struct ceph_mds_client *mdsc,
                        struct ceph_mds_request *req)
{
        struct inode *inode;
        struct ceph_inode_info *ci;
        struct ceph_cap *cap;
        int mode = req->r_direct_mode;
        int mds = -1;
        u32 hash = req->r_direct_hash;
        bool is_hash = req->r_direct_is_hash;

        /*
         * is there a specific mds we should try?  ignore hint if we have
         * no session and the mds is not up (active or recovering).
         */
        if (req->r_resend_mds >= 0 &&
            (__have_session(mdsc, req->r_resend_mds) ||
             ceph_mdsmap_get_state(mdsc->mdsmap, req->r_resend_mds) > 0)) {
                dout("choose_mds using resend_mds mds%d\n",
                     req->r_resend_mds);
                return req->r_resend_mds;
        }

        if (mode == USE_RANDOM_MDS)
                goto random;

        inode = NULL;
        if (req->r_inode) {
                inode = req->r_inode;
        } else if (req->r_dentry) {
                /* ignore race with rename; old or new d_parent is okay */
                struct dentry *parent = req->r_dentry->d_parent;
                struct inode *dir = parent->d_inode;

                if (dir->i_sb != mdsc->fsc->sb) {
                        /* not this fs! */
                        inode = req->r_dentry->d_inode;
                } else if (ceph_snap(dir) != CEPH_NOSNAP) {
                        /* direct snapped/virtual snapdir requests
                         * based on parent dir inode */
                        struct dentry *dn = get_nonsnap_parent(parent);
                        inode = dn->d_inode;
                        dout("__choose_mds using nonsnap parent %p\n", inode);
                } else {
                        /* dentry target */
                        inode = req->r_dentry->d_inode;
                        if (!inode || mode == USE_AUTH_MDS) {
                                /* dir + name */
                                inode = dir;
                                hash = ceph_dentry_hash(dir, req->r_dentry);
                                is_hash = true;
                        }
                }
        }

        dout("__choose_mds %p is_hash=%d (%d) mode %d\n", inode, (int)is_hash,
             (int)hash, mode);
        if (!inode)
                goto random;
        ci = ceph_inode(inode);

        if (is_hash && S_ISDIR(inode->i_mode)) {
                struct ceph_inode_frag frag;
                int found;

                ceph_choose_frag(ci, hash, &frag, &found);
                if (found) {
                        if (mode == USE_ANY_MDS && frag.ndist > 0) {
                                u8 r;

                                /* choose a random replica */
                                get_random_bytes(&r, 1);
                                r %= frag.ndist;
                                mds = frag.dist[r];
                                dout("choose_mds %p %llx.%llx "
                                     "frag %u mds%d (%d/%d)\n",
                                     inode, ceph_vinop(inode),
                                     frag.frag, mds,
                                     (int)r, frag.ndist);
                                if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
                                    CEPH_MDS_STATE_ACTIVE)
                                        return mds;
                        }

                        /* since this file/dir wasn't known to be
                         * replicated, then we want to look for the
                         * authoritative mds. */
                        mode = USE_AUTH_MDS;
                        if (frag.mds >= 0) {
                                /* choose auth mds */
                                mds = frag.mds;
                                dout("choose_mds %p %llx.%llx "
                                     "frag %u mds%d (auth)\n",
                                     inode, ceph_vinop(inode), frag.frag, mds);
                                if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
                                    CEPH_MDS_STATE_ACTIVE)
                                        return mds;
                        }
                }
        }

        spin_lock(&ci->i_ceph_lock);
        cap = NULL;
        if (mode == USE_AUTH_MDS)
                cap = ci->i_auth_cap;
        if (!cap && !RB_EMPTY_ROOT(&ci->i_caps))
                cap = rb_entry(rb_first(&ci->i_caps), struct ceph_cap, ci_node);
        if (!cap) {
                spin_unlock(&ci->i_ceph_lock);
                goto random;
        }
        mds = cap->session->s_mds;
        dout("choose_mds %p %llx.%llx mds%d (%scap %p)\n",
             inode, ceph_vinop(inode), mds,
             cap == ci->i_auth_cap ? "auth " : "", cap);
        spin_unlock(&ci->i_ceph_lock);
        return mds;

random:
        mds = ceph_mdsmap_get_random_mds(mdsc->mdsmap);
        dout("choose_mds chose random mds%d\n", mds);
        return mds;
}
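
/*
 * In short (a reading of the function above, not new policy): the
 * selection order is the resend hint, then a dir-fragment replica or
 * auth mds from the frag tree, then the mds holding an existing cap
 * on the inode, and finally a random mds from the map.
 */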

/*
 * session messages
 */
static struct ceph_msg *create_session_msg(u32 op, u64 seq)
{
        struct ceph_msg *msg;
        struct ceph_mds_session_head *h;

        msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h), GFP_NOFS,
                           false);
        if (!msg) {
                pr_err("create_session_msg ENOMEM creating msg\n");
                return NULL;
        }
        h = msg->front.iov_base;
        h->op = cpu_to_le32(op);
        h->seq = cpu_to_le64(seq);
        return msg;
}

/*
 * send session open request.
 *
 * called under mdsc->mutex
 */
static int __open_session(struct ceph_mds_client *mdsc,
                          struct ceph_mds_session *session)
{
        struct ceph_msg *msg;
        int mstate;
        int mds = session->s_mds;

        /* wait for mds to go active? */
        mstate = ceph_mdsmap_get_state(mdsc->mdsmap, mds);
        dout("open_session to mds%d (%s)\n", mds,
             ceph_mds_state_name(mstate));
        session->s_state = CEPH_MDS_SESSION_OPENING;
        session->s_renew_requested = jiffies;

        /* send connect message */
        msg = create_session_msg(CEPH_SESSION_REQUEST_OPEN, session->s_seq);
        if (!msg)
                return -ENOMEM;
        ceph_con_send(&session->s_con, msg);
        return 0;
}

/*
 * open sessions for any export targets for the given mds
 *
 * called under mdsc->mutex
 */
static struct ceph_mds_session *
__open_export_target_session(struct ceph_mds_client *mdsc, int target)
{
        struct ceph_mds_session *session;

        session = __ceph_lookup_mds_session(mdsc, target);
        if (!session) {
                session = register_session(mdsc, target);
                if (IS_ERR(session))
                        return session;
        }
        if (session->s_state == CEPH_MDS_SESSION_NEW ||
            session->s_state == CEPH_MDS_SESSION_CLOSING)
                __open_session(mdsc, session);

        return session;
}

struct ceph_mds_session *
ceph_mdsc_open_export_target_session(struct ceph_mds_client *mdsc, int target)
{
        struct ceph_mds_session *session;

        dout("open_export_target_session to mds%d\n", target);

        mutex_lock(&mdsc->mutex);
        session = __open_export_target_session(mdsc, target);
        mutex_unlock(&mdsc->mutex);

        return session;
}

static void __open_export_target_sessions(struct ceph_mds_client *mdsc,
                                          struct ceph_mds_session *session)
{
        struct ceph_mds_info *mi;
        struct ceph_mds_session *ts;
        int i, mds = session->s_mds;

        if (mds >= mdsc->mdsmap->m_max_mds)
                return;

        mi = &mdsc->mdsmap->m_info[mds];
        dout("open_export_target_sessions for mds%d (%d targets)\n",
             session->s_mds, mi->num_export_targets);

        for (i = 0; i < mi->num_export_targets; i++) {
                ts = __open_export_target_session(mdsc, mi->export_targets[i]);
                if (!IS_ERR(ts))
                        ceph_put_mds_session(ts);
        }
}

void ceph_mdsc_open_export_target_sessions(struct ceph_mds_client *mdsc,
                                           struct ceph_mds_session *session)
{
        mutex_lock(&mdsc->mutex);
        __open_export_target_sessions(mdsc, session);
        mutex_unlock(&mdsc->mutex);
}

/*
 * session caps
 */

/*
 * Free preallocated cap messages assigned to this session
 */
static void cleanup_cap_releases(struct ceph_mds_session *session)
{
        struct ceph_msg *msg;

        spin_lock(&session->s_cap_lock);
        while (!list_empty(&session->s_cap_releases)) {
                msg = list_first_entry(&session->s_cap_releases,
                                       struct ceph_msg, list_head);
                list_del_init(&msg->list_head);
                ceph_msg_put(msg);
        }
        while (!list_empty(&session->s_cap_releases_done)) {
                msg = list_first_entry(&session->s_cap_releases_done,
                                       struct ceph_msg, list_head);
                list_del_init(&msg->list_head);
                ceph_msg_put(msg);
        }
        spin_unlock(&session->s_cap_lock);
}

/*
 * Helper to safely iterate over all caps associated with a session, with
 * special care taken to handle a racing __ceph_remove_cap().
 *
 * Caller must hold session s_mutex.
 */
static int iterate_session_caps(struct ceph_mds_session *session,
                                 int (*cb)(struct inode *, struct ceph_cap *,
                                            void *), void *arg)
{
        struct list_head *p;
        struct ceph_cap *cap;
        struct inode *inode, *last_inode = NULL;
        struct ceph_cap *old_cap = NULL;
        int ret;

        dout("iterate_session_caps %p mds%d\n", session, session->s_mds);
        spin_lock(&session->s_cap_lock);
        p = session->s_caps.next;
        while (p != &session->s_caps) {
                cap = list_entry(p, struct ceph_cap, session_caps);
                inode = igrab(&cap->ci->vfs_inode);
                if (!inode) {
                        p = p->next;
                        continue;
                }
                session->s_cap_iterator = cap;
                spin_unlock(&session->s_cap_lock);

                if (last_inode) {
                        iput(last_inode);
                        last_inode = NULL;
                }
                if (old_cap) {
                        ceph_put_cap(session->s_mdsc, old_cap);
                        old_cap = NULL;
                }

                ret = cb(inode, cap, arg);
                last_inode = inode;

                spin_lock(&session->s_cap_lock);
                p = p->next;
                if (cap->ci == NULL) {
                        dout("iterate_session_caps  finishing cap %p removal\n",
                             cap);
                        BUG_ON(cap->session != session);
                        list_del_init(&cap->session_caps);
                        session->s_nr_caps--;
                        cap->session = NULL;
                        old_cap = cap;  /* put_cap it w/o locks held */
                }
                if (ret < 0)
                        goto out;
        }
        ret = 0;
out:
        session->s_cap_iterator = NULL;
        spin_unlock(&session->s_cap_lock);

        if (last_inode)
                iput(last_inode);
        if (old_cap)
                ceph_put_cap(session->s_mdsc, old_cap);

        return ret;
}
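
/*
 * Callback contract for iterate_session_caps(), as used by the callers
 * below: the callback runs without s_cap_lock held, with a reference
 * on the inode; returning a negative value aborts the walk and is
 * propagated, while returning 0 continues it.
 */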

static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
                                  void *arg)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        int drop = 0;

        dout("removing cap %p, ci is %p, inode is %p\n",
             cap, ci, &ci->vfs_inode);
        spin_lock(&ci->i_ceph_lock);
        __ceph_remove_cap(cap, false);
        if (!__ceph_is_any_real_caps(ci)) {
                struct ceph_mds_client *mdsc =
                        ceph_sb_to_client(inode->i_sb)->mdsc;

                spin_lock(&mdsc->cap_dirty_lock);
                if (!list_empty(&ci->i_dirty_item)) {
                        pr_info(" dropping dirty %s state for %p %lld\n",
                                ceph_cap_string(ci->i_dirty_caps),
                                inode, ceph_ino(inode));
                        ci->i_dirty_caps = 0;
                        list_del_init(&ci->i_dirty_item);
                        drop = 1;
                }
                if (!list_empty(&ci->i_flushing_item)) {
                        pr_info(" dropping dirty+flushing %s state for %p %lld\n",
                                ceph_cap_string(ci->i_flushing_caps),
                                inode, ceph_ino(inode));
                        ci->i_flushing_caps = 0;
                        list_del_init(&ci->i_flushing_item);
                        mdsc->num_cap_flushing--;
                        drop = 1;
                }
                if (drop && ci->i_wrbuffer_ref) {
                        pr_info(" dropping dirty data for %p %lld\n",
                                inode, ceph_ino(inode));
                        ci->i_wrbuffer_ref = 0;
                        ci->i_wrbuffer_ref_head = 0;
                        drop++;
                }
                spin_unlock(&mdsc->cap_dirty_lock);
        }
        spin_unlock(&ci->i_ceph_lock);
        while (drop--)
                iput(inode);
        return 0;
}

/*
 * caller must hold session s_mutex
 */
static void remove_session_caps(struct ceph_mds_session *session)
{
        dout("remove_session_caps on %p\n", session);
        iterate_session_caps(session, remove_session_caps_cb, NULL);

        spin_lock(&session->s_cap_lock);
        if (session->s_nr_caps > 0) {
                struct super_block *sb = session->s_mdsc->fsc->sb;
                struct inode *inode;
                struct ceph_cap *cap, *prev = NULL;
                struct ceph_vino vino;
                /*
                 * iterate_session_caps() skips inodes that are being
                 * deleted, we need to wait until deletions are complete.
                 * __wait_on_freeing_inode() is designed for the job,
                 * but it is not exported, so use lookup inode function
                 * to access it.
                 */
                while (!list_empty(&session->s_caps)) {
                        cap = list_entry(session->s_caps.next,
                                         struct ceph_cap, session_caps);
                        if (cap == prev)
                                break;
                        prev = cap;
                        vino = cap->ci->i_vino;
                        spin_unlock(&session->s_cap_lock);

                        inode = ceph_find_inode(sb, vino);
                        iput(inode);

                        spin_lock(&session->s_cap_lock);
                }
        }
        spin_unlock(&session->s_cap_lock);

        BUG_ON(session->s_nr_caps > 0);
        BUG_ON(!list_empty(&session->s_cap_flushing));
        cleanup_cap_releases(session);
}

/*
 * wake up any threads waiting on this session's caps.  if the cap is
 * old (didn't get renewed on the client reconnect), remove it now.
 *
 * caller must hold s_mutex.
 */
static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap,
                              void *arg)
{
        struct ceph_inode_info *ci = ceph_inode(inode);

        wake_up_all(&ci->i_cap_wq);
        if (arg) {
                spin_lock(&ci->i_ceph_lock);
                ci->i_wanted_max_size = 0;
                ci->i_requested_max_size = 0;
                spin_unlock(&ci->i_ceph_lock);
        }
        return 0;
}

static void wake_up_session_caps(struct ceph_mds_session *session,
                                 int reconnect)
{
        dout("wake_up_session_caps %p mds%d\n", session, session->s_mds);
        iterate_session_caps(session, wake_up_session_cb,
                             (void *)(unsigned long)reconnect);
}

/*
 * Send periodic message to MDS renewing all currently held caps.  The
 * ack will reset the expiration for all caps from this session.
 *
 * caller holds s_mutex
 */
static int send_renew_caps(struct ceph_mds_client *mdsc,
                           struct ceph_mds_session *session)
{
        struct ceph_msg *msg;
        int state;

        if (time_after_eq(jiffies, session->s_cap_ttl) &&
            time_after_eq(session->s_cap_ttl, session->s_renew_requested))
                pr_info("mds%d caps stale\n", session->s_mds);
        session->s_renew_requested = jiffies;

        /* do not try to renew caps until a recovering mds has reconnected
         * with its clients. */
        state = ceph_mdsmap_get_state(mdsc->mdsmap, session->s_mds);
        if (state < CEPH_MDS_STATE_RECONNECT) {
                dout("send_renew_caps ignoring mds%d (%s)\n",
                     session->s_mds, ceph_mds_state_name(state));
                return 0;
        }

        dout("send_renew_caps to mds%d (%s)\n", session->s_mds,
                ceph_mds_state_name(state));
        msg = create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS,
                                 ++session->s_renew_seq);
        if (!msg)
                return -ENOMEM;
        ceph_con_send(&session->s_con, msg);
        return 0;
}

static int send_flushmsg_ack(struct ceph_mds_client *mdsc,
                             struct ceph_mds_session *session, u64 seq)
{
        struct ceph_msg *msg;

        dout("send_flushmsg_ack to mds%d (%s) seq %lld\n",
             session->s_mds, session_state_name(session->s_state), seq);
        msg = create_session_msg(CEPH_SESSION_FLUSHMSG_ACK, seq);
        if (!msg)
                return -ENOMEM;
        ceph_con_send(&session->s_con, msg);
        return 0;
}


/*
 * Note new cap ttl, and any transition from stale -> not stale (fresh?).
 *
 * Called under session->s_mutex
 */
static void renewed_caps(struct ceph_mds_client *mdsc,
                         struct ceph_mds_session *session, int is_renew)
{
        int was_stale;
        int wake = 0;

        spin_lock(&session->s_cap_lock);
        was_stale = is_renew && time_after_eq(jiffies, session->s_cap_ttl);

        session->s_cap_ttl = session->s_renew_requested +
                mdsc->mdsmap->m_session_timeout*HZ;

        if (was_stale) {
                if (time_before(jiffies, session->s_cap_ttl)) {
                        pr_info("mds%d caps renewed\n", session->s_mds);
                        wake = 1;
                } else {
                        pr_info("mds%d caps still stale\n", session->s_mds);
                }
        }
        dout("renewed_caps mds%d ttl now %lu, was %s, now %s\n",
             session->s_mds, session->s_cap_ttl, was_stale ? "stale" : "fresh",
             time_before(jiffies, session->s_cap_ttl) ? "fresh" : "stale");
        spin_unlock(&session->s_cap_lock);

        if (wake)
                wake_up_session_caps(session, 0);
}

/*
 * send a session close request
 */
static int request_close_session(struct ceph_mds_client *mdsc,
                                 struct ceph_mds_session *session)
{
        struct ceph_msg *msg;

        dout("request_close_session mds%d state %s seq %lld\n",
             session->s_mds, session_state_name(session->s_state),
             session->s_seq);
        msg = create_session_msg(CEPH_SESSION_REQUEST_CLOSE, session->s_seq);
        if (!msg)
                return -ENOMEM;
        ceph_con_send(&session->s_con, msg);
        return 0;
}

/*
 * Called with s_mutex held.
 */
static int __close_session(struct ceph_mds_client *mdsc,
                         struct ceph_mds_session *session)
{
        if (session->s_state >= CEPH_MDS_SESSION_CLOSING)
                return 0;
        session->s_state = CEPH_MDS_SESSION_CLOSING;
        return request_close_session(mdsc, session);
}

/*
 * Trim old(er) caps.
 *
 * Because we can't cache an inode without one or more caps, we do
 * this indirectly: if a cap is unused, we prune its aliases, at which
 * point the inode will hopefully get dropped too.
 *
 * Yes, this is a bit sloppy.  Our only real goal here is to respond to
 * memory pressure from the MDS, though, so it needn't be perfect.
 */
static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
{
        struct ceph_mds_session *session = arg;
        struct ceph_inode_info *ci = ceph_inode(inode);
        int used, wanted, oissued, mine;

        if (session->s_trim_caps <= 0)
                return -1;

        spin_lock(&ci->i_ceph_lock);
        mine = cap->issued | cap->implemented;
        used = __ceph_caps_used(ci);
        wanted = __ceph_caps_file_wanted(ci);
        oissued = __ceph_caps_issued_other(ci, cap);

        dout("trim_caps_cb %p cap %p mine %s oissued %s used %s wanted %s\n",
             inode, cap, ceph_cap_string(mine), ceph_cap_string(oissued),
             ceph_cap_string(used), ceph_cap_string(wanted));
        if (cap == ci->i_auth_cap) {
                if (ci->i_dirty_caps | ci->i_flushing_caps)
                        goto out;
                if ((used | wanted) & CEPH_CAP_ANY_WR)
                        goto out;
        }
        if ((used | wanted) & ~oissued & mine)
                goto out;   /* we need these caps */

        session->s_trim_caps--;
        if (oissued) {
                /* we aren't the only cap.. just remove us */
                __ceph_remove_cap(cap, true);
        } else {
                /* try to drop referring dentries */
                spin_unlock(&ci->i_ceph_lock);
                d_prune_aliases(inode);
                dout("trim_caps_cb %p cap %p  pruned, count now %d\n",
                     inode, cap, atomic_read(&inode->i_count));
                return 0;
        }

out:
        spin_unlock(&ci->i_ceph_lock);
        return 0;
}

/*
 * Trim session cap count down to some max number.
 */
static int trim_caps(struct ceph_mds_client *mdsc,
                     struct ceph_mds_session *session,
                     int max_caps)
{
        int trim_caps = session->s_nr_caps - max_caps;

        dout("trim_caps mds%d start: %d / %d, trim %d\n",
             session->s_mds, session->s_nr_caps, max_caps, trim_caps);
        if (trim_caps > 0) {
                session->s_trim_caps = trim_caps;
                iterate_session_caps(session, trim_caps_cb, session);
                dout("trim_caps mds%d done: %d / %d, trimmed %d\n",
                     session->s_mds, session->s_nr_caps, max_caps,
                        trim_caps - session->s_trim_caps);
                session->s_trim_caps = 0;
        }

        ceph_add_cap_releases(mdsc, session);
        ceph_send_cap_releases(mdsc, session);
        return 0;
}

/*
 * Allocate cap_release messages.  If there is a partially full message
 * in the queue, try to allocate enough to cover its remainder, so that
 * we can send it immediately.
 *
 * Called under s_mutex.
 */
int ceph_add_cap_releases(struct ceph_mds_client *mdsc,
                          struct ceph_mds_session *session)
{
        struct ceph_msg *msg, *partial = NULL;
        struct ceph_mds_cap_release *head;
        int err = -ENOMEM;
        int extra = mdsc->fsc->mount_options->cap_release_safety;
        int num;

        dout("add_cap_releases %p mds%d extra %d\n", session, session->s_mds,
             extra);

        spin_lock(&session->s_cap_lock);

        if (!list_empty(&session->s_cap_releases)) {
                msg = list_first_entry(&session->s_cap_releases,
                                       struct ceph_msg, list_head);
                head = msg->front.iov_base;
                num = le32_to_cpu(head->num);
                if (num) {
                        dout(" partial %p with (%d/%d)\n", msg, num,
                             (int)CEPH_CAPS_PER_RELEASE);
                        extra += CEPH_CAPS_PER_RELEASE - num;
                        partial = msg;
                }
        }
        while (session->s_num_cap_releases < session->s_nr_caps + extra) {
                spin_unlock(&session->s_cap_lock);
                msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE, PAGE_CACHE_SIZE,
                                   GFP_NOFS, false);
                if (!msg)
                        goto out_unlocked;
                dout("add_cap_releases %p msg %p now %d\n", session, msg,
                     (int)msg->front.iov_len);
                head = msg->front.iov_base;
                head->num = cpu_to_le32(0);
                msg->front.iov_len = sizeof(*head);
                spin_lock(&session->s_cap_lock);
                list_add(&msg->list_head, &session->s_cap_releases);
                session->s_num_cap_releases += CEPH_CAPS_PER_RELEASE;
        }

        if (partial) {
                head = partial->front.iov_base;
                num = le32_to_cpu(head->num);
                dout(" queueing partial %p with %d/%d\n", partial, num,
                     (int)CEPH_CAPS_PER_RELEASE);
                list_move_tail(&partial->list_head,
                               &session->s_cap_releases_done);
                session->s_num_cap_releases -= CEPH_CAPS_PER_RELEASE - num;
        }
        err = 0;
        spin_unlock(&session->s_cap_lock);
out_unlocked:
        return err;
}
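
/*
 * Sizing note (an assumption from the constants used above, not a
 * definition): each CEPH_MSG_CLIENT_CAPRELEASE message is allocated
 * with a one-page front, and CEPH_CAPS_PER_RELEASE is roughly how many
 * cap release items fit in that page after the ceph_mds_cap_release
 * header, which is why the preallocation loop counts in
 * CEPH_CAPS_PER_RELEASE increments.
 */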

/*
 * flush all dirty inode data to disk.
 *
 * returns true if we've flushed through want_flush_seq
 */
static int check_cap_flush(struct ceph_mds_client *mdsc, u64 want_flush_seq)
{
        int mds, ret = 1;

        dout("check_cap_flush want %lld\n", want_flush_seq);
        mutex_lock(&mdsc->mutex);
        for (mds = 0; ret && mds < mdsc->max_sessions; mds++) {
                struct ceph_mds_session *session = mdsc->sessions[mds];

                if (!session)
                        continue;
                get_session(session);
                mutex_unlock(&mdsc->mutex);

                mutex_lock(&session->s_mutex);
                if (!list_empty(&session->s_cap_flushing)) {
                        struct ceph_inode_info *ci =
                                list_entry(session->s_cap_flushing.next,
                                           struct ceph_inode_info,
                                           i_flushing_item);
                        struct inode *inode = &ci->vfs_inode;

                        spin_lock(&ci->i_ceph_lock);
                        if (ci->i_cap_flush_seq <= want_flush_seq) {
                                dout("check_cap_flush still flushing %p "
                                     "seq %lld <= %lld to mds%d\n", inode,
                                     ci->i_cap_flush_seq, want_flush_seq,
                                     session->s_mds);
                                ret = 0;
                        }
                        spin_unlock(&ci->i_ceph_lock);
                }
                mutex_unlock(&session->s_mutex);
                ceph_put_mds_session(session);

                if (!ret)
                        return ret;
                mutex_lock(&mdsc->mutex);
        }

        mutex_unlock(&mdsc->mutex);
        dout("check_cap_flush ok, flushed thru %lld\n", want_flush_seq);
        return ret;
}

/*
 * called under s_mutex
 */
void ceph_send_cap_releases(struct ceph_mds_client *mdsc,
                            struct ceph_mds_session *session)
{
        struct ceph_msg *msg;

        dout("send_cap_releases mds%d\n", session->s_mds);
        spin_lock(&session->s_cap_lock);
        while (!list_empty(&session->s_cap_releases_done)) {
                msg = list_first_entry(&session->s_cap_releases_done,
                                       struct ceph_msg, list_head);
                list_del_init(&msg->list_head);
                spin_unlock(&session->s_cap_lock);
                msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
                dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
                ceph_con_send(&session->s_con, msg);
                spin_lock(&session->s_cap_lock);
        }
        spin_unlock(&session->s_cap_lock);
}

static void discard_cap_releases(struct ceph_mds_client *mdsc,
                                 struct ceph_mds_session *session)
{
        struct ceph_msg *msg;
        struct ceph_mds_cap_release *head;
        unsigned num;

        dout("discard_cap_releases mds%d\n", session->s_mds);

        if (!list_empty(&session->s_cap_releases)) {
                /* zero out the in-progress message */
                msg = list_first_entry(&session->s_cap_releases,
                                       struct ceph_msg, list_head);
                head = msg->front.iov_base;
                num = le32_to_cpu(head->num);
                dout("discard_cap_releases mds%d %p %u\n",
                     session->s_mds, msg, num);
                head->num = cpu_to_le32(0);
                msg->front.iov_len = sizeof(*head);
                session->s_num_cap_releases += num;
        }

        /* requeue completed messages */
        while (!list_empty(&session->s_cap_releases_done)) {
                msg = list_first_entry(&session->s_cap_releases_done,
                                       struct ceph_msg, list_head);
                list_del_init(&msg->list_head);

                head = msg->front.iov_base;
                num = le32_to_cpu(head->num);
                dout("discard_cap_releases mds%d %p %u\n", session->s_mds, msg,
                     num);
                session->s_num_cap_releases += num;
                head->num = cpu_to_le32(0);
                msg->front.iov_len = sizeof(*head);
                list_add(&msg->list_head, &session->s_cap_releases);
        }
}
1496
1497 /*
1498  * requests
1499  */
1500
1501 int ceph_alloc_readdir_reply_buffer(struct ceph_mds_request *req,
1502                                     struct inode *dir)
1503 {
1504         struct ceph_inode_info *ci = ceph_inode(dir);
1505         struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
1506         struct ceph_mount_options *opt = req->r_mdsc->fsc->mount_options;
1507         size_t size = sizeof(*rinfo->dir_in) + sizeof(*rinfo->dir_dname_len) +
1508                       sizeof(*rinfo->dir_dname) + sizeof(*rinfo->dir_dlease);
1509         int order, num_entries;
1510
1511         spin_lock(&ci->i_ceph_lock);
1512         num_entries = ci->i_files + ci->i_subdirs;
1513         spin_unlock(&ci->i_ceph_lock);
1514         num_entries = max(num_entries, 1);
1515         num_entries = min(num_entries, opt->max_readdir);
1516
1517         order = get_order(size * num_entries);
1518         while (order >= 0) {
1519                 rinfo->dir_in = (void*)__get_free_pages(GFP_NOFS | __GFP_NOWARN,
1520                                                         order);
1521                 if (rinfo->dir_in)
1522                         break;
1523                 order--;
1524         }
1525         if (!rinfo->dir_in)
1526                 return -ENOMEM;
1527
1528         num_entries = (PAGE_SIZE << order) / size;
1529         num_entries = min(num_entries, opt->max_readdir);
1530
1531         rinfo->dir_buf_size = PAGE_SIZE << order;
1532         req->r_num_caps = num_entries + 1;
1533         req->r_args.readdir.max_entries = cpu_to_le32(num_entries);
1534         req->r_args.readdir.max_bytes = cpu_to_le32(opt->max_readdir_bytes);
1535         return 0;
1536 }
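
/*
 * Worked example for the sizing above (hypothetical numbers): with a
 * per-entry footprint of roughly 60 bytes and num_entries = 1024, the
 * ideal buffer is ~60 KB, which get_order() rounds up to order 4
 * (64 KB with 4 KB pages).  If that allocation fails, we retry at
 * order 3 (32 KB); num_entries is then recomputed as 32768 / 60 = 546,
 * so the MDS is only asked for as many entries as the buffer we
 * actually obtained can hold.
 */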
1537
1538 /*
1539  * Create an mds request.
1540  */
1541 struct ceph_mds_request *
1542 ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
1543 {
1544         struct ceph_mds_request *req = kzalloc(sizeof(*req), GFP_NOFS);
1545
1546         if (!req)
1547                 return ERR_PTR(-ENOMEM);
1548
1549         mutex_init(&req->r_fill_mutex);
1550         req->r_mdsc = mdsc;
1551         req->r_started = jiffies;
1552         req->r_resend_mds = -1;
1553         INIT_LIST_HEAD(&req->r_unsafe_dir_item);
1554         req->r_fmode = -1;
1555         kref_init(&req->r_kref);
1556         INIT_LIST_HEAD(&req->r_wait);
1557         init_completion(&req->r_completion);
1558         init_completion(&req->r_safe_completion);
1559         INIT_LIST_HEAD(&req->r_unsafe_item);
1560
1561         req->r_stamp = CURRENT_TIME;
1562
1563         req->r_op = op;
1564         req->r_direct_mode = mode;
1565         return req;
1566 }
1567
1568 /*
1569  * Return the oldest (lowest tid) request in the request tree, or NULL if none.
1570  *
1571  * called under mdsc->mutex.
1572  */
1573 static struct ceph_mds_request *__get_oldest_req(struct ceph_mds_client *mdsc)
1574 {
1575         if (RB_EMPTY_ROOT(&mdsc->request_tree))
1576                 return NULL;
1577         return rb_entry(rb_first(&mdsc->request_tree),
1578                         struct ceph_mds_request, r_node);
1579 }
1580
1581 static u64 __get_oldest_tid(struct ceph_mds_client *mdsc)
1582 {
1583         struct ceph_mds_request *req = __get_oldest_req(mdsc);
1584
1585         if (req)
1586                 return req->r_tid;
1587         return 0;
1588 }
1589
1590 /*
1591  * Build a dentry's path.  Allocate on heap; caller must kfree.  Based
1592  * on build_path_from_dentry in fs/cifs/dir.c.
1593  *
1594  * If @stop_on_nosnap, generate path relative to the first non-snapped
1595  * inode.
1596  *
1597  * Encode hidden .snap dirs as a double /, i.e.
1598  *   foo/.snap/bar -> foo//bar
1599  */
1600 char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base,
1601                            int stop_on_nosnap)
1602 {
1603         struct dentry *temp;
1604         char *path;
1605         int len, pos;
1606         unsigned seq;
1607
1608         if (dentry == NULL)
1609                 return ERR_PTR(-EINVAL);
1610
1611 retry:
1612         len = 0;
1613         seq = read_seqbegin(&rename_lock);
1614         rcu_read_lock();
1615         for (temp = dentry; !IS_ROOT(temp);) {
1616                 struct inode *inode = temp->d_inode;
1617                 if (inode && ceph_snap(inode) == CEPH_SNAPDIR)
1618                         len++;  /* slash only */
1619                 else if (stop_on_nosnap && inode &&
1620                          ceph_snap(inode) == CEPH_NOSNAP)
1621                         break;
1622                 else
1623                         len += 1 + temp->d_name.len;
1624                 temp = temp->d_parent;
1625         }
1626         rcu_read_unlock();
1627         if (len)
1628                 len--;  /* no leading '/' */
1629
1630         path = kmalloc(len+1, GFP_NOFS);
1631         if (path == NULL)
1632                 return ERR_PTR(-ENOMEM);
1633         pos = len;
1634         path[pos] = 0;  /* trailing null */
1635         rcu_read_lock();
1636         for (temp = dentry; !IS_ROOT(temp) && pos != 0; ) {
1637                 struct inode *inode;
1638
1639                 spin_lock(&temp->d_lock);
1640                 inode = temp->d_inode;
1641                 if (inode && ceph_snap(inode) == CEPH_SNAPDIR) {
1642                         dout("build_path path+%d: %p SNAPDIR\n",
1643                              pos, temp);
1644                 } else if (stop_on_nosnap && inode &&
1645                            ceph_snap(inode) == CEPH_NOSNAP) {
1646                         spin_unlock(&temp->d_lock);
1647                         break;
1648                 } else {
1649                         pos -= temp->d_name.len;
1650                         if (pos < 0) {
1651                                 spin_unlock(&temp->d_lock);
1652                                 break;
1653                         }
1654                         strncpy(path + pos, temp->d_name.name,
1655                                 temp->d_name.len);
1656                 }
1657                 spin_unlock(&temp->d_lock);
1658                 if (pos)
1659                         path[--pos] = '/';
1660                 temp = temp->d_parent;
1661         }
1662         rcu_read_unlock();
1663         if (pos != 0 || read_seqretry(&rename_lock, seq)) {
1664                 pr_err("build_path did not end path lookup where "
1665                        "expected, namelen is %d, pos is %d\n", len, pos);
1666                 /* presumably this is only possible if racing with a
1667                    rename of one of the parent directories (we cannot
1668                    lock the dentries above us to prevent this, but
1669                    retrying should be harmless) */
1670                 kfree(path);
1671                 goto retry;
1672         }
1673
1674         *base = ceph_ino(temp->d_inode);
1675         *plen = len;
1676         dout("build_path on %p %d built %llx '%.*s'\n",
1677              dentry, d_count(dentry), *base, len, path);
1678         return path;
1679 }
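
/*
 * Illustrative sketch (not part of the original file): the core of
 * the walk above fills the buffer from the end toward the front, so
 * components come out in root-to-leaf order in a single pass:
 *
 *	pos = len;
 *	path[pos] = '\0';
 *	for (d = dentry; !IS_ROOT(d); d = d->d_parent) {
 *		pos -= d->d_name.len;
 *		memcpy(path + pos, d->d_name.name, d->d_name.len);
 *		if (pos)
 *			path[--pos] = '/';
 *	}
 *
 * The real code additionally emits a bare '/' for snapdirs, stops at
 * the first non-snapped inode when @stop_on_nosnap is set, and
 * retries the whole walk if rename_lock's seqcount shows a concurrent
 * rename moved one of our ancestors.
 */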
1680
1681 static int build_dentry_path(struct dentry *dentry,
1682                              const char **ppath, int *ppathlen, u64 *pino,
1683                              int *pfreepath)
1684 {
1685         char *path;
1686
1687         if (ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP) {
1688                 *pino = ceph_ino(dentry->d_parent->d_inode);
1689                 *ppath = dentry->d_name.name;
1690                 *ppathlen = dentry->d_name.len;
1691                 return 0;
1692         }
1693         path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
1694         if (IS_ERR(path))
1695                 return PTR_ERR(path);
1696         *ppath = path;
1697         *pfreepath = 1;
1698         return 0;
1699 }
1700
1701 static int build_inode_path(struct inode *inode,
1702                             const char **ppath, int *ppathlen, u64 *pino,
1703                             int *pfreepath)
1704 {
1705         struct dentry *dentry;
1706         char *path;
1707
1708         if (ceph_snap(inode) == CEPH_NOSNAP) {
1709                 *pino = ceph_ino(inode);
1710                 *ppathlen = 0;
1711                 return 0;
1712         }
1713         dentry = d_find_alias(inode);
1714         path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
1715         dput(dentry);
1716         if (IS_ERR(path))
1717                 return PTR_ERR(path);
1718         *ppath = path;
1719         *pfreepath = 1;
1720         return 0;
1721 }
1722
1723 /*
1724  * request arguments may be specified via an inode *, a dentry *, or
1725  * an explicit ino+path.
1726  */
1727 static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
1728                                   const char *rpath, u64 rino,
1729                                   const char **ppath, int *pathlen,
1730                                   u64 *ino, int *freepath)
1731 {
1732         int r = 0;
1733
1734         if (rinode) {
1735                 r = build_inode_path(rinode, ppath, pathlen, ino, freepath);
1736                 dout(" inode %p %llx.%llx\n", rinode, ceph_ino(rinode),
1737                      ceph_snap(rinode));
1738         } else if (rdentry) {
1739                 r = build_dentry_path(rdentry, ppath, pathlen, ino, freepath);
1740                 dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen,
1741                      *ppath);
1742         } else if (rpath || rino) {
1743                 *ino = rino;
1744                 *ppath = rpath;
1745                 *pathlen = rpath ? strlen(rpath) : 0;
1746                 dout(" path %.*s\n", *pathlen, rpath);
1747         }
1748
1749         return r;
1750 }
1751
1752 /*
1753  * called under mdsc->mutex
1754  */
1755 static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
1756                                                struct ceph_mds_request *req,
1757                                                int mds)
1758 {
1759         struct ceph_msg *msg;
1760         struct ceph_mds_request_head *head;
1761         const char *path1 = NULL;
1762         const char *path2 = NULL;
1763         u64 ino1 = 0, ino2 = 0;
1764         int pathlen1 = 0, pathlen2 = 0;
1765         int freepath1 = 0, freepath2 = 0;
1766         int len;
1767         u16 releases;
1768         void *p, *end;
1769         int ret;
1770
1771         ret = set_request_path_attr(req->r_inode, req->r_dentry,
1772                               req->r_path1, req->r_ino1.ino,
1773                               &path1, &pathlen1, &ino1, &freepath1);
1774         if (ret < 0) {
1775                 msg = ERR_PTR(ret);
1776                 goto out;
1777         }
1778
1779         ret = set_request_path_attr(NULL, req->r_old_dentry,
1780                               req->r_path2, req->r_ino2.ino,
1781                               &path2, &pathlen2, &ino2, &freepath2);
1782         if (ret < 0) {
1783                 msg = ERR_PTR(ret);
1784                 goto out_free1;
1785         }
1786
1787         len = sizeof(*head) +
1788                 pathlen1 + pathlen2 + 2*(1 + sizeof(u32) + sizeof(u64)) +
1789                 sizeof(struct timespec);
1790
1791         /* calculate (max) length for cap releases */
1792         len += sizeof(struct ceph_mds_request_release) *
1793                 (!!req->r_inode_drop + !!req->r_dentry_drop +
1794                  !!req->r_old_inode_drop + !!req->r_old_dentry_drop);
1795         if (req->r_dentry_drop)
1796                 len += req->r_dentry->d_name.len;
1797         if (req->r_old_dentry_drop)
1798                 len += req->r_old_dentry->d_name.len;
1799
1800         msg = ceph_msg_new(CEPH_MSG_CLIENT_REQUEST, len, GFP_NOFS, false);
1801         if (!msg) {
1802                 msg = ERR_PTR(-ENOMEM);
1803                 goto out_free2;
1804         }
1805
1806         msg->hdr.version = 2;
1807         msg->hdr.tid = cpu_to_le64(req->r_tid);
1808
1809         head = msg->front.iov_base;
1810         p = msg->front.iov_base + sizeof(*head);
1811         end = msg->front.iov_base + msg->front.iov_len;
1812
1813         head->mdsmap_epoch = cpu_to_le32(mdsc->mdsmap->m_epoch);
1814         head->op = cpu_to_le32(req->r_op);
1815         head->caller_uid = cpu_to_le32(from_kuid(&init_user_ns, req->r_uid));
1816         head->caller_gid = cpu_to_le32(from_kgid(&init_user_ns, req->r_gid));
1817         head->args = req->r_args;
1818
1819         ceph_encode_filepath(&p, end, ino1, path1);
1820         ceph_encode_filepath(&p, end, ino2, path2);
1821
1822         /* make note of release offset, in case we need to replay */
1823         req->r_request_release_offset = p - msg->front.iov_base;
1824
1825         /* cap releases */
1826         releases = 0;
1827         if (req->r_inode_drop)
1828                 releases += ceph_encode_inode_release(&p,
1829                       req->r_inode ? req->r_inode : req->r_dentry->d_inode,
1830                       mds, req->r_inode_drop, req->r_inode_unless, 0);
1831         if (req->r_dentry_drop)
1832                 releases += ceph_encode_dentry_release(&p, req->r_dentry,
1833                        mds, req->r_dentry_drop, req->r_dentry_unless);
1834         if (req->r_old_dentry_drop)
1835                 releases += ceph_encode_dentry_release(&p, req->r_old_dentry,
1836                        mds, req->r_old_dentry_drop, req->r_old_dentry_unless);
1837         if (req->r_old_inode_drop)
1838                 releases += ceph_encode_inode_release(&p,
1839                       req->r_old_dentry->d_inode,
1840                       mds, req->r_old_inode_drop, req->r_old_inode_unless, 0);
1841         head->num_releases = cpu_to_le16(releases);
1842
1843         /* time stamp */
1844         ceph_encode_copy(&p, &req->r_stamp, sizeof(req->r_stamp));
1845
1846         BUG_ON(p > end);
1847         msg->front.iov_len = p - msg->front.iov_base;
1848         msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
1849
1850         if (req->r_data_len) {
1851                 /* outbound data set only by ceph_sync_setxattr() */
1852                 BUG_ON(!req->r_pages);
1853                 ceph_msg_data_add_pages(msg, req->r_pages, req->r_data_len, 0);
1854         }
1855
1856         msg->hdr.data_len = cpu_to_le32(req->r_data_len);
1857         msg->hdr.data_off = cpu_to_le16(0);
1858
1859 out_free2:
1860         if (freepath2)
1861                 kfree((char *)path2);
1862 out_free1:
1863         if (freepath1)
1864                 kfree((char *)path1);
1865 out:
1866         return msg;
1867 }
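
/*
 * For reference, the front of the request message assembled above is
 * laid out as:
 *
 *	struct ceph_mds_request_head
 *	filepath1  (ino1 + length-prefixed path1)
 *	filepath2  (ino2 + length-prefixed path2)
 *	0-4 cap/dentry release records
 *	timestamp  (struct timespec r_stamp)
 *
 * r_request_release_offset records where the releases begin, so a
 * replayed request can simply be truncated at that point.
 */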
1868
1869 /*
1870  * called under mdsc->mutex if error, under no mutex if
1871  * success.
1872  */
1873 static void complete_request(struct ceph_mds_client *mdsc,
1874                              struct ceph_mds_request *req)
1875 {
1876         if (req->r_callback)
1877                 req->r_callback(mdsc, req);
1878         else
1879                 complete_all(&req->r_completion);
1880 }
1881
1882 /*
1883  * called under mdsc->mutex
1884  */
1885 static int __prepare_send_request(struct ceph_mds_client *mdsc,
1886                                   struct ceph_mds_request *req,
1887                                   int mds)
1888 {
1889         struct ceph_mds_request_head *rhead;
1890         struct ceph_msg *msg;
1891         int flags = 0;
1892
1893         req->r_attempts++;
1894         if (req->r_inode) {
1895                 struct ceph_cap *cap =
1896                         ceph_get_cap_for_mds(ceph_inode(req->r_inode), mds);
1897
1898                 if (cap)
1899                         req->r_sent_on_mseq = cap->mseq;
1900                 else
1901                         req->r_sent_on_mseq = -1;
1902         }
1903         dout("prepare_send_request %p tid %lld %s (attempt %d)\n", req,
1904              req->r_tid, ceph_mds_op_name(req->r_op), req->r_attempts);
1905
1906         if (req->r_got_unsafe) {
1907                 /*
1908                  * Replay.  Do not regenerate message (and rebuild
1909                  * paths, etc.); just use the original message.
1910                  * Rebuilding paths will break for renames because
1911                  * d_move mangles the src name.
1912                  */
1913                 msg = req->r_request;
1914                 rhead = msg->front.iov_base;
1915
1916                 flags = le32_to_cpu(rhead->flags);
1917                 flags |= CEPH_MDS_FLAG_REPLAY;
1918                 rhead->flags = cpu_to_le32(flags);
1919
1920                 if (req->r_target_inode)
1921                         rhead->ino = cpu_to_le64(ceph_ino(req->r_target_inode));
1922
1923                 rhead->num_retry = req->r_attempts - 1;
1924
1925                 /* remove cap/dentry releases from message */
1926                 rhead->num_releases = 0;
1927                 msg->hdr.front_len = cpu_to_le32(req->r_request_release_offset);
1928                 msg->front.iov_len = req->r_request_release_offset;
1929                 return 0;
1930         }
1931
1932         if (req->r_request) {
1933                 ceph_msg_put(req->r_request);
1934                 req->r_request = NULL;
1935         }
1936         msg = create_request_message(mdsc, req, mds);
1937         if (IS_ERR(msg)) {
1938                 req->r_err = PTR_ERR(msg);
1939                 complete_request(mdsc, req);
1940                 return PTR_ERR(msg);
1941         }
1942         req->r_request = msg;
1943
1944         rhead = msg->front.iov_base;
1945         rhead->oldest_client_tid = cpu_to_le64(__get_oldest_tid(mdsc));
1946         if (req->r_got_unsafe)
1947                 flags |= CEPH_MDS_FLAG_REPLAY;
1948         if (req->r_locked_dir)
1949                 flags |= CEPH_MDS_FLAG_WANT_DENTRY;
1950         rhead->flags = cpu_to_le32(flags);
1951         rhead->num_fwd = req->r_num_fwd;
1952         rhead->num_retry = req->r_attempts - 1;
1953         rhead->ino = 0;
1954
1955         dout(" r_locked_dir = %p\n", req->r_locked_dir);
1956         return 0;
1957 }
1958
1959 /*
1960  * send request, or put it on the appropriate wait list.
1961  */
1962 static int __do_request(struct ceph_mds_client *mdsc,
1963                         struct ceph_mds_request *req)
1964 {
1965         struct ceph_mds_session *session = NULL;
1966         int mds = -1;
1967         int err = -EAGAIN;
1968
1969         if (req->r_err || req->r_got_result) {
1970                 if (req->r_aborted)
1971                         __unregister_request(mdsc, req);
1972                 goto out;
1973         }
1974
1975         if (req->r_timeout &&
1976             time_after_eq(jiffies, req->r_started + req->r_timeout)) {
1977                 dout("do_request timed out\n");
1978                 err = -EIO;
1979                 goto finish;
1980         }
1981
1982         put_request_session(req);
1983
1984         mds = __choose_mds(mdsc, req);
1985         if (mds < 0 ||
1986             ceph_mdsmap_get_state(mdsc->mdsmap, mds) < CEPH_MDS_STATE_ACTIVE) {
1987                 dout("do_request no mds or not active, waiting for map\n");
1988                 list_add(&req->r_wait, &mdsc->waiting_for_map);
1989                 goto out;
1990         }
1991
1992         /* get, open session */
1993         session = __ceph_lookup_mds_session(mdsc, mds);
1994         if (!session) {
1995                 session = register_session(mdsc, mds);
1996                 if (IS_ERR(session)) {
1997                         err = PTR_ERR(session);
1998                         goto finish;
1999                 }
2000         }
2001         req->r_session = get_session(session);
2002
2003         dout("do_request mds%d session %p state %s\n", mds, session,
2004              session_state_name(session->s_state));
2005         if (session->s_state != CEPH_MDS_SESSION_OPEN &&
2006             session->s_state != CEPH_MDS_SESSION_HUNG) {
2007                 if (session->s_state == CEPH_MDS_SESSION_NEW ||
2008                     session->s_state == CEPH_MDS_SESSION_CLOSING)
2009                         __open_session(mdsc, session);
2010                 list_add(&req->r_wait, &session->s_waiting);
2011                 goto out_session;
2012         }
2013
2014         /* send request */
2015         req->r_resend_mds = -1;   /* forget any previous mds hint */
2016
2017         if (req->r_request_started == 0)   /* note request start time */
2018                 req->r_request_started = jiffies;
2019
2020         err = __prepare_send_request(mdsc, req, mds);
2021         if (!err) {
2022                 ceph_msg_get(req->r_request);
2023                 ceph_con_send(&session->s_con, req->r_request);
2024         }
2025
2026 out_session:
2027         ceph_put_mds_session(session);
2028 out:
2029         return err;
2030
2031 finish:
2032         req->r_err = err;
2033         complete_request(mdsc, req);
2034         goto out;
2035 }
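
/*
 * Note on the flow above: a request either (a) finishes early on
 * error or timeout, (b) parks on mdsc->waiting_for_map when no
 * suitable MDS is active, (c) parks on session->s_waiting while a
 * session is being opened, or (d) is encoded and sent.  Parked
 * requests are later re-driven through __do_request() by
 * __wake_requests() and kick_requests().
 */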
2036
2037 /*
2038  * called under mdsc->mutex
2039  */
2040 static void __wake_requests(struct ceph_mds_client *mdsc,
2041                             struct list_head *head)
2042 {
2043         struct ceph_mds_request *req;
2044         LIST_HEAD(tmp_list);
2045
2046         list_splice_init(head, &tmp_list);
2047
2048         while (!list_empty(&tmp_list)) {
2049                 req = list_entry(tmp_list.next,
2050                                  struct ceph_mds_request, r_wait);
2051                 list_del_init(&req->r_wait);
2052                 dout(" wake request %p tid %llu\n", req, req->r_tid);
2053                 __do_request(mdsc, req);
2054         }
2055 }
2056
2057 /*
2058  * Wake up threads with requests pending for @mds, so that they can
2059  * resubmit their requests to a possibly different mds.
2060  */
2061 static void kick_requests(struct ceph_mds_client *mdsc, int mds)
2062 {
2063         struct ceph_mds_request *req;
2064         struct rb_node *p;
2065
2066         dout("kick_requests mds%d\n", mds);
2067         for (p = rb_first(&mdsc->request_tree); p; p = rb_next(p)) {
2068                 req = rb_entry(p, struct ceph_mds_request, r_node);
2069                 if (req->r_got_unsafe)
2070                         continue;
2071                 if (req->r_session &&
2072                     req->r_session->s_mds == mds) {
2073                         dout(" kicking tid %llu\n", req->r_tid);
2074                         __do_request(mdsc, req);
2075                 }
2076         }
2077 }
2078
2079 void ceph_mdsc_submit_request(struct ceph_mds_client *mdsc,
2080                               struct ceph_mds_request *req)
2081 {
2082         dout("submit_request on %p\n", req);
2083         mutex_lock(&mdsc->mutex);
2084         __register_request(mdsc, req, NULL);
2085         __do_request(mdsc, req);
2086         mutex_unlock(&mdsc->mutex);
2087 }
2088
2089 /*
2090  * Synchronously perform an mds request, taking care of all of
2091  * the session setup, forwarding, and retry details.
2092  */
2093 int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
2094                          struct inode *dir,
2095                          struct ceph_mds_request *req)
2096 {
2097         int err;
2098
2099         dout("do_request on %p\n", req);
2100
2101         /* take CAP_PIN refs for r_inode, r_locked_dir, r_old_dentry */
2102         if (req->r_inode)
2103                 ceph_get_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
2104         if (req->r_locked_dir)
2105                 ceph_get_cap_refs(ceph_inode(req->r_locked_dir), CEPH_CAP_PIN);
2106         if (req->r_old_dentry_dir)
2107                 ceph_get_cap_refs(ceph_inode(req->r_old_dentry_dir),
2108                                   CEPH_CAP_PIN);
2109
2110         /* issue */
2111         mutex_lock(&mdsc->mutex);
2112         __register_request(mdsc, req, dir);
2113         __do_request(mdsc, req);
2114
2115         if (req->r_err) {
2116                 err = req->r_err;
2117                 __unregister_request(mdsc, req);
2118                 dout("do_request early error %d\n", err);
2119                 goto out;
2120         }
2121
2122         /* wait */
2123         mutex_unlock(&mdsc->mutex);
2124         dout("do_request waiting\n");
2125         if (req->r_timeout) {
2126                 err = (long)wait_for_completion_killable_timeout(
2127                         &req->r_completion, req->r_timeout);
2128                 if (err == 0)
2129                         err = -EIO;
2130         } else {
2131                 err = wait_for_completion_killable(&req->r_completion);
2132         }
2133         dout("do_request waited, got %d\n", err);
2134         mutex_lock(&mdsc->mutex);
2135
2136         /* only abort if we didn't race with a real reply */
2137         if (req->r_got_result) {
2138                 err = le32_to_cpu(req->r_reply_info.head->result);
2139         } else if (err < 0) {
2140                 dout("aborted request %lld with %d\n", req->r_tid, err);
2141
2142                 /*
2143                  * ensure we aren't running concurrently with
2144                  * ceph_fill_trace or ceph_readdir_prepopulate, which
2145                  * rely on locks (dir mutex) held by our caller.
2146                  */
2147                 mutex_lock(&req->r_fill_mutex);
2148                 req->r_err = err;
2149                 req->r_aborted = true;
2150                 mutex_unlock(&req->r_fill_mutex);
2151
2152                 if (req->r_locked_dir &&
2153                     (req->r_op & CEPH_MDS_OP_WRITE))
2154                         ceph_invalidate_dir_request(req);
2155         } else {
2156                 err = req->r_err;
2157         }
2158
2159 out:
2160         mutex_unlock(&mdsc->mutex);
2161         dout("do_request %p done, result %d\n", req, err);
2162         return err;
2163 }
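
/*
 * Illustrative caller sketch (hypothetical, not from this file): a
 * typical synchronous metadata operation builds a request, fills in
 * its arguments, and lets ceph_mdsc_do_request() deal with session
 * setup, forwarding and retries:
 *
 *	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LOOKUP,
 *				       USE_ANY_MDS);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	req->r_dentry = dget(dentry);
 *	req->r_num_caps = 2;
 *	err = ceph_mdsc_do_request(mdsc, NULL, req);
 *	ceph_mdsc_put_request(req);
 *
 * Callers in fs/ceph/dir.c and fs/ceph/file.c follow this pattern.
 */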
2164
2165 /*
2166  * Invalidate dir's completeness and dentry lease state on an aborted MDS
2167  * namespace request.
2168  */
2169 void ceph_invalidate_dir_request(struct ceph_mds_request *req)
2170 {
2171         struct inode *inode = req->r_locked_dir;
2172
2173         dout("invalidate_dir_request %p (complete, lease(s))\n", inode);
2174
2175         ceph_dir_clear_complete(inode);
2176         if (req->r_dentry)
2177                 ceph_invalidate_dentry_lease(req->r_dentry);
2178         if (req->r_old_dentry)
2179                 ceph_invalidate_dentry_lease(req->r_old_dentry);
2180 }
2181
2182 /*
2183  * Handle mds reply.
2184  *
2185  * We take the session mutex and parse and process the reply immediately.
2186  * This preserves the logical ordering of replies, capabilities, etc., sent
2187  * by the MDS as they are applied to our local cache.
2188  */
2189 static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
2190 {
2191         struct ceph_mds_client *mdsc = session->s_mdsc;
2192         struct ceph_mds_request *req;
2193         struct ceph_mds_reply_head *head = msg->front.iov_base;
2194         struct ceph_mds_reply_info_parsed *rinfo;  /* parsed reply info */
2195         u64 tid;
2196         int err, result;
2197         int mds = session->s_mds;
2198
2199         if (msg->front.iov_len < sizeof(*head)) {
2200                 pr_err("mdsc_handle_reply got corrupt (short) reply\n");
2201                 ceph_msg_dump(msg);
2202                 return;
2203         }
2204
2205         /* get request, session */
2206         tid = le64_to_cpu(msg->hdr.tid);
2207         mutex_lock(&mdsc->mutex);
2208         req = __lookup_request(mdsc, tid);
2209         if (!req) {
2210                 dout("handle_reply on unknown tid %llu\n", tid);
2211                 mutex_unlock(&mdsc->mutex);
2212                 return;
2213         }
2214         dout("handle_reply %p\n", req);
2215
2216         /* correct session? */
2217         if (req->r_session != session) {
2218                 pr_err("mdsc_handle_reply got %llu on session mds%d"
2219                        " not mds%d\n", tid, session->s_mds,
2220                        req->r_session ? req->r_session->s_mds : -1);
2221                 mutex_unlock(&mdsc->mutex);
2222                 goto out;
2223         }
2224
2225         /* dup? */
2226         if ((req->r_got_unsafe && !head->safe) ||
2227             (req->r_got_safe && head->safe)) {
2228                 pr_warn("got a dup %s reply on %llu from mds%d\n",
2229                            head->safe ? "safe" : "unsafe", tid, mds);
2230                 mutex_unlock(&mdsc->mutex);
2231                 goto out;
2232         }
2233         if (req->r_got_safe && !head->safe) {
2234                 pr_warn("got unsafe after safe on %llu from mds%d\n",
2235                            tid, mds);
2236                 mutex_unlock(&mdsc->mutex);
2237                 goto out;
2238         }
2239
2240         result = le32_to_cpu(head->result);
2241
2242         /*
2243          * Handle an ESTALE:
2244          * if we're not talking to the authority, resend to it;
2245          * if the authority has changed while we weren't looking,
2246          * resend to the new authority;
2247          * otherwise we just have to return the ESTALE to the caller.
2248          */
2249         if (result == -ESTALE) {
2250                 dout("got ESTALE on request %llu", req->r_tid);
2251                 if (req->r_direct_mode != USE_AUTH_MDS) {
2252                         dout("not using auth, setting for that now");
2253                         req->r_direct_mode = USE_AUTH_MDS;
2254                         __do_request(mdsc, req);
2255                         mutex_unlock(&mdsc->mutex);
2256                         goto out;
2257                 } else  {
2258                         int mds = __choose_mds(mdsc, req);
2259                         if (mds >= 0 && mds != req->r_session->s_mds) {
2260                                 dout("but auth changed, so resending");
2261                                 __do_request(mdsc, req);
2262                                 mutex_unlock(&mdsc->mutex);
2263                                 goto out;
2264                         }
2265                 }
2266                 dout("have to return ESTALE on request %llu", req->r_tid);
2267         }
2268
2269
2270         if (head->safe) {
2271                 req->r_got_safe = true;
2272                 __unregister_request(mdsc, req);
2273
2274                 if (req->r_got_unsafe) {
2275                         /*
2276                          * We already handled the unsafe response, now do the
2277                          * cleanup.  No need to examine the response; the MDS
2278                          * doesn't include any result info in the safe
2279                          * response.  And even if it did, there is nothing
2280                          * useful we could do with a revised return value.
2281                          */
2282                         dout("got safe reply %llu, mds%d\n", tid, mds);
2283                         list_del_init(&req->r_unsafe_item);
2284
2285                         /* last unsafe request during umount? */
2286                         if (mdsc->stopping && !__get_oldest_req(mdsc))
2287                                 complete_all(&mdsc->safe_umount_waiters);
2288                         mutex_unlock(&mdsc->mutex);
2289                         goto out;
2290                 }
2291         } else {
2292                 req->r_got_unsafe = true;
2293                 list_add_tail(&req->r_unsafe_item, &req->r_session->s_unsafe);
2294         }
2295
2296         dout("handle_reply tid %lld result %d\n", tid, result);
2297         rinfo = &req->r_reply_info;
2298         err = parse_reply_info(msg, rinfo, session->s_con.peer_features);
2299         mutex_unlock(&mdsc->mutex);
2300
2301         mutex_lock(&session->s_mutex);
2302         if (err < 0) {
2303                 pr_err("mdsc_handle_reply got corrupt reply mds%d(tid:%lld)\n", mds, tid);
2304                 ceph_msg_dump(msg);
2305                 goto out_err;
2306         }
2307
2308         /* snap trace */
2309         if (rinfo->snapblob_len) {
2310                 down_write(&mdsc->snap_rwsem);
2311                 ceph_update_snap_trace(mdsc, rinfo->snapblob,
2312                                rinfo->snapblob + rinfo->snapblob_len,
2313                                le32_to_cpu(head->op) == CEPH_MDS_OP_RMSNAP);
2314                 downgrade_write(&mdsc->snap_rwsem);
2315         } else {
2316                 down_read(&mdsc->snap_rwsem);
2317         }
2318
2319         /* insert trace into our cache */
2320         mutex_lock(&req->r_fill_mutex);
2321         err = ceph_fill_trace(mdsc->fsc->sb, req, req->r_session);
2322         if (err == 0) {
2323                 if (result == 0 && (req->r_op == CEPH_MDS_OP_READDIR ||
2324                                     req->r_op == CEPH_MDS_OP_LSSNAP))
2325                         ceph_readdir_prepopulate(req, req->r_session);
2326                 ceph_unreserve_caps(mdsc, &req->r_caps_reservation);
2327         }
2328         mutex_unlock(&req->r_fill_mutex);
2329
2330         up_read(&mdsc->snap_rwsem);
2331 out_err:
2332         mutex_lock(&mdsc->mutex);
2333         if (!req->r_aborted) {
2334                 if (err) {
2335                         req->r_err = err;
2336                 } else {
2337                         req->r_reply = msg;
2338                         ceph_msg_get(msg);
2339                         req->r_got_result = true;
2340                 }
2341         } else {
2342                 dout("reply arrived after request %lld was aborted\n", tid);
2343         }
2344         mutex_unlock(&mdsc->mutex);
2345
2346         ceph_add_cap_releases(mdsc, req->r_session);
2347         mutex_unlock(&session->s_mutex);
2348
2349         /* kick calling process */
2350         complete_request(mdsc, req);
2351 out:
2352         ceph_mdsc_put_request(req);
2353         return;
2354 }
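
/*
 * Note on the unsafe/safe sequence handled above: for mutating ops
 * the MDS may send an "unsafe" reply (applied, but not yet durably
 * journaled) followed later by a "safe" one.  We complete the waiter
 * on the first reply and keep the request on the session's s_unsafe
 * list until the safe reply retires it, so that it can be replayed
 * if the MDS restarts in between.
 */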
2355
2356
2357
2358 /*
2359  * handle mds notification that our request has been forwarded.
2360  */
2361 static void handle_forward(struct ceph_mds_client *mdsc,
2362                            struct ceph_mds_session *session,
2363                            struct ceph_msg *msg)
2364 {
2365         struct ceph_mds_request *req;
2366         u64 tid = le64_to_cpu(msg->hdr.tid);
2367         u32 next_mds;
2368         u32 fwd_seq;
2369         int err = -EINVAL;
2370         void *p = msg->front.iov_base;
2371         void *end = p + msg->front.iov_len;
2372
2373         ceph_decode_need(&p, end, 2*sizeof(u32), bad);
2374         next_mds = ceph_decode_32(&p);
2375         fwd_seq = ceph_decode_32(&p);
2376
2377         mutex_lock(&mdsc->mutex);
2378         req = __lookup_request(mdsc, tid);
2379         if (!req) {
2380                 dout("forward tid %llu to mds%d - req dne\n", tid, next_mds);
2381                 goto out;  /* dup reply? */
2382         }
2383
2384         if (req->r_aborted) {
2385                 dout("forward tid %llu aborted, unregistering\n", tid);
2386                 __unregister_request(mdsc, req);
2387         } else if (fwd_seq <= req->r_num_fwd) {
2388                 dout("forward tid %llu to mds%d - old seq %d <= %d\n",
2389                      tid, next_mds, req->r_num_fwd, fwd_seq);
2390         } else {
2391                 /* resend. forward race not possible; mds would drop */
2392                 dout("forward tid %llu to mds%d (we resend)\n", tid, next_mds);
2393                 BUG_ON(req->r_err);
2394                 BUG_ON(req->r_got_result);
2395                 req->r_num_fwd = fwd_seq;
2396                 req->r_resend_mds = next_mds;
2397                 put_request_session(req);
2398                 __do_request(mdsc, req);
2399         }
2400         ceph_mdsc_put_request(req);
2401 out:
2402         mutex_unlock(&mdsc->mutex);
2403         return;
2404
2405 bad:
2406         pr_err("mdsc_handle_forward decode error err=%d\n", err);
2407 }
2408
2409 /*
2410  * handle a mds session control message
2411  */
2412 static void handle_session(struct ceph_mds_session *session,
2413                            struct ceph_msg *msg)
2414 {
2415         struct ceph_mds_client *mdsc = session->s_mdsc;
2416         u32 op;
2417         u64 seq;
2418         int mds = session->s_mds;
2419         struct ceph_mds_session_head *h = msg->front.iov_base;
2420         int wake = 0;
2421
2422         /* decode */
2423         if (msg->front.iov_len != sizeof(*h))
2424                 goto bad;
2425         op = le32_to_cpu(h->op);
2426         seq = le64_to_cpu(h->seq);
2427
2428         mutex_lock(&mdsc->mutex);
2429         if (op == CEPH_SESSION_CLOSE)
2430                 __unregister_session(mdsc, session);
2431         /* FIXME: this ttl calculation is generous */
2432         session->s_ttl = jiffies + HZ*mdsc->mdsmap->m_session_autoclose;
2433         mutex_unlock(&mdsc->mutex);
2434
2435         mutex_lock(&session->s_mutex);
2436
2437         dout("handle_session mds%d %s %p state %s seq %llu\n",
2438              mds, ceph_session_op_name(op), session,
2439              session_state_name(session->s_state), seq);
2440
2441         if (session->s_state == CEPH_MDS_SESSION_HUNG) {
2442                 session->s_state = CEPH_MDS_SESSION_OPEN;
2443                 pr_info("mds%d came back\n", session->s_mds);
2444         }
2445
2446         switch (op) {
2447         case CEPH_SESSION_OPEN:
2448                 if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
2449                         pr_info("mds%d reconnect success\n", session->s_mds);
2450                 session->s_state = CEPH_MDS_SESSION_OPEN;
2451                 renewed_caps(mdsc, session, 0);
2452                 wake = 1;
2453                 if (mdsc->stopping)
2454                         __close_session(mdsc, session);
2455                 break;
2456
2457         case CEPH_SESSION_RENEWCAPS:
2458                 if (session->s_renew_seq == seq)
2459                         renewed_caps(mdsc, session, 1);
2460                 break;
2461
2462         case CEPH_SESSION_CLOSE:
2463                 if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
2464                         pr_info("mds%d reconnect denied\n", session->s_mds);
2465                 remove_session_caps(session);
2466                 wake = 1; /* for good measure */
2467                 wake_up_all(&mdsc->session_close_wq);
2468                 kick_requests(mdsc, mds);
2469                 break;
2470
2471         case CEPH_SESSION_STALE:
2472                 pr_info("mds%d caps went stale, renewing\n",
2473                         session->s_mds);
2474                 spin_lock(&session->s_gen_ttl_lock);
2475                 session->s_cap_gen++;
2476                 session->s_cap_ttl = jiffies - 1;
2477                 spin_unlock(&session->s_gen_ttl_lock);
2478                 send_renew_caps(mdsc, session);
2479                 break;
2480
2481         case CEPH_SESSION_RECALL_STATE:
2482                 trim_caps(mdsc, session, le32_to_cpu(h->max_caps));
2483                 break;
2484
2485         case CEPH_SESSION_FLUSHMSG:
2486                 send_flushmsg_ack(mdsc, session, seq);
2487                 break;
2488
2489         default:
2490                 pr_err("mdsc_handle_session bad op %d mds%d\n", op, mds);
2491                 WARN_ON(1);
2492         }
2493
2494         mutex_unlock(&session->s_mutex);
2495         if (wake) {
2496                 mutex_lock(&mdsc->mutex);
2497                 __wake_requests(mdsc, &session->s_waiting);
2498                 mutex_unlock(&mdsc->mutex);
2499         }
2500         return;
2501
2502 bad:
2503         pr_err("mdsc_handle_session corrupt message mds%d len %d\n", mds,
2504                (int)msg->front.iov_len);
2505         ceph_msg_dump(msg);
2506         return;
2507 }
2508
2509
2510 /*
2511  * called under session->s_mutex.
2512  */
2513 static void replay_unsafe_requests(struct ceph_mds_client *mdsc,
2514                                    struct ceph_mds_session *session)
2515 {
2516         struct ceph_mds_request *req, *nreq;
2517         int err;
2518
2519         dout("replay_unsafe_requests mds%d\n", session->s_mds);
2520
2521         mutex_lock(&mdsc->mutex);
2522         list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item) {
2523                 err = __prepare_send_request(mdsc, req, session->s_mds);
2524                 if (!err) {
2525                         ceph_msg_get(req->r_request);
2526                         ceph_con_send(&session->s_con, req->r_request);
2527                 }
2528         }
2529         mutex_unlock(&mdsc->mutex);
2530 }
2531
2532 /*
2533  * Encode information about a cap for a reconnect with the MDS.
2534  */
2535 static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
2536                           void *arg)
2537 {
2538         union {
2539                 struct ceph_mds_cap_reconnect v2;
2540                 struct ceph_mds_cap_reconnect_v1 v1;
2541         } rec;
2542         size_t reclen;
2543         struct ceph_inode_info *ci;
2544         struct ceph_reconnect_state *recon_state = arg;
2545         struct ceph_pagelist *pagelist = recon_state->pagelist;
2546         char *path;
2547         int pathlen, err;
2548         u64 pathbase;
2549         struct dentry *dentry;
2550
2551         ci = cap->ci;
2552
2553         dout(" adding %p ino %llx.%llx cap %p %lld %s\n",
2554              inode, ceph_vinop(inode), cap, cap->cap_id,
2555              ceph_cap_string(cap->issued));
2556         err = ceph_pagelist_encode_64(pagelist, ceph_ino(inode));
2557         if (err)
2558                 return err;
2559
2560         dentry = d_find_alias(inode);
2561         if (dentry) {
2562                 path = ceph_mdsc_build_path(dentry, &pathlen, &pathbase, 0);
2563                 if (IS_ERR(path)) {
2564                         err = PTR_ERR(path);
2565                         goto out_dput;
2566                 }
2567         } else {
2568                 path = NULL;
2569                 pathlen = 0;
2570         }
2571         err = ceph_pagelist_encode_string(pagelist, path, pathlen);
2572         if (err)
2573                 goto out_free;
2574
2575         spin_lock(&ci->i_ceph_lock);
2576         cap->seq = 0;        /* reset cap seq */
2577         cap->issue_seq = 0;  /* and issue_seq */
2578         cap->mseq = 0;       /* and migrate_seq */
2579         cap->cap_gen = cap->session->s_cap_gen;
2580
2581         if (recon_state->flock) {
2582                 rec.v2.cap_id = cpu_to_le64(cap->cap_id);
2583                 rec.v2.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
2584                 rec.v2.issued = cpu_to_le32(cap->issued);
2585                 rec.v2.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
2586                 rec.v2.pathbase = cpu_to_le64(pathbase);
2587                 rec.v2.flock_len = 0;
2588                 reclen = sizeof(rec.v2);
2589         } else {
2590                 rec.v1.cap_id = cpu_to_le64(cap->cap_id);
2591                 rec.v1.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
2592                 rec.v1.issued = cpu_to_le32(cap->issued);
2593                 rec.v1.size = cpu_to_le64(inode->i_size);
2594                 ceph_encode_timespec(&rec.v1.mtime, &inode->i_mtime);
2595                 ceph_encode_timespec(&rec.v1.atime, &inode->i_atime);
2596                 rec.v1.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
2597                 rec.v1.pathbase = cpu_to_le64(pathbase);
2598                 reclen = sizeof(rec.v1);
2599         }
2600         spin_unlock(&ci->i_ceph_lock);
2601
2602         if (recon_state->flock) {
2603                 int num_fcntl_locks, num_flock_locks;
2604                 struct ceph_filelock *flocks;
2605
2606 encode_again:
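                /*
                 * The lock counts can change once i_lock is dropped,
                 * so the buffer we size here may turn out too small;
                 * ceph_encode_locks_to_buffer() returns -ENOSPC in
                 * that case and we recount and retry.
                 */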
2607                 spin_lock(&inode->i_lock);
2608                 ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks);
2609                 spin_unlock(&inode->i_lock);
2610                 flocks = kmalloc((num_fcntl_locks+num_flock_locks) *
2611                                  sizeof(struct ceph_filelock), GFP_NOFS);
2612                 if (!flocks) {
2613                         err = -ENOMEM;
2614                         goto out_free;
2615                 }
2616                 spin_lock(&inode->i_lock);
2617                 err = ceph_encode_locks_to_buffer(inode, flocks,
2618                                                   num_fcntl_locks,
2619                                                   num_flock_locks);
2620                 spin_unlock(&inode->i_lock);
2621                 if (err) {
2622                         kfree(flocks);
2623                         if (err == -ENOSPC)
2624                                 goto encode_again;
2625                         goto out_free;
2626                 }
2627                 /*
2628                  * number of encoded locks is stable, so copy to pagelist
2629                  */
2630                 rec.v2.flock_len = cpu_to_le32(2*sizeof(u32) +
2631                                     (num_fcntl_locks+num_flock_locks) *
2632                                     sizeof(struct ceph_filelock));
2633                 err = ceph_pagelist_append(pagelist, &rec, reclen);
2634                 if (!err)
2635                         err = ceph_locks_to_pagelist(flocks, pagelist,
2636                                                      num_fcntl_locks,
2637                                                      num_flock_locks);
2638                 kfree(flocks);
2639         } else {
2640                 err = ceph_pagelist_append(pagelist, &rec, reclen);
2641         }
2642
2643         recon_state->nr_caps++;
2644 out_free:
2645         kfree(path);
2646 out_dput:
2647         dput(dentry);
2648         return err;
2649 }
2650
2651
2652 /*
2653  * If an MDS fails and recovers, clients need to reconnect in order to
2654  * reestablish shared state.  This includes all caps issued through
2655  * this session _and_ the snap_realm hierarchy.  Because it's not
2656  * clear which snap realms the mds cares about, we send everything we
2657  * know about; that ensures we'll then get any new info the
2658  * recovering MDS might have.
2659  *
2660  * This is a relatively heavyweight operation, but it's rare.
2661  *
2662  * called with mdsc->mutex held.
2663  */
2664 static void send_mds_reconnect(struct ceph_mds_client *mdsc,
2665                                struct ceph_mds_session *session)
2666 {
2667         struct ceph_msg *reply;
2668         struct rb_node *p;
2669         int mds = session->s_mds;
2670         int err = -ENOMEM;
2671         int s_nr_caps;
2672         struct ceph_pagelist *pagelist;
2673         struct ceph_reconnect_state recon_state;
2674
2675         pr_info("mds%d reconnect start\n", mds);
2676
2677         pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS);
2678         if (!pagelist)
2679                 goto fail_nopagelist;
2680         ceph_pagelist_init(pagelist);
2681
2682         reply = ceph_msg_new(CEPH_MSG_CLIENT_RECONNECT, 0, GFP_NOFS, false);
2683         if (!reply)
2684                 goto fail_nomsg;
2685
2686         mutex_lock(&session->s_mutex);
2687         session->s_state = CEPH_MDS_SESSION_RECONNECTING;
2688         session->s_seq = 0;
2689
2690         ceph_con_close(&session->s_con);
2691         ceph_con_open(&session->s_con,
2692                       CEPH_ENTITY_TYPE_MDS, mds,
2693                       ceph_mdsmap_get_addr(mdsc->mdsmap, mds));
2694
2695         /* replay unsafe requests */
2696         replay_unsafe_requests(mdsc, session);
2697
2698         down_read(&mdsc->snap_rwsem);
2699
2700         dout("session %p state %s\n", session,
2701              session_state_name(session->s_state));
2702
2703         spin_lock(&session->s_gen_ttl_lock);
2704         session->s_cap_gen++;
2705         spin_unlock(&session->s_gen_ttl_lock);
2706
2707         spin_lock(&session->s_cap_lock);
2708         /*
2709          * notify __ceph_remove_cap() that we are composing cap reconnect.
2710          * If a cap gets released before being added to the cap reconnect,
2711          * __ceph_remove_cap() should skip queuing cap release.
2712          */
2713         session->s_cap_reconnect = 1;
2714         /* drop any old queued cap releases; we're about to reestablish that state */
2715         discard_cap_releases(mdsc, session);
2716         spin_unlock(&session->s_cap_lock);
2717
2718         /* traverse this session's caps */
2719         s_nr_caps = session->s_nr_caps;
2720         err = ceph_pagelist_encode_32(pagelist, s_nr_caps);
2721         if (err)
2722                 goto fail;
2723
2724         recon_state.nr_caps = 0;
2725         recon_state.pagelist = pagelist;
2726         recon_state.flock = session->s_con.peer_features & CEPH_FEATURE_FLOCK;
2727         err = iterate_session_caps(session, encode_caps_cb, &recon_state);
2728         if (err < 0)
2729                 goto fail;
2730
2731         spin_lock(&session->s_cap_lock);
2732         session->s_cap_reconnect = 0;
2733         spin_unlock(&session->s_cap_lock);
2734
2735         /*
2736          * Snaprealms: we provide the mds with the ino, seq (version), and
2737          * parent for all of our realms.  If the mds has any newer info,
2738          * it will tell us.
2739          */
2740         for (p = rb_first(&mdsc->snap_realms); p; p = rb_next(p)) {
2741                 struct ceph_snap_realm *realm =
2742                         rb_entry(p, struct ceph_snap_realm, node);
2743                 struct ceph_mds_snaprealm_reconnect sr_rec;
2744
2745                 dout(" adding snap realm %llx seq %lld parent %llx\n",
2746                      realm->ino, realm->seq, realm->parent_ino);
2747                 sr_rec.ino = cpu_to_le64(realm->ino);
2748                 sr_rec.seq = cpu_to_le64(realm->seq);
2749                 sr_rec.parent = cpu_to_le64(realm->parent_ino);
2750                 err = ceph_pagelist_append(pagelist, &sr_rec, sizeof(sr_rec));
2751                 if (err)
2752                         goto fail;
2753         }
2754
2755         if (recon_state.flock)
2756                 reply->hdr.version = cpu_to_le16(2);
2757
2758         /* raced with cap release? */
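        /*
         * The cap count was the first thing encoded into the pagelist,
         * so the corrected value can be patched directly into the head
         * page.
         */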
2759         if (s_nr_caps != recon_state.nr_caps) {
2760                 struct page *page = list_first_entry(&pagelist->head,
2761                                                      struct page, lru);
2762                 __le32 *addr = kmap_atomic(page);
2763                 *addr = cpu_to_le32(recon_state.nr_caps);
2764                 kunmap_atomic(addr);
2765         }
2766
2767         reply->hdr.data_len = cpu_to_le32(pagelist->length);
2768         ceph_msg_data_add_pagelist(reply, pagelist);
2769         ceph_con_send(&session->s_con, reply);
2770
2771         mutex_unlock(&session->s_mutex);
2772
2773         mutex_lock(&mdsc->mutex);
2774         __wake_requests(mdsc, &session->s_waiting);
2775         mutex_unlock(&mdsc->mutex);
2776
2777         up_read(&mdsc->snap_rwsem);
2778         return;
2779
2780 fail:
2781         ceph_msg_put(reply);
2782         up_read(&mdsc->snap_rwsem);
2783         mutex_unlock(&session->s_mutex);
2784 fail_nomsg:
2785         ceph_pagelist_release(pagelist);
2786         kfree(pagelist);
2787 fail_nopagelist:
2788         pr_err("error %d preparing reconnect for mds%d\n", err, mds);
2789         return;
2790 }
2791
2792
2793 /*
2794  * compare old and new mdsmaps, kicking requests
2795  * and closing out old connections as necessary
2796  *
2797  * called under mdsc->mutex.
2798  */
2799 static void check_new_map(struct ceph_mds_client *mdsc,
2800                           struct ceph_mdsmap *newmap,
2801                           struct ceph_mdsmap *oldmap)
2802 {
2803         int i;
2804         int oldstate, newstate;
2805         struct ceph_mds_session *s;
2806
2807         dout("check_new_map new %u old %u\n",
2808              newmap->m_epoch, oldmap->m_epoch);
2809
2810         for (i = 0; i < oldmap->m_max_mds && i < mdsc->max_sessions; i++) {
2811                 if (mdsc->sessions[i] == NULL)
2812                         continue;
2813                 s = mdsc->sessions[i];
2814                 oldstate = ceph_mdsmap_get_state(oldmap, i);
2815                 newstate = ceph_mdsmap_get_state(newmap, i);
2816
2817                 dout("check_new_map mds%d state %s%s -> %s%s (session %s)\n",
2818                      i, ceph_mds_state_name(oldstate),
2819                      ceph_mdsmap_is_laggy(oldmap, i) ? " (laggy)" : "",
2820                      ceph_mds_state_name(newstate),
2821                      ceph_mdsmap_is_laggy(newmap, i) ? " (laggy)" : "",
2822                      session_state_name(s->s_state));
2823
2824                 if (i >= newmap->m_max_mds ||
2825                     memcmp(ceph_mdsmap_get_addr(oldmap, i),
2826                            ceph_mdsmap_get_addr(newmap, i),
2827                            sizeof(struct ceph_entity_addr))) {
2828                         if (s->s_state == CEPH_MDS_SESSION_OPENING) {
2829                                 /* the session never opened, just close it
2830                                  * out now */
2831                                 __wake_requests(mdsc, &s->s_waiting);
2832                                 __unregister_session(mdsc, s);
2833                         } else {
2834                                 /* just close it */
2835                                 mutex_unlock(&mdsc->mutex);
2836                                 mutex_lock(&s->s_mutex);
2837                                 mutex_lock(&mdsc->mutex);
2838                                 ceph_con_close(&s->s_con);
2839                                 mutex_unlock(&s->s_mutex);
2840                                 s->s_state = CEPH_MDS_SESSION_RESTARTING;
2841                         }
2842
2843                         /* kick any requests waiting on the recovering mds */
2844                         kick_requests(mdsc, i);
2845                 } else if (oldstate == newstate) {
2846                         continue;  /* nothing new with this mds */
2847                 }
2848
2849                 /*
2850                  * send reconnect?
2851                  */
2852                 if (s->s_state == CEPH_MDS_SESSION_RESTARTING &&
2853                     newstate >= CEPH_MDS_STATE_RECONNECT) {
2854                         mutex_unlock(&mdsc->mutex);
2855                         send_mds_reconnect(mdsc, s);
2856                         mutex_lock(&mdsc->mutex);
2857                 }
2858
2859                 /*
2860                  * kick request on any mds that has gone active.
2861                  */
2862                 if (oldstate < CEPH_MDS_STATE_ACTIVE &&
2863                     newstate >= CEPH_MDS_STATE_ACTIVE) {
2864                         if (oldstate != CEPH_MDS_STATE_CREATING &&
2865                             oldstate != CEPH_MDS_STATE_STARTING)
2866                                 pr_info("mds%d recovery completed\n", s->s_mds);
2867                         kick_requests(mdsc, i);
2868                         ceph_kick_flushing_caps(mdsc, s);
2869                         wake_up_session_caps(s, 1);
2870                 }
2871         }
2872
2873         for (i = 0; i < newmap->m_max_mds && i < mdsc->max_sessions; i++) {
2874                 s = mdsc->sessions[i];
2875                 if (!s)
2876                         continue;
2877                 if (!ceph_mdsmap_is_laggy(newmap, i))
2878                         continue;
2879                 if (s->s_state == CEPH_MDS_SESSION_OPEN ||
2880                     s->s_state == CEPH_MDS_SESSION_HUNG ||
2881                     s->s_state == CEPH_MDS_SESSION_CLOSING) {
2882                         dout(" connecting to export targets of laggy mds%d\n",
2883                              i);
2884                         __open_export_target_sessions(mdsc, s);
2885                 }
2886         }
2887 }
2888
2889
2890
2891 /*
2892  * leases
2893  */
2894
2895 /*
2896  * caller must hold session s_mutex, dentry->d_lock
2897  */
2898 void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry)
2899 {
2900         struct ceph_dentry_info *di = ceph_dentry(dentry);
2901
2902         ceph_put_mds_session(di->lease_session);
2903         di->lease_session = NULL;
2904 }
2905
2906 static void handle_lease(struct ceph_mds_client *mdsc,
2907                          struct ceph_mds_session *session,
2908                          struct ceph_msg *msg)
2909 {
2910         struct super_block *sb = mdsc->fsc->sb;
2911         struct inode *inode;
2912         struct dentry *parent, *dentry;
2913         struct ceph_dentry_info *di;
2914         int mds = session->s_mds;
2915         struct ceph_mds_lease *h = msg->front.iov_base;
2916         u32 seq;
2917         struct ceph_vino vino;
2918         struct qstr dname;
2919         int release = 0;
2920
2921         dout("handle_lease from mds%d\n", mds);
2922
2923         /* decode */
2924         if (msg->front.iov_len < sizeof(*h) + sizeof(u32))
2925                 goto bad;
2926         vino.ino = le64_to_cpu(h->ino);
2927         vino.snap = CEPH_NOSNAP;
2928         seq = le32_to_cpu(h->seq);
2929         dname.name = (void *)h + sizeof(*h) + sizeof(u32);
2930         dname.len = msg->front.iov_len - sizeof(*h) - sizeof(u32);
2931         if (dname.len != get_unaligned_le32(h+1))
2932                 goto bad;
2933
2934         mutex_lock(&session->s_mutex);
2935         session->s_seq++;
2936
2937         /* lookup inode */
2938         inode = ceph_find_inode(sb, vino);
2939         dout("handle_lease %s, ino %llx %p %.*s\n",
2940              ceph_lease_op_name(h->action), vino.ino, inode,
2941              dname.len, dname.name);
2942         if (inode == NULL) {
2943                 dout("handle_lease no inode %llx\n", vino.ino);
2944                 goto release;
2945         }
2946
2947         /* dentry */
2948         parent = d_find_alias(inode);
2949         if (!parent) {
2950                 dout("no parent dentry on inode %p\n", inode);
2951                 WARN_ON(1);
2952                 goto release;  /* unexpected: no alias for this inode; release the lease anyway */
2953         }
2954         dname.hash = full_name_hash(dname.name, dname.len);
2955         dentry = d_lookup(parent, &dname);
2956         dput(parent);
2957         if (!dentry)
2958                 goto release;
2959
2960         spin_lock(&dentry->d_lock);
2961         di = ceph_dentry(dentry);
2962         switch (h->action) {
2963         case CEPH_MDS_LEASE_REVOKE:
2964                 if (di->lease_session == session) {
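                        /* if our lease seq is newer than the one being
                         * revoked, echo it back so the ack below covers
                         * our latest lease */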
2965                         if (ceph_seq_cmp(di->lease_seq, seq) > 0)
2966                                 h->seq = cpu_to_le32(di->lease_seq);
2967                         __ceph_mdsc_drop_dentry_lease(dentry);
2968                 }
2969                 release = 1;
2970                 break;
2971
2972         case CEPH_MDS_LEASE_RENEW:
2973                 if (di->lease_session == session &&
2974                     di->lease_gen == session->s_cap_gen &&
2975                     di->lease_renew_from &&
2976                     di->lease_renew_after == 0) {
2977                         unsigned long duration =
2978                                 le32_to_cpu(h->duration_ms) * HZ / 1000;
2979
2980                         di->lease_seq = seq;
2981                         dentry->d_time = di->lease_renew_from + duration;
2982                         di->lease_renew_after = di->lease_renew_from +
2983                                 (duration >> 1);
2984                         di->lease_renew_from = 0;
2985                 }
2986                 break;
2987         }
2988         spin_unlock(&dentry->d_lock);
2989         dput(dentry);
2990
2991         if (!release)
2992                 goto out;
2993
2994 release:
2995         /* let's just reuse the same message */
2996         h->action = CEPH_MDS_LEASE_REVOKE_ACK;
2997         ceph_msg_get(msg);
2998         ceph_con_send(&session->s_con, msg);
2999
3000 out:
3001         iput(inode);
3002         mutex_unlock(&session->s_mutex);
3003         return;
3004
3005 bad:
3006         pr_err("corrupt lease message\n");
3007         ceph_msg_dump(msg);
3008 }
3009
3010 void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
3011                               struct inode *inode,
3012                               struct dentry *dentry, char action,
3013                               u32 seq)
3014 {
3015         struct ceph_msg *msg;
3016         struct ceph_mds_lease *lease;
3017         int len = sizeof(*lease) + sizeof(u32);
3018         int dnamelen = 0;
3019
3020         dout("lease_send_msg inode %p dentry %p %s to mds%d\n",
3021              inode, dentry, ceph_lease_op_name(action), session->s_mds);
3022         dnamelen = dentry->d_name.len;
3023         len += dnamelen;
3024
3025         msg = ceph_msg_new(CEPH_MSG_CLIENT_LEASE, len, GFP_NOFS, false);
3026         if (!msg)
3027                 return;
3028         lease = msg->front.iov_base;
3029         lease->action = action;
3030         lease->ino = cpu_to_le64(ceph_vino(inode).ino);
3031         lease->first = lease->last = cpu_to_le64(ceph_vino(inode).snap);
3032         lease->seq = cpu_to_le32(seq);
3033         put_unaligned_le32(dnamelen, lease + 1);
3034         memcpy((void *)(lease + 1) + 4, dentry->d_name.name, dnamelen);
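        /* resulting front payload layout:
         *   struct ceph_mds_lease | __le32 dname length | dname bytes */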
3035
3036         /*
3037          * if this is a preemptive lease RELEASE, no need to
3038          * flush request stream, since the actual request will
3039          * soon follow.
3040          */
3041         msg->more_to_follow = (action == CEPH_MDS_LEASE_RELEASE);
3042
3043         ceph_con_send(&session->s_con, msg);
3044 }
3045
3046 /*
3047  * Preemptively release a lease we expect to invalidate anyway.
3048  * Both @inode and @dentry are required (see the BUG_ONs below).
3049  */
3050 void ceph_mdsc_lease_release(struct ceph_mds_client *mdsc, struct inode *inode,
3051                              struct dentry *dentry)
3052 {
3053         struct ceph_dentry_info *di;
3054         struct ceph_mds_session *session;
3055         u32 seq;
3056
3057         BUG_ON(inode == NULL);
3058         BUG_ON(dentry == NULL);
3059
3060         /* is dentry lease valid? */
3061         spin_lock(&dentry->d_lock);
3062         di = ceph_dentry(dentry);
3063         if (!di || !di->lease_session ||
3064             di->lease_session->s_mds < 0 ||
3065             di->lease_gen != di->lease_session->s_cap_gen ||
3066             !time_before(jiffies, dentry->d_time)) {
3067                 dout("lease_release inode %p dentry %p -- "
3068                      "no lease\n",
3069                      inode, dentry);
3070                 spin_unlock(&dentry->d_lock);
3071                 return;
3072         }
3073
3074         /* we do have a lease on this dentry; note mds and seq */
3075         session = ceph_get_mds_session(di->lease_session);
3076         seq = di->lease_seq;
3077         __ceph_mdsc_drop_dentry_lease(dentry);
3078         spin_unlock(&dentry->d_lock);
3079
3080         dout("lease_release inode %p dentry %p to mds%d\n",
3081              inode, dentry, session->s_mds);
3082         ceph_mdsc_lease_send_msg(session, inode, dentry,
3083                                  CEPH_MDS_LEASE_RELEASE, seq);
3084         ceph_put_mds_session(session);
3085 }
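
/*
 * Illustrative sketch only (hypothetical helper, not part of this
 * file): a caller about to unlink would preemptively release its
 * dentry lease before queuing the metadata request, e.g.:
 */
static void __maybe_unused example_release_before_unlink(
                struct ceph_mds_client *mdsc,
                struct inode *dir, struct dentry *dentry)
{
        /* drop the lease now; the unlink request follows shortly, so
         * the release is sent with more_to_follow set (see
         * ceph_mdsc_lease_send_msg above) */
        ceph_mdsc_lease_release(mdsc, dir, dentry);
}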
3086
3087 /*
3088  * drop all leases (and dentry refs) in preparation for umount
3089  */
3090 static void drop_leases(struct ceph_mds_client *mdsc)
3091 {
3092         int i;
3093
3094         dout("drop_leases\n");
3095         mutex_lock(&mdsc->mutex);
3096         for (i = 0; i < mdsc->max_sessions; i++) {
3097                 struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
3098                 if (!s)
3099                         continue;
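                /* cycle this session's mutex (below) so any lease or
                 * session work already holding it completes before we
                 * return */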
3100                 mutex_unlock(&mdsc->mutex);
3101                 mutex_lock(&s->s_mutex);
3102                 mutex_unlock(&s->s_mutex);
3103                 ceph_put_mds_session(s);
3104                 mutex_lock(&mdsc->mutex);
3105         }
3106         mutex_unlock(&mdsc->mutex);
3107 }
3108
3109
3110
3111 /*
3112  * delayed work -- periodically trim expired leases, renew caps with mds
3113  */
3114 static void schedule_delayed(struct ceph_mds_client *mdsc)
3115 {
3116         int delay = 5;
3117         unsigned long delay_jiffies = round_jiffies_relative(HZ * delay);
3118         schedule_delayed_work(&mdsc->delayed_work, delay_jiffies);
3119 }
3120
3121 static void delayed_work(struct work_struct *work)
3122 {
3123         int i;
3124         struct ceph_mds_client *mdsc =
3125                 container_of(work, struct ceph_mds_client, delayed_work.work);
3126         int renew_interval;
3127         int renew_caps;
3128
3129         dout("mdsc delayed_work\n");
3130         ceph_check_delayed_caps(mdsc);
3131
3132         mutex_lock(&mdsc->mutex);
3133         renew_interval = mdsc->mdsmap->m_session_timeout >> 2;
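        /* e.g. with ceph's default 60s mds session timeout,
         * renew_interval is 15s: we renew caps four times per
         * timeout period */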
3134         renew_caps = time_after_eq(jiffies, HZ*renew_interval +
3135                                    mdsc->last_renew_caps);
3136         if (renew_caps)
3137                 mdsc->last_renew_caps = jiffies;
3138
3139         for (i = 0; i < mdsc->max_sessions; i++) {
3140                 struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
3141                 if (s == NULL)
3142                         continue;
3143                 if (s->s_state == CEPH_MDS_SESSION_CLOSING) {
3144                         dout("resending session close request for mds%d\n",
3145                              s->s_mds);
3146                         request_close_session(mdsc, s);
3147                         ceph_put_mds_session(s);
3148                         continue;
3149                 }
3150                 if (s->s_ttl && time_after(jiffies, s->s_ttl)) {
3151                         if (s->s_state == CEPH_MDS_SESSION_OPEN) {
3152                                 s->s_state = CEPH_MDS_SESSION_HUNG;
3153                                 pr_info("mds%d hung\n", s->s_mds);
3154                         }
3155                 }
3156                 if (s->s_state < CEPH_MDS_SESSION_OPEN) {
3157                         /* this mds is failed or recovering, just wait */
3158                         ceph_put_mds_session(s);
3159                         continue;
3160                 }
3161                 mutex_unlock(&mdsc->mutex);
3162
3163                 mutex_lock(&s->s_mutex);
3164                 if (renew_caps)
3165                         send_renew_caps(mdsc, s);
3166                 else
3167                         ceph_con_keepalive(&s->s_con);
3168                 ceph_add_cap_releases(mdsc, s);
3169                 if (s->s_state == CEPH_MDS_SESSION_OPEN ||
3170                     s->s_state == CEPH_MDS_SESSION_HUNG)
3171                         ceph_send_cap_releases(mdsc, s);
3172                 mutex_unlock(&s->s_mutex);
3173                 ceph_put_mds_session(s);
3174
3175                 mutex_lock(&mdsc->mutex);
3176         }
3177         mutex_unlock(&mdsc->mutex);
3178
3179         schedule_delayed(mdsc);
3180 }
3181
3182 int ceph_mdsc_init(struct ceph_fs_client *fsc)
3183 {
3185         struct ceph_mds_client *mdsc;
3186
3187         mdsc = kzalloc(sizeof(struct ceph_mds_client), GFP_NOFS);
3188         if (!mdsc)
3189                 return -ENOMEM;
3190         mdsc->fsc = fsc;
3191         fsc->mdsc = mdsc;
3192         mutex_init(&mdsc->mutex);
3193         mdsc->mdsmap = kzalloc(sizeof(*mdsc->mdsmap), GFP_NOFS);
3194         if (mdsc->mdsmap == NULL) {
3195                 kfree(mdsc);
3196                 return -ENOMEM;
3197         }
3198
3199         init_completion(&mdsc->safe_umount_waiters);
3200         init_waitqueue_head(&mdsc->session_close_wq);
3201         INIT_LIST_HEAD(&mdsc->waiting_for_map);
3202         mdsc->sessions = NULL;
3203         mdsc->max_sessions = 0;
3204         mdsc->stopping = 0;
3205         init_rwsem(&mdsc->snap_rwsem);
3206         mdsc->snap_realms = RB_ROOT;
3207         INIT_LIST_HEAD(&mdsc->snap_empty);
3208         spin_lock_init(&mdsc->snap_empty_lock);
3209         mdsc->last_tid = 0;
3210         mdsc->request_tree = RB_ROOT;
3211         INIT_DELAYED_WORK(&mdsc->delayed_work, delayed_work);
3212         mdsc->last_renew_caps = jiffies;
3213         INIT_LIST_HEAD(&mdsc->cap_delay_list);
3214         spin_lock_init(&mdsc->cap_delay_lock);
3215         INIT_LIST_HEAD(&mdsc->snap_flush_list);
3216         spin_lock_init(&mdsc->snap_flush_lock);
3217         mdsc->cap_flush_seq = 0;
3218         INIT_LIST_HEAD(&mdsc->cap_dirty);
3219         INIT_LIST_HEAD(&mdsc->cap_dirty_migrating);
3220         mdsc->num_cap_flushing = 0;
3221         spin_lock_init(&mdsc->cap_dirty_lock);
3222         init_waitqueue_head(&mdsc->cap_flushing_wq);
3223         spin_lock_init(&mdsc->dentry_lru_lock);
3224         INIT_LIST_HEAD(&mdsc->dentry_lru);
3225
3226         ceph_caps_init(mdsc);
3227         ceph_adjust_min_caps(mdsc, fsc->min_caps);
3228
3229         return 0;
3230 }
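
/*
 * (Hypothetical usage sketch: the fs client setup path is expected to
 * pair ceph_mdsc_init() with ceph_mdsc_destroy() on its teardown or
 * error path, e.g.:
 *
 *      err = ceph_mdsc_init(fsc);
 *      if (err)
 *              goto fail;
 *      ...
 *      ceph_mdsc_destroy(fsc);
 * )
 */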
3231
3232 /*
3233  * Wait for safe replies on open mds requests.  If we time out, drop
3234  * all requests from the tree to avoid dangling dentry refs.
3235  */
3236 static void wait_requests(struct ceph_mds_client *mdsc)
3237 {
3238         struct ceph_mds_request *req;
3239         struct ceph_fs_client *fsc = mdsc->fsc;
3240
3241         mutex_lock(&mdsc->mutex);
3242         if (__get_oldest_req(mdsc)) {
3243                 mutex_unlock(&mdsc->mutex);
3244
3245                 dout("wait_requests waiting for requests\n");
3246                 wait_for_completion_timeout(&mdsc->safe_umount_waiters,
3247                                     fsc->client->options->mount_timeout * HZ);
3248
3249                 /* tear down remaining requests */
3250                 mutex_lock(&mdsc->mutex);
3251                 while ((req = __get_oldest_req(mdsc))) {
3252                         dout("wait_requests timed out on tid %llu\n",
3253                              req->r_tid);
3254                         __unregister_request(mdsc, req);
3255                 }
3256         }
3257         mutex_unlock(&mdsc->mutex);
3258         dout("wait_requests done\n");
3259 }
3260
3261 /*
3262  * called before the mount goes read-only and before dentries are
3263  * torn down.  (open question: does this still race with new lookups?)
3264  */
3265 void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc)
3266 {
3267         dout("pre_umount\n");
3268         mdsc->stopping = 1;
3269
3270         drop_leases(mdsc);
3271         ceph_flush_dirty_caps(mdsc);
3272         wait_requests(mdsc);
3273
3274         /*
3275          * wait for reply handlers to drop their request refs and
3276          * their inode/dcache refs
3277          */
3278         ceph_msgr_flush();
3279 }
3280
3281 /*
3282  * wait for all write mds requests to flush.
3283  */
3284 static void wait_unsafe_requests(struct ceph_mds_client *mdsc, u64 want_tid)
3285 {
3286         struct ceph_mds_request *req = NULL, *nextreq;
3287         struct rb_node *n;
3288
3289         mutex_lock(&mdsc->mutex);
3290         dout("wait_unsafe_requests want %lld\n", want_tid);
3291 restart:
3292         req = __get_oldest_req(mdsc);
3293         while (req && req->r_tid <= want_tid) {
3294                 /* find next request */
3295                 n = rb_next(&req->r_node);
3296                 if (n)
3297                         nextreq = rb_entry(n, struct ceph_mds_request, r_node);
3298                 else
3299                         nextreq = NULL;
3300                 if (req->r_op & CEPH_MDS_OP_WRITE) {
3301                         /* write op */
3302                         ceph_mdsc_get_request(req);
3303                         if (nextreq)
3304                                 ceph_mdsc_get_request(nextreq);
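                        /* hold refs on both requests: we drop
                         * mdsc->mutex and sleep below, and either could
                         * otherwise be unregistered and freed */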
3305                         mutex_unlock(&mdsc->mutex);
3306                         dout("wait_unsafe_requests  wait on %llu (want %llu)\n",
3307                              req->r_tid, want_tid);
3308                         wait_for_completion(&req->r_safe_completion);
3309                         mutex_lock(&mdsc->mutex);
3310                         ceph_mdsc_put_request(req);
3311                         if (!nextreq)
3312                                 break;  /* no next request existed before we slept, so we're done */
3313                         if (RB_EMPTY_NODE(&nextreq->r_node)) {
3314                                 /* next request was removed from tree */
3315                                 ceph_mdsc_put_request(nextreq);
3316                                 goto restart;
3317                         }
3318                         ceph_mdsc_put_request(nextreq);  /* won't go away */
3319                 }
3320                 req = nextreq;
3321         }
3322         mutex_unlock(&mdsc->mutex);
3323         dout("wait_unsafe_requests done\n");
3324 }
3325
3326 void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
3327 {
3328         u64 want_tid, want_flush;
3329
3330         if (mdsc->fsc->mount_state == CEPH_MOUNT_SHUTDOWN)
3331                 return;
3332
3333         dout("sync\n");
3334         mutex_lock(&mdsc->mutex);
3335         want_tid = mdsc->last_tid;
3336         want_flush = mdsc->cap_flush_seq;
3337         mutex_unlock(&mdsc->mutex);
3338         dout("sync want tid %lld flush_seq %lld\n", want_tid, want_flush);
3339
3340         ceph_flush_dirty_caps(mdsc);
3341
3342         wait_unsafe_requests(mdsc, want_tid);
3343         wait_event(mdsc->cap_flushing_wq, check_cap_flush(mdsc, want_flush));
3344 }
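
/*
 * note that ceph_mdsc_sync snapshots last_tid and cap_flush_seq up
 * front, so it waits only for requests and cap flushes outstanding
 * when the sync began, not for anything issued afterward.
 */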
3345
3346 /*
3347  * true if all sessions are closed, or we force unmount
3348  */
3349 static bool done_closing_sessions(struct ceph_mds_client *mdsc)
3350 {
3351         int i, n = 0;
3352
3353         if (mdsc->fsc->mount_state == CEPH_MOUNT_SHUTDOWN)
3354                 return true;
3355
3356         mutex_lock(&mdsc->mutex);
3357         for (i = 0; i < mdsc->max_sessions; i++)
3358                 if (mdsc->sessions[i])
3359                         n++;
3360         mutex_unlock(&mdsc->mutex);
3361         return n == 0;
3362 }
3363
3364 /*
3365  * called after sb is ro.
3366  * called after the superblock has been made read-only.
3367 void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
3368 {
3369         struct ceph_mds_session *session;
3370         int i;
3371         struct ceph_fs_client *fsc = mdsc->fsc;
3372         unsigned long timeout = fsc->client->options->mount_timeout * HZ;
3373
3374         dout("close_sessions\n");
3375
3376         /* close sessions */
3377         mutex_lock(&mdsc->mutex);
3378         for (i = 0; i < mdsc->max_sessions; i++) {
3379                 session = __ceph_lookup_mds_session(mdsc, i);
3380                 if (!session)
3381                         continue;
3382                 mutex_unlock(&mdsc->mutex);
3383                 mutex_lock(&session->s_mutex);
3384                 __close_session(mdsc, session);
3385                 mutex_unlock(&session->s_mutex);
3386                 ceph_put_mds_session(session);
3387                 mutex_lock(&mdsc->mutex);
3388         }
3389         mutex_unlock(&mdsc->mutex);
3390
3391         dout("waiting for sessions to close\n");
3392         wait_event_timeout(mdsc->session_close_wq, done_closing_sessions(mdsc),
3393                            timeout);
3394
3395         /* tear down remaining sessions */
3396         mutex_lock(&mdsc->mutex);
3397         for (i = 0; i < mdsc->max_sessions; i++) {
3398                 if (mdsc->sessions[i]) {
3399                         session = get_session(mdsc->sessions[i]);
3400                         __unregister_session(mdsc, session);
3401                         mutex_unlock(&mdsc->mutex);
3402                         mutex_lock(&session->s_mutex);
3403                         remove_session_caps(session);
3404                         mutex_unlock(&session->s_mutex);
3405                         ceph_put_mds_session(session);
3406                         mutex_lock(&mdsc->mutex);
3407                 }
3408         }
3409         WARN_ON(!list_empty(&mdsc->cap_delay_list));
3410         mutex_unlock(&mdsc->mutex);
3411
3412         ceph_cleanup_empty_realms(mdsc);
3413
3414         cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */
3415
3416         dout("stopped\n");
3417 }
3418
3419 static void ceph_mdsc_stop(struct ceph_mds_client *mdsc)
3420 {
3421         dout("stop\n");
3422         cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */
3423         if (mdsc->mdsmap)
3424                 ceph_mdsmap_destroy(mdsc->mdsmap);
3425         kfree(mdsc->sessions);
3426         ceph_caps_finalize(mdsc);
3427 }
3428
3429 void ceph_mdsc_destroy(struct ceph_fs_client *fsc)
3430 {
3431         struct ceph_mds_client *mdsc = fsc->mdsc;
3432
3433         dout("mdsc_destroy %p\n", mdsc);
3434         ceph_mdsc_stop(mdsc);
3435
3436         /* flush out any connection work with references to us */
3437         ceph_msgr_flush();
3438
3439         fsc->mdsc = NULL;
3440         kfree(mdsc);
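        /* mdsc is now freed; the dout below prints only the stale
         * pointer value and never dereferences it */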
3441         dout("mdsc_destroy %p done\n", mdsc);
3442 }
3443
3444
3445 /*
3446  * handle mds map update.
3447  */
3448 void ceph_mdsc_handle_map(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
3449 {
3450         u32 epoch;
3451         u32 maplen;
3452         void *p = msg->front.iov_base;
3453         void *end = p + msg->front.iov_len;
3454         struct ceph_mdsmap *newmap, *oldmap;
3455         struct ceph_fsid fsid;
3456         int err = -EINVAL;
3457
3458         ceph_decode_need(&p, end, sizeof(fsid)+2*sizeof(u32), bad);
3459         ceph_decode_copy(&p, &fsid, sizeof(fsid));
3460         if (ceph_check_fsid(mdsc->fsc->client, &fsid) < 0)
3461                 return;
3462         epoch = ceph_decode_32(&p);
3463         maplen = ceph_decode_32(&p);
3464         dout("handle_map epoch %u len %d\n", epoch, (int)maplen);
3465
3466         /* do we need it? */
3467         ceph_monc_got_mdsmap(&mdsc->fsc->client->monc, epoch);
3468         mutex_lock(&mdsc->mutex);
3469         if (mdsc->mdsmap && epoch <= mdsc->mdsmap->m_epoch) {
3470                 dout("handle_map epoch %u <= our %u\n",
3471                      epoch, mdsc->mdsmap->m_epoch);
3472                 mutex_unlock(&mdsc->mutex);
3473                 return;
3474         }
3475
3476         newmap = ceph_mdsmap_decode(&p, end);
3477         if (IS_ERR(newmap)) {
3478                 err = PTR_ERR(newmap);
3479                 goto bad_unlock;
3480         }
3481
3482         /* swap into place */
3483         if (mdsc->mdsmap) {
3484                 oldmap = mdsc->mdsmap;
3485                 mdsc->mdsmap = newmap;
3486                 check_new_map(mdsc, newmap, oldmap);
3487                 ceph_mdsmap_destroy(oldmap);
3488         } else {
3489                 mdsc->mdsmap = newmap;  /* first mds map */
3490         }
3491         mdsc->fsc->sb->s_maxbytes = mdsc->mdsmap->m_max_file_size;
3492
3493         __wake_requests(mdsc, &mdsc->waiting_for_map);
3494
3495         mutex_unlock(&mdsc->mutex);
3496         schedule_delayed(mdsc);
3497         return;
3498
3499 bad_unlock:
3500         mutex_unlock(&mdsc->mutex);
3501 bad:
3502         pr_err("error decoding mdsmap %d\n", err);
3504 }
3505
3506 static struct ceph_connection *con_get(struct ceph_connection *con)
3507 {
3508         struct ceph_mds_session *s = con->private;
3509
3510         if (get_session(s)) {
3511                 dout("mdsc con_get %p ok (%d)\n", s, atomic_read(&s->s_ref));
3512                 return con;
3513         }
3514         dout("mdsc con_get %p FAIL\n", s);
3515         return NULL;
3516 }
3517
3518 static void con_put(struct ceph_connection *con)
3519 {
3520         struct ceph_mds_session *s = con->private;
3521
3522         dout("mdsc con_put %p (%d)\n", s, atomic_read(&s->s_ref) - 1);
3523         ceph_put_mds_session(s);
3524 }
3525
3526 /*
3527  * if the client is unresponsive for long enough, the mds will kill
3528  * the session entirely.
3529  */
3530 static void peer_reset(struct ceph_connection *con)
3531 {
3532         struct ceph_mds_session *s = con->private;
3533         struct ceph_mds_client *mdsc = s->s_mdsc;
3534
3535         pr_warn("mds%d closed our session\n", s->s_mds);
3536         send_mds_reconnect(mdsc, s);
3537 }
3538
3539 static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
3540 {
3541         struct ceph_mds_session *s = con->private;
3542         struct ceph_mds_client *mdsc = s->s_mdsc;
3543         int type = le16_to_cpu(msg->hdr.type);
3544
3545         mutex_lock(&mdsc->mutex);
3546         if (__verify_registered_session(mdsc, s) < 0) {
3547                 mutex_unlock(&mdsc->mutex);
3548                 goto out;
3549         }
3550         mutex_unlock(&mdsc->mutex);
3551
3552         switch (type) {
3553         case CEPH_MSG_MDS_MAP:
3554                 ceph_mdsc_handle_map(mdsc, msg);
3555                 break;
3556         case CEPH_MSG_CLIENT_SESSION:
3557                 handle_session(s, msg);
3558                 break;
3559         case CEPH_MSG_CLIENT_REPLY:
3560                 handle_reply(s, msg);
3561                 break;
3562         case CEPH_MSG_CLIENT_REQUEST_FORWARD:
3563                 handle_forward(mdsc, s, msg);
3564                 break;
3565         case CEPH_MSG_CLIENT_CAPS:
3566                 ceph_handle_caps(s, msg);
3567                 break;
3568         case CEPH_MSG_CLIENT_SNAP:
3569                 ceph_handle_snap(mdsc, s, msg);
3570                 break;
3571         case CEPH_MSG_CLIENT_LEASE:
3572                 handle_lease(mdsc, s, msg);
3573                 break;
3574
3575         default:
3576                 pr_err("received unknown message type %d %s\n", type,
3577                        ceph_msg_type_name(type));
3578         }
3579 out:
3580         ceph_msg_put(msg);
3581 }
3582
3583 /*
3584  * authentication
3585  */
3586
3587 /*
3588  * Note: returned pointer is the address of a structure that's
3589  * managed separately.  Caller must *not* attempt to free it.
3590  */
3591 static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
3592                                         int *proto, int force_new)
3593 {
3594         struct ceph_mds_session *s = con->private;
3595         struct ceph_mds_client *mdsc = s->s_mdsc;
3596         struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
3597         struct ceph_auth_handshake *auth = &s->s_auth;
3598
3599         if (force_new && auth->authorizer) {
3600                 ceph_auth_destroy_authorizer(ac, auth->authorizer);
3601                 auth->authorizer = NULL;
3602         }
3603         if (!auth->authorizer) {
3604                 int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_MDS,
3605                                                       auth);
3606                 if (ret)
3607                         return ERR_PTR(ret);
3608         } else {
3609                 int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_MDS,
3610                                                       auth);
3611                 if (ret)
3612                         return ERR_PTR(ret);
3613         }
3614         *proto = ac->protocol;
3615
3616         return auth;
3617 }
3618
3619
3620 static int verify_authorizer_reply(struct ceph_connection *con, int len)
3621 {
3622         struct ceph_mds_session *s = con->private;
3623         struct ceph_mds_client *mdsc = s->s_mdsc;
3624         struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
3625
3626         return ceph_auth_verify_authorizer_reply(ac, s->s_auth.authorizer, len);
3627 }
3628
3629 static int invalidate_authorizer(struct ceph_connection *con)
3630 {
3631         struct ceph_mds_session *s = con->private;
3632         struct ceph_mds_client *mdsc = s->s_mdsc;
3633         struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
3634
3635         ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_MDS);
3636
3637         return ceph_monc_validate_auth(&mdsc->fsc->client->monc);
3638 }
3639
3640 static struct ceph_msg *mds_alloc_msg(struct ceph_connection *con,
3641                                 struct ceph_msg_header *hdr, int *skip)
3642 {
3643         struct ceph_msg *msg;
3644         int type = (int) le16_to_cpu(hdr->type);
3645         int front_len = (int) le32_to_cpu(hdr->front_len);
3646
3647         if (con->in_msg)
3648                 return con->in_msg;
3649
3650         *skip = 0;
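        /* below, returning NULL while leaving *skip at 0 reports an
         * allocation failure to the messenger rather than asking it
         * to skip the message */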
3651         msg = ceph_msg_new(type, front_len, GFP_NOFS, false);
3652         if (!msg) {
3653                 pr_err("unable to allocate msg type %d len %d\n",
3654                        type, front_len);
3655                 return NULL;
3656         }
3657
3658         return msg;
3659 }
3660
3661 static const struct ceph_connection_operations mds_con_ops = {
3662         .get = con_get,
3663         .put = con_put,
3664         .dispatch = dispatch,
3665         .get_authorizer = get_authorizer,
3666         .verify_authorizer_reply = verify_authorizer_reply,
3667         .invalidate_authorizer = invalidate_authorizer,
3668         .peer_reset = peer_reset,
3669         .alloc_msg = mds_alloc_msg,
3670 };
3671
3672 /* eof */