#include "ceph_debug.h"

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/writeback.h>

#include "super.h"
#include "messenger.h"
/*
 * Capability management
 *
 * The Ceph metadata servers control client access to inode metadata
 * and file data by issuing capabilities, granting clients permission
 * to read and/or write both inode fields and file data to OSDs
 * (storage nodes).  Each capability consists of a set of bits
 * indicating which operations are allowed.
 *
 * If the client holds a *_SHARED cap, the client has a coherent value
 * that can be safely read from the cached inode.
 *
 * In the case of a *_EXCL (exclusive) or FILE_WR capability, the
 * client is allowed to change inode attributes (e.g., file size,
 * mtime), note its dirty state in the ceph_cap, and asynchronously
 * flush that metadata change to the MDS.
 *
 * In the event of a conflicting operation (perhaps by another
 * client), the MDS will revoke the conflicting client capabilities.
 *
 * In order for a client to cache an inode, it must hold a capability
 * with at least one MDS server.  When inodes are released, release
 * notifications are batched and periodically sent en masse to the MDS
 * cluster to release server state.
 */
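
/*
 * For illustration (an informal sketch; gcap_string() and
 * ceph_cap_string() below are the authoritative encoding): a cap word
 * packs a group of generic bits (shared, excl, cache, rd, wr, buffer,
 * lazyio) into one of several per-inode sections (AUTH, LINK, XATTR,
 * FILE), each at its own shift (CEPH_CAP_SAUTH, CEPH_CAP_SLINK,
 * CEPH_CAP_SXATTR, CEPH_CAP_SFILE), plus a standalone PIN bit.
 */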
/*
 * Generate readable cap strings for debugging output.
 */
#define MAX_CAP_STR 20
static char cap_str[MAX_CAP_STR][40];
static DEFINE_SPINLOCK(cap_str_lock);
static int last_cap_str;
static char *gcap_string(char *s, int c)
{
	if (c & CEPH_CAP_GSHARED)
		*s++ = 's';
	if (c & CEPH_CAP_GEXCL)
		*s++ = 'x';
	if (c & CEPH_CAP_GCACHE)
		*s++ = 'c';
	if (c & CEPH_CAP_GRD)
		*s++ = 'r';
	if (c & CEPH_CAP_GWR)
		*s++ = 'w';
	if (c & CEPH_CAP_GBUFFER)
		*s++ = 'b';
	if (c & CEPH_CAP_GLAZYIO)
		*s++ = 'l';
	return s;
}
const char *ceph_cap_string(int caps)
{
	int i;
	char *s;
	int c;

	spin_lock(&cap_str_lock);
	i = last_cap_str++;
	if (last_cap_str == MAX_CAP_STR)
		last_cap_str = 0;
	spin_unlock(&cap_str_lock);

	s = cap_str[i];

	if (caps & CEPH_CAP_PIN)
		*s++ = 'p';

	c = (caps >> CEPH_CAP_SAUTH) & 3;
	if (c) {
		*s++ = 'A';
		s = gcap_string(s, c);
	}

	c = (caps >> CEPH_CAP_SLINK) & 3;
	if (c) {
		*s++ = 'L';
		s = gcap_string(s, c);
	}

	c = (caps >> CEPH_CAP_SXATTR) & 3;
	if (c) {
		*s++ = 'X';
		s = gcap_string(s, c);
	}

	c = caps >> CEPH_CAP_SFILE;
	if (c) {
		*s++ = 'F';
		s = gcap_string(s, c);
	}

	if (s == cap_str[i])
		*s++ = '-';
	*s = 0;
	return cap_str[i];
}
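
/*
 * Example (a sketch, following the code above): a caps value with the
 * FILE shared and rd generic bits set renders as "Fsr"; a value with
 * no bits set at all renders as "-".
 */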
/*
 * Maintain a global pool of preallocated struct ceph_caps, referenced
 * by struct ceph_caps_reservations.  This ensures that we preallocate
 * memory needed to successfully process an MDS response.  (If an MDS
 * sends us cap information and we fail to process it, we will have
 * problems due to the client and MDS being out of sync.)
 *
 * Reservations are 'owned' by a ceph_cap_reservation context.
 */
static spinlock_t caps_list_lock;
static struct list_head caps_list;	/* unused (reserved or unreserved) */
static int caps_total_count;		/* total caps allocated */
static int caps_use_count;		/* in use */
static int caps_reserve_count;		/* unused, reserved */
static int caps_avail_count;		/* unused, unreserved */
static int caps_min_count;		/* keep at least this many (unreserved) */
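
/*
 * Bookkeeping invariant (asserted by the BUG_ONs below whenever
 * caps_list_lock is held):
 *
 *	caps_total_count == caps_use_count + caps_reserve_count +
 *			    caps_avail_count
 */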
void __init ceph_caps_init(void)
{
	INIT_LIST_HEAD(&caps_list);
	spin_lock_init(&caps_list_lock);
}
void ceph_caps_finalize(void)
{
	struct ceph_cap *cap;

	spin_lock(&caps_list_lock);
	while (!list_empty(&caps_list)) {
		cap = list_first_entry(&caps_list, struct ceph_cap, caps_item);
		list_del(&cap->caps_item);
		kmem_cache_free(ceph_cap_cachep, cap);
	}
	caps_total_count = 0;
	caps_avail_count = 0;
	caps_use_count = 0;
	caps_reserve_count = 0;
	caps_min_count = 0;
	spin_unlock(&caps_list_lock);
}
void ceph_adjust_min_caps(int delta)
{
	spin_lock(&caps_list_lock);
	caps_min_count += delta;
	BUG_ON(caps_min_count < 0);
	spin_unlock(&caps_list_lock);
}
int ceph_reserve_caps(struct ceph_cap_reservation *ctx, int need)
{
	int i;
	struct ceph_cap *cap;
	int have;
	int alloc = 0;
	LIST_HEAD(newcaps);
	int retval = 0;

	dout("reserve caps ctx=%p need=%d\n", ctx, need);

	/* first reserve any caps that are already allocated */
	spin_lock(&caps_list_lock);
	if (caps_avail_count >= need)
		have = need;
	else
		have = caps_avail_count;
	caps_avail_count -= have;
	caps_reserve_count += have;
	BUG_ON(caps_total_count != caps_use_count + caps_reserve_count +
	       caps_avail_count);
	spin_unlock(&caps_list_lock);

	for (i = have; i < need; i++) {
		cap = kmem_cache_alloc(ceph_cap_cachep, GFP_NOFS);
		if (!cap) {
			retval = -ENOMEM;
			goto out_alloc_count;
		}
		list_add(&cap->caps_item, &newcaps);
		alloc++;
	}
	BUG_ON(have + alloc != need);

	spin_lock(&caps_list_lock);
	caps_total_count += alloc;
	caps_reserve_count += alloc;
	list_splice(&newcaps, &caps_list);

	BUG_ON(caps_total_count != caps_use_count + caps_reserve_count +
	       caps_avail_count);
	spin_unlock(&caps_list_lock);

	ctx->count = need;
	dout("reserve caps ctx=%p %d = %d used + %d resv + %d avail\n",
	     ctx, caps_total_count, caps_use_count, caps_reserve_count,
	     caps_avail_count);
	return 0;

out_alloc_count:
	/* we didn't manage to reserve as much as we needed */
	pr_warning("reserve caps ctx=%p ENOMEM need=%d got=%d\n",
		   ctx, need, have);
	return retval;
}
int ceph_unreserve_caps(struct ceph_cap_reservation *ctx)
{
	dout("unreserve caps ctx=%p count=%d\n", ctx, ctx->count);
	if (ctx->count) {
		spin_lock(&caps_list_lock);
		BUG_ON(caps_reserve_count < ctx->count);
		caps_reserve_count -= ctx->count;
		caps_avail_count += ctx->count;
		ctx->count = 0;
		dout("unreserve caps %d = %d used + %d resv + %d avail\n",
		     caps_total_count, caps_use_count, caps_reserve_count,
		     caps_avail_count);
		BUG_ON(caps_total_count != caps_use_count +
		       caps_reserve_count + caps_avail_count);
		spin_unlock(&caps_list_lock);
	}
	return 0;
}
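
/*
 * Typical lifecycle (a sketch based on the helpers in this file): a
 * caller reserves caps up front with ceph_reserve_caps(ctx, n),
 * consumes them one at a time via get_cap(ctx) while processing an
 * MDS reply, returns any unused reservation with
 * ceph_unreserve_caps(ctx), and frees individual caps back to the
 * pool with ceph_put_cap().
 */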
static struct ceph_cap *get_cap(struct ceph_cap_reservation *ctx)
{
	struct ceph_cap *cap = NULL;

	/* temporary, until we do something about cap import/export */
	if (!ctx)
		return kmem_cache_alloc(ceph_cap_cachep, GFP_NOFS);

	spin_lock(&caps_list_lock);
	dout("get_cap ctx=%p (%d) %d = %d used + %d resv + %d avail\n",
	     ctx, ctx->count, caps_total_count, caps_use_count,
	     caps_reserve_count, caps_avail_count);
	BUG_ON(!ctx->count);
	BUG_ON(ctx->count > caps_reserve_count);
	BUG_ON(list_empty(&caps_list));

	ctx->count--;
	caps_reserve_count--;
	caps_use_count++;

	cap = list_first_entry(&caps_list, struct ceph_cap, caps_item);
	list_del(&cap->caps_item);

	BUG_ON(caps_total_count != caps_use_count + caps_reserve_count +
	       caps_avail_count);
	spin_unlock(&caps_list_lock);
	return cap;
}
void ceph_put_cap(struct ceph_cap *cap)
{
	spin_lock(&caps_list_lock);
	dout("put_cap %p %d = %d used + %d resv + %d avail\n",
	     cap, caps_total_count, caps_use_count,
	     caps_reserve_count, caps_avail_count);
	caps_use_count--;
	/*
	 * Keep some preallocated caps around (ceph_min_count), to
	 * avoid lots of free/alloc churn.
	 */
	if (caps_avail_count >= caps_reserve_count + caps_min_count) {
		caps_total_count--;
		kmem_cache_free(ceph_cap_cachep, cap);
	} else {
		caps_avail_count++;
		list_add(&cap->caps_item, &caps_list);
	}

	BUG_ON(caps_total_count != caps_use_count + caps_reserve_count +
	       caps_avail_count);
	spin_unlock(&caps_list_lock);
}
void ceph_reservation_status(struct ceph_client *client,
			     int *total, int *avail, int *used, int *reserved,
			     int *min)
{
	if (total)
		*total = caps_total_count;
	if (avail)
		*avail = caps_avail_count;
	if (used)
		*used = caps_use_count;
	if (reserved)
		*reserved = caps_reserve_count;
	if (min)
		*min = caps_min_count;
}
/*
 * Find ceph_cap for given mds, if any.
 *
 * Called with i_lock held.
 */
static struct ceph_cap *__get_cap_for_mds(struct ceph_inode_info *ci, int mds)
{
	struct ceph_cap *cap;
	struct rb_node *n = ci->i_caps.rb_node;

	while (n) {
		cap = rb_entry(n, struct ceph_cap, ci_node);
		if (mds < cap->mds)
			n = n->rb_left;
		else if (mds > cap->mds)
			n = n->rb_right;
		else
			return cap;
	}
	return NULL;
}
/*
 * Return id of any MDS with a cap, preferably FILE_WR|WRBUFFER|EXCL, else
 * -1.
 */
static int __ceph_get_cap_mds(struct ceph_inode_info *ci, u32 *mseq)
{
	struct ceph_cap *cap;
	int mds = -1;
	struct rb_node *p;

	/* prefer mds with WR|WRBUFFER|EXCL caps */
	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		mds = cap->mds;
		if (mseq)
			*mseq = cap->mseq;
		if (cap->issued & (CEPH_CAP_FILE_WR |
				   CEPH_CAP_FILE_BUFFER |
				   CEPH_CAP_FILE_EXCL))
			break;
	}
	return mds;
}
int ceph_get_cap_mds(struct inode *inode)
{
	int mds;

	spin_lock(&inode->i_lock);
	mds = __ceph_get_cap_mds(ceph_inode(inode), NULL);
	spin_unlock(&inode->i_lock);
	return mds;
}
/*
 * Called under i_lock.
 */
static void __insert_cap_node(struct ceph_inode_info *ci,
			      struct ceph_cap *new)
{
	struct rb_node **p = &ci->i_caps.rb_node;
	struct rb_node *parent = NULL;
	struct ceph_cap *cap = NULL;

	while (*p) {
		parent = *p;
		cap = rb_entry(parent, struct ceph_cap, ci_node);
		if (new->mds < cap->mds)
			p = &(*p)->rb_left;
		else if (new->mds > cap->mds)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&new->ci_node, parent, p);
	rb_insert_color(&new->ci_node, &ci->i_caps);
}
/*
 * (re)set cap hold timeouts, which control the delayed release
 * of unused caps back to the MDS.  Should be called on cap use.
 */
static void __cap_set_timeouts(struct ceph_mds_client *mdsc,
			       struct ceph_inode_info *ci)
{
	struct ceph_mount_args *ma = mdsc->client->mount_args;

	ci->i_hold_caps_min = round_jiffies(jiffies +
					    ma->caps_wanted_delay_min * HZ);
	ci->i_hold_caps_max = round_jiffies(jiffies +
					    ma->caps_wanted_delay_max * HZ);
	dout("__cap_set_timeouts %p min %lu max %lu\n", &ci->vfs_inode,
	     ci->i_hold_caps_min - jiffies, ci->i_hold_caps_max - jiffies);
}
/*
 * (Re)queue cap at the end of the delayed cap release list.
 *
 * If I_FLUSH is set, leave the inode at the front of the list.
 *
 * Caller holds i_lock
 *    -> we take mdsc->cap_delay_lock
 */
static void __cap_delay_requeue(struct ceph_mds_client *mdsc,
				struct ceph_inode_info *ci)
{
	__cap_set_timeouts(mdsc, ci);
	dout("__cap_delay_requeue %p flags %d at %lu\n", &ci->vfs_inode,
	     ci->i_ceph_flags, ci->i_hold_caps_max);
	if (!mdsc->stopping) {
		spin_lock(&mdsc->cap_delay_lock);
		if (!list_empty(&ci->i_cap_delay_list)) {
			if (ci->i_ceph_flags & CEPH_I_FLUSH)
				goto no_change;
			list_del_init(&ci->i_cap_delay_list);
		}
		list_add_tail(&ci->i_cap_delay_list, &mdsc->cap_delay_list);
no_change:
		spin_unlock(&mdsc->cap_delay_lock);
	}
}
/*
 * Queue an inode for immediate writeback.  Mark inode with I_FLUSH,
 * indicating we should send a cap message to flush dirty metadata
 * asap, and move to the front of the delayed cap list.
 */
static void __cap_delay_requeue_front(struct ceph_mds_client *mdsc,
				      struct ceph_inode_info *ci)
{
	dout("__cap_delay_requeue_front %p\n", &ci->vfs_inode);
	spin_lock(&mdsc->cap_delay_lock);
	ci->i_ceph_flags |= CEPH_I_FLUSH;
	if (!list_empty(&ci->i_cap_delay_list))
		list_del_init(&ci->i_cap_delay_list);
	list_add(&ci->i_cap_delay_list, &mdsc->cap_delay_list);
	spin_unlock(&mdsc->cap_delay_lock);
}
/*
 * Cancel delayed work on cap.
 *
 * Caller must hold i_lock.
 */
static void __cap_delay_cancel(struct ceph_mds_client *mdsc,
			       struct ceph_inode_info *ci)
{
	dout("__cap_delay_cancel %p\n", &ci->vfs_inode);
	if (list_empty(&ci->i_cap_delay_list))
		return;
	spin_lock(&mdsc->cap_delay_lock);
	list_del_init(&ci->i_cap_delay_list);
	spin_unlock(&mdsc->cap_delay_lock);
}
/*
 * Common issue checks for add_cap, handle_cap_grant.
 */
static void __check_cap_issue(struct ceph_inode_info *ci, struct ceph_cap *cap,
			      unsigned issued)
{
	unsigned had = __ceph_caps_issued(ci, NULL);

	/*
	 * Each time we receive FILE_CACHE anew, we increment
	 * i_rdcache_gen.
	 */
	if ((issued & CEPH_CAP_FILE_CACHE) &&
	    (had & CEPH_CAP_FILE_CACHE) == 0)
		ci->i_rdcache_gen++;

	/*
	 * if we are newly issued FILE_SHARED, clear I_COMPLETE; we
	 * don't know what happened to this directory while we didn't
	 * have the cap.
	 */
	if ((issued & CEPH_CAP_FILE_SHARED) &&
	    (had & CEPH_CAP_FILE_SHARED) == 0) {
		ci->i_shared_gen++;
		if (S_ISDIR(ci->vfs_inode.i_mode)) {
			dout(" marking %p NOT complete\n", &ci->vfs_inode);
			ci->i_ceph_flags &= ~CEPH_I_COMPLETE;
		}
	}
}
/*
 * Add a capability under the given MDS session.
 *
 * Caller should hold session snap_rwsem (read) and s_mutex.
 *
 * @fmode is the open file mode, if we are opening a file, otherwise
 * it is < 0.  (This is so we can atomically add the cap and add an
 * open file reference to it.)
 */
int ceph_add_cap(struct inode *inode,
		 struct ceph_mds_session *session, u64 cap_id,
		 int fmode, unsigned issued, unsigned wanted,
		 unsigned seq, unsigned mseq, u64 realmino, int flags,
		 struct ceph_cap_reservation *caps_reservation)
{
	struct ceph_mds_client *mdsc = &ceph_inode_to_client(inode)->mdsc;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_cap *new_cap = NULL;
	struct ceph_cap *cap;
	int mds = session->s_mds;
	int actual_wanted;

	dout("add_cap %p mds%d cap %llx %s seq %d\n", inode,
	     session->s_mds, cap_id, ceph_cap_string(issued), seq);

	/*
	 * If we are opening the file, include file mode wanted bits
	 * in wanted.
	 */
	if (fmode >= 0)
		wanted |= ceph_caps_for_mode(fmode);

retry:
	spin_lock(&inode->i_lock);
	cap = __get_cap_for_mds(ci, mds);
	if (!cap) {
		if (new_cap) {
			cap = new_cap;
			new_cap = NULL;
		} else {
			spin_unlock(&inode->i_lock);
			new_cap = get_cap(caps_reservation);
			if (new_cap == NULL)
				return -ENOMEM;
			goto retry;
		}

		cap->issued = 0;
		cap->implemented = 0;
		cap->mds = mds;
		cap->mds_wanted = 0;

		cap->ci = ci;
		__insert_cap_node(ci, cap);

		/* clear out old exporting info?  (i.e. on cap import) */
		if (ci->i_cap_exporting_mds == mds) {
			ci->i_cap_exporting_issued = 0;
			ci->i_cap_exporting_mseq = 0;
			ci->i_cap_exporting_mds = -1;
		}

		/* add to session cap list */
		cap->session = session;
		spin_lock(&session->s_cap_lock);
		list_add_tail(&cap->session_caps, &session->s_caps);
		session->s_nr_caps++;
		spin_unlock(&session->s_cap_lock);
	}

	if (!ci->i_snap_realm) {
		/*
		 * add this inode to the appropriate snap realm
		 */
		struct ceph_snap_realm *realm = ceph_lookup_snap_realm(mdsc,
							       realmino);
		if (realm) {
			ceph_get_snap_realm(mdsc, realm);
			spin_lock(&realm->inodes_with_caps_lock);
			ci->i_snap_realm = realm;
			list_add(&ci->i_snap_realm_item,
				 &realm->inodes_with_caps);
			spin_unlock(&realm->inodes_with_caps_lock);
		} else {
			pr_err("ceph_add_cap: couldn't find snap realm %llx\n",
			       realmino);
		}
	}

	__check_cap_issue(ci, cap, issued);

	/*
	 * If we are issued caps we don't want, or the mds' wanted
	 * value appears to be off, queue a check so we'll release
	 * later and/or update the mds wanted value.
	 */
	actual_wanted = __ceph_caps_wanted(ci);
	if ((wanted & ~actual_wanted) ||
	    (issued & ~actual_wanted & CEPH_CAP_ANY_WR)) {
		dout(" issued %s, mds wanted %s, actual %s, queueing\n",
		     ceph_cap_string(issued), ceph_cap_string(wanted),
		     ceph_cap_string(actual_wanted));
		__cap_delay_requeue(mdsc, ci);
	}

	if (flags & CEPH_CAP_FLAG_AUTH)
		ci->i_auth_cap = cap;
	else if (ci->i_auth_cap == cap)
		ci->i_auth_cap = NULL;

	dout("add_cap inode %p (%llx.%llx) cap %p %s now %s seq %d mds%d\n",
	     inode, ceph_vinop(inode), cap, ceph_cap_string(issued),
	     ceph_cap_string(issued|cap->issued), seq, mds);
	cap->cap_id = cap_id;
	cap->issued = issued;
	cap->implemented |= issued;
	cap->mds_wanted |= wanted;
	cap->seq = seq;
	cap->issue_seq = seq;
	cap->mseq = mseq;
	cap->cap_gen = session->s_cap_gen;

	if (fmode >= 0)
		__ceph_get_fmode(ci, fmode);
	spin_unlock(&inode->i_lock);
	wake_up(&ci->i_cap_wq);
	return 0;
}
/*
 * Return true if cap has not timed out and belongs to the current
 * generation of the MDS session (i.e. has not gone 'stale' due to
 * us losing touch with the mds).
 */
static int __cap_is_valid(struct ceph_cap *cap)
{
	unsigned long ttl;
	u32 gen;

	spin_lock(&cap->session->s_cap_lock);
	gen = cap->session->s_cap_gen;
	ttl = cap->session->s_cap_ttl;
	spin_unlock(&cap->session->s_cap_lock);

	if (cap->cap_gen < gen || time_after_eq(jiffies, ttl)) {
		dout("__cap_is_valid %p cap %p issued %s "
		     "but STALE (gen %u vs %u)\n", &cap->ci->vfs_inode,
		     cap, ceph_cap_string(cap->issued), cap->cap_gen, gen);
		return 0;
	}

	return 1;
}
/*
 * Return set of valid cap bits issued to us.  Note that caps time
 * out, and may be invalidated in bulk if the client session times out
 * and session->s_cap_gen is bumped.
 */
int __ceph_caps_issued(struct ceph_inode_info *ci, int *implemented)
{
	int have = ci->i_snap_caps;
	struct ceph_cap *cap;
	struct rb_node *p;

	if (implemented)
		*implemented = 0;
	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		if (!__cap_is_valid(cap))
			continue;
		dout("__ceph_caps_issued %p cap %p issued %s\n",
		     &ci->vfs_inode, cap, ceph_cap_string(cap->issued));
		have |= cap->issued;
		if (implemented)
			*implemented |= cap->implemented;
	}
	return have;
}
/*
 * Get cap bits issued by caps other than @ocap
 */
int __ceph_caps_issued_other(struct ceph_inode_info *ci, struct ceph_cap *ocap)
{
	int have = ci->i_snap_caps;
	struct ceph_cap *cap;
	struct rb_node *p;

	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		if (cap == ocap)
			continue;
		if (!__cap_is_valid(cap))
			continue;
		have |= cap->issued;
	}
	return have;
}
/*
 * Move a cap to the end of the LRU (oldest caps at list head, newest
 * last).
 */
static void __touch_cap(struct ceph_cap *cap)
{
	struct ceph_mds_session *s = cap->session;

	spin_lock(&s->s_cap_lock);
	if (s->s_cap_iterator == NULL) {
		dout("__touch_cap %p cap %p mds%d\n", &cap->ci->vfs_inode, cap,
		     s->s_mds);
		list_move_tail(&cap->session_caps, &s->s_caps);
	} else {
		dout("__touch_cap %p cap %p mds%d NOP, iterating over caps\n",
		     &cap->ci->vfs_inode, cap, s->s_mds);
	}
	spin_unlock(&s->s_cap_lock);
}
/*
 * Check if we hold the given mask.  If so, move the cap(s) to the
 * front of their respective LRUs.  (This is the preferred way for
 * callers to check for caps they want.)
 */
int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch)
{
	struct ceph_cap *cap;
	struct rb_node *p;
	int have = ci->i_snap_caps;

	if ((have & mask) == mask) {
		dout("__ceph_caps_issued_mask %p snap issued %s"
		     " (mask %s)\n", &ci->vfs_inode,
		     ceph_cap_string(have),
		     ceph_cap_string(mask));
		return 1;
	}

	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		if (!__cap_is_valid(cap))
			continue;
		if ((cap->issued & mask) == mask) {
			dout("__ceph_caps_issued_mask %p cap %p issued %s"
			     " (mask %s)\n", &ci->vfs_inode, cap,
			     ceph_cap_string(cap->issued),
			     ceph_cap_string(mask));
			if (touch)
				__touch_cap(cap);
			return 1;
		}

		/* does a combination of caps satisfy mask? */
		have |= cap->issued;
		if ((have & mask) == mask) {
			dout("__ceph_caps_issued_mask %p combo issued %s"
			     " (mask %s)\n", &ci->vfs_inode,
			     ceph_cap_string(cap->issued),
			     ceph_cap_string(mask));
			if (touch) {
				struct rb_node *q;

				/* touch this + preceding caps */
				__touch_cap(cap);
				for (q = rb_first(&ci->i_caps); q != p;
				     q = rb_next(q)) {
					cap = rb_entry(q, struct ceph_cap,
						       ci_node);
					if (!__cap_is_valid(cap))
						continue;
					__touch_cap(cap);
				}
			}
			return 1;
		}
	}

	return 0;
}
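
/*
 * Example (a sketch): __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_CACHE |
 * CEPH_CAP_FILE_RD, 1) returns 1 only if some combination of valid
 * caps covers both bits, and in that case also touches the satisfying
 * cap(s) in their session LRU so they are released last.
 */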
/*
 * Return true if mask caps are currently being revoked by an MDS.
 */
int ceph_caps_revoking(struct ceph_inode_info *ci, int mask)
{
	struct inode *inode = &ci->vfs_inode;
	struct ceph_cap *cap;
	struct rb_node *p;
	int ret = 0;

	spin_lock(&inode->i_lock);
	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		if (__cap_is_valid(cap) &&
		    (cap->implemented & ~cap->issued & mask)) {
			ret = 1;
			break;
		}
	}
	spin_unlock(&inode->i_lock);
	dout("ceph_caps_revoking %p %s = %d\n", inode,
	     ceph_cap_string(mask), ret);
	return ret;
}
int __ceph_caps_used(struct ceph_inode_info *ci)
{
	int used = 0;
	if (ci->i_pin_ref)
		used |= CEPH_CAP_PIN;
	if (ci->i_rd_ref)
		used |= CEPH_CAP_FILE_RD;
	if (ci->i_rdcache_ref || ci->i_rdcache_gen)
		used |= CEPH_CAP_FILE_CACHE;
	if (ci->i_wr_ref)
		used |= CEPH_CAP_FILE_WR;
	if (ci->i_wrbuffer_ref)
		used |= CEPH_CAP_FILE_BUFFER;
	return used;
}
/*
 * wanted, by virtue of open file modes
 */
int __ceph_caps_file_wanted(struct ceph_inode_info *ci)
{
	int want = 0;
	int mode;

	for (mode = 0; mode < 4; mode++)
		if (ci->i_nr_by_mode[mode])
			want |= ceph_caps_for_mode(mode);
	return want;
}
/*
 * Return caps we have registered with the MDS(s) as 'wanted'.
 */
int __ceph_caps_mds_wanted(struct ceph_inode_info *ci)
{
	struct ceph_cap *cap;
	struct rb_node *p;
	int mds_wanted = 0;

	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		if (!__cap_is_valid(cap))
			continue;
		mds_wanted |= cap->mds_wanted;
	}
	return mds_wanted;
}
/*
 * called under i_lock
 */
static int __ceph_is_any_caps(struct ceph_inode_info *ci)
{
	return !RB_EMPTY_ROOT(&ci->i_caps) || ci->i_cap_exporting_mds >= 0;
}
/*
 * caller should hold i_lock.
 * caller will not hold session s_mutex if called from destroy_inode.
 */
void __ceph_remove_cap(struct ceph_cap *cap)
{
	struct ceph_mds_session *session = cap->session;
	struct ceph_inode_info *ci = cap->ci;
	struct ceph_mds_client *mdsc = &ceph_client(ci->vfs_inode.i_sb)->mdsc;

	dout("__ceph_remove_cap %p from %p\n", cap, &ci->vfs_inode);

	/* remove from inode list */
	rb_erase(&cap->ci_node, &ci->i_caps);
	cap->ci = NULL;
	if (ci->i_auth_cap == cap)
		ci->i_auth_cap = NULL;

	/* remove from session list */
	spin_lock(&session->s_cap_lock);
	if (session->s_cap_iterator == cap) {
		/* not yet, we are iterating over this very cap */
		dout("__ceph_remove_cap delaying %p removal from session %p\n",
		     cap, cap->session);
	} else {
		list_del_init(&cap->session_caps);
		session->s_nr_caps--;
		cap->session = NULL;
	}
	spin_unlock(&session->s_cap_lock);

	if (cap->session == NULL)
		ceph_put_cap(cap);

	if (!__ceph_is_any_caps(ci) && ci->i_snap_realm) {
		struct ceph_snap_realm *realm = ci->i_snap_realm;
		spin_lock(&realm->inodes_with_caps_lock);
		list_del_init(&ci->i_snap_realm_item);
		ci->i_snap_realm_counter++;
		ci->i_snap_realm = NULL;
		spin_unlock(&realm->inodes_with_caps_lock);
		ceph_put_snap_realm(mdsc, realm);
	}
	if (!__ceph_is_any_real_caps(ci))
		__cap_delay_cancel(mdsc, ci);
}
/*
 * Build and send a cap message to the given MDS.
 *
 * Caller should be holding s_mutex.
 */
static int send_cap_msg(struct ceph_mds_session *session,
			u64 ino, u64 cid, int op,
			int caps, int wanted, int dirty,
			u32 seq, u64 flush_tid, u32 issue_seq, u32 mseq,
			u64 size, u64 max_size,
			struct timespec *mtime, struct timespec *atime,
			u64 time_warp_seq,
			uid_t uid, gid_t gid, mode_t mode,
			u64 xattr_version,
			struct ceph_buffer *xattrs_buf,
			u64 follows)
{
	struct ceph_mds_caps *fc;
	struct ceph_msg *msg;

	dout("send_cap_msg %s %llx %llx caps %s wanted %s dirty %s"
	     " seq %u/%u mseq %u follows %lld size %llu/%llu"
	     " xattr_ver %llu xattr_len %d\n", ceph_cap_op_name(op),
	     cid, ino, ceph_cap_string(caps), ceph_cap_string(wanted),
	     ceph_cap_string(dirty),
	     seq, issue_seq, mseq, follows, size, max_size,
	     xattr_version, xattrs_buf ? (int)xattrs_buf->vec.iov_len : 0);

	msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPS, sizeof(*fc), 0, 0, NULL);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msg->hdr.tid = cpu_to_le64(flush_tid);

	fc = msg->front.iov_base;
	memset(fc, 0, sizeof(*fc));

	fc->cap_id = cpu_to_le64(cid);
	fc->op = cpu_to_le32(op);
	fc->seq = cpu_to_le32(seq);
	fc->issue_seq = cpu_to_le32(issue_seq);
	fc->migrate_seq = cpu_to_le32(mseq);
	fc->caps = cpu_to_le32(caps);
	fc->wanted = cpu_to_le32(wanted);
	fc->dirty = cpu_to_le32(dirty);
	fc->ino = cpu_to_le64(ino);
	fc->snap_follows = cpu_to_le64(follows);

	fc->size = cpu_to_le64(size);
	fc->max_size = cpu_to_le64(max_size);
	if (mtime)
		ceph_encode_timespec(&fc->mtime, mtime);
	if (atime)
		ceph_encode_timespec(&fc->atime, atime);
	fc->time_warp_seq = cpu_to_le32(time_warp_seq);

	fc->uid = cpu_to_le32(uid);
	fc->gid = cpu_to_le32(gid);
	fc->mode = cpu_to_le32(mode);

	fc->xattr_version = cpu_to_le64(xattr_version);
	if (xattrs_buf) {
		msg->middle = ceph_buffer_get(xattrs_buf);
		fc->xattr_len = cpu_to_le32(xattrs_buf->vec.iov_len);
		msg->hdr.middle_len = cpu_to_le32(xattrs_buf->vec.iov_len);
	}

	ceph_con_send(&session->s_con, msg);
	return 0;
}
/*
 * Queue cap releases when an inode is dropped from our cache.  Since
 * inode is about to be destroyed, there is no need for i_lock.
 */
void ceph_queue_caps_release(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct rb_node *p;

	p = rb_first(&ci->i_caps);
	while (p) {
		struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node);
		struct ceph_mds_session *session = cap->session;
		struct ceph_msg *msg;
		struct ceph_mds_cap_release *head;
		struct ceph_mds_cap_item *item;

		spin_lock(&session->s_cap_lock);
		BUG_ON(!session->s_num_cap_releases);
		msg = list_first_entry(&session->s_cap_releases,
				       struct ceph_msg, list_head);

		dout(" adding %p release to mds%d msg %p (%d left)\n",
		     inode, session->s_mds, msg, session->s_num_cap_releases);

		BUG_ON(msg->front.iov_len + sizeof(*item) > PAGE_CACHE_SIZE);
		head = msg->front.iov_base;
		head->num = cpu_to_le32(le32_to_cpu(head->num) + 1);
		item = msg->front.iov_base + msg->front.iov_len;
		item->ino = cpu_to_le64(ceph_ino(inode));
		item->cap_id = cpu_to_le64(cap->cap_id);
		item->migrate_seq = cpu_to_le32(cap->mseq);
		item->seq = cpu_to_le32(cap->issue_seq);

		session->s_num_cap_releases--;

		msg->front.iov_len += sizeof(*item);
		if (le32_to_cpu(head->num) == CEPH_CAPS_PER_RELEASE) {
			dout(" release msg %p full\n", msg);
			list_move_tail(&msg->list_head,
				       &session->s_cap_releases_done);
		} else {
			dout(" release msg %p at %d/%d (%d)\n", msg,
			     (int)le32_to_cpu(head->num),
			     (int)CEPH_CAPS_PER_RELEASE,
			     (int)msg->front.iov_len);
		}
		spin_unlock(&session->s_cap_lock);
		p = rb_next(p);
		__ceph_remove_cap(cap);
	}
}
/*
 * Send a cap msg on the given inode.  Update our caps state, then
 * drop i_lock and send the message.
 *
 * Make note of max_size reported/requested from mds, revoked caps
 * that have now been implemented.
 *
 * Make half-hearted attempt to invalidate page cache if we are
 * dropping RDCACHE.  Note that this will leave behind locked pages
 * that we'll then need to deal with elsewhere.
 *
 * Return non-zero if delayed release, or we experienced an error
 * such that the caller should requeue + retry later.
 *
 * called with i_lock, then drops it.
 * caller should hold snap_rwsem (read), s_mutex.
 */
static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
		      int op, int used, int want, int retain, int flushing,
		      unsigned *pflush_tid)
	__releases(cap->ci->vfs_inode->i_lock)
{
	struct ceph_inode_info *ci = cap->ci;
	struct inode *inode = &ci->vfs_inode;
	u64 cap_id = cap->cap_id;
	int held, revoking, dropping, keep;
	u64 seq, issue_seq, mseq, time_warp_seq, follows;
	u64 size, max_size;
	struct timespec mtime, atime;
	int wake = 0;
	mode_t mode;
	uid_t uid;
	gid_t gid;
	struct ceph_mds_session *session;
	u64 xattr_version = 0;
	int delayed = 0;
	u64 flush_tid = 0;
	int i;
	int ret;

	held = cap->issued | cap->implemented;
	revoking = cap->implemented & ~cap->issued;
	retain &= ~revoking;
	dropping = cap->issued & ~retain;

	dout("__send_cap %p cap %p session %p %s -> %s (revoking %s)\n",
	     inode, cap, cap->session,
	     ceph_cap_string(held), ceph_cap_string(held & retain),
	     ceph_cap_string(revoking));
	BUG_ON((retain & CEPH_CAP_PIN) == 0);

	session = cap->session;

	/* don't release wanted unless we've waited a bit. */
	if ((ci->i_ceph_flags & CEPH_I_NODELAY) == 0 &&
	    time_before(jiffies, ci->i_hold_caps_min)) {
		dout(" delaying issued %s -> %s, wanted %s -> %s on send\n",
		     ceph_cap_string(cap->issued),
		     ceph_cap_string(cap->issued & retain),
		     ceph_cap_string(cap->mds_wanted),
		     ceph_cap_string(want));
		want |= cap->mds_wanted;
		retain |= cap->issued;
		delayed = 1;
	}
	ci->i_ceph_flags &= ~(CEPH_I_NODELAY | CEPH_I_FLUSH);

	cap->issued &= retain;  /* drop bits we don't want */
	if (cap->implemented & ~cap->issued) {
		/*
		 * Wake up any waiters on wanted -> needed transition.
		 * This is due to the weird transition from buffered
		 * to sync IO... we need to flush dirty pages _before_
		 * allowing sync writes to avoid reordering.
		 */
		wake = 1;
	}
	cap->implemented &= cap->issued | used;
	cap->mds_wanted = want;

	if (flushing) {
		/*
		 * assign a tid for flush operations so we can avoid
		 * flush1 -> dirty1 -> flush2 -> flushack1 -> mark
		 * clean type races.  track latest tid for every bit
		 * so we can handle flush AxFw, flush Fw, and have the
		 * first ack clean Ax.
		 */
		flush_tid = ++ci->i_cap_flush_last_tid;
		if (pflush_tid)
			*pflush_tid = flush_tid;
		dout(" cap_flush_tid %d\n", (int)flush_tid);
		for (i = 0; i < CEPH_CAP_BITS; i++)
			if (flushing & (1 << i))
				ci->i_cap_flush_tid[i] = flush_tid;
	}

	keep = cap->implemented;
	seq = cap->seq;
	issue_seq = cap->issue_seq;
	mseq = cap->mseq;
	size = inode->i_size;
	ci->i_reported_size = size;
	max_size = ci->i_wanted_max_size;
	ci->i_requested_max_size = max_size;
	mtime = inode->i_mtime;
	atime = inode->i_atime;
	time_warp_seq = ci->i_time_warp_seq;
	follows = ci->i_snap_realm->cached_context->seq;
	uid = inode->i_uid;
	gid = inode->i_gid;
	mode = inode->i_mode;

	if (dropping & CEPH_CAP_XATTR_EXCL) {
		__ceph_build_xattrs_blob(ci);
		xattr_version = ci->i_xattrs.version + 1;
	}

	spin_unlock(&inode->i_lock);

	ret = send_cap_msg(session, ceph_vino(inode).ino, cap_id,
		op, keep, want, flushing, seq, flush_tid, issue_seq, mseq,
		size, max_size, &mtime, &atime, time_warp_seq,
		uid, gid, mode,
		xattr_version,
		(flushing & CEPH_CAP_XATTR_EXCL) ? ci->i_xattrs.blob : NULL,
		follows);
	if (ret < 0) {
		dout("error sending cap msg, must requeue %p\n", inode);
		delayed = 1;
	}

	if (wake)
		wake_up(&ci->i_cap_wq);

	return delayed;
}
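
/*
 * Usage sketch (mirroring the callers below): ceph_check_caps() and
 * try_flush_caps() invoke __send_cap() with i_lock held; a nonzero
 * return means the release was delayed (or the send failed) and the
 * inode should be requeued via __cap_delay_requeue().
 */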
/*
 * When a snapshot is taken, clients accumulate dirty metadata on
 * inodes with capabilities in ceph_cap_snaps to describe the file
 * state at the time the snapshot was taken.  This must be flushed
 * asynchronously back to the MDS once sync writes complete and dirty
 * data is written out.
 *
 * Called under i_lock.  Takes s_mutex as needed.
 */
void __ceph_flush_snaps(struct ceph_inode_info *ci,
			struct ceph_mds_session **psession)
{
	struct inode *inode = &ci->vfs_inode;
	int mds;
	struct ceph_cap_snap *capsnap;
	u32 mseq;
	struct ceph_mds_client *mdsc = &ceph_inode_to_client(inode)->mdsc;
	struct ceph_mds_session *session = NULL; /* if session != NULL, we hold
						    session->s_mutex */
	u64 next_follows = 0;  /* keep track of how far we've gotten through the
			     i_cap_snaps list, and skip these entries next time
			     around to avoid an infinite loop */

	if (psession)
		session = *psession;

	dout("__flush_snaps %p\n", inode);
retry:
	list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
		/* avoid an infinite loop after retry */
		if (capsnap->follows < next_follows)
			continue;
		/*
		 * we need to wait for sync writes to complete and for dirty
		 * pages to be written out.
		 */
		if (capsnap->dirty_pages || capsnap->writing)
			continue;

		/* pick mds, take s_mutex */
		mds = __ceph_get_cap_mds(ci, &mseq);
		if (session && session->s_mds != mds) {
			dout("oops, wrong session %p mutex\n", session);
			mutex_unlock(&session->s_mutex);
			ceph_put_mds_session(session);
			session = NULL;
		}
		if (!session) {
			spin_unlock(&inode->i_lock);
			mutex_lock(&mdsc->mutex);
			session = __ceph_lookup_mds_session(mdsc, mds);
			mutex_unlock(&mdsc->mutex);
			if (session) {
				dout("inverting session/ino locks on %p\n",
				     session);
				mutex_lock(&session->s_mutex);
			}
			/*
			 * if session == NULL, we raced against a cap
			 * deletion.  retry, and we'll get a better
			 * @mds value next time.
			 */
			spin_lock(&inode->i_lock);
			goto retry;
		}

		capsnap->flush_tid = ++ci->i_cap_flush_last_tid;
		atomic_inc(&capsnap->nref);
		if (!list_empty(&capsnap->flushing_item))
			list_del_init(&capsnap->flushing_item);
		list_add_tail(&capsnap->flushing_item,
			      &session->s_cap_snaps_flushing);
		spin_unlock(&inode->i_lock);

		dout("flush_snaps %p cap_snap %p follows %lld size %llu\n",
		     inode, capsnap, next_follows, capsnap->size);
		send_cap_msg(session, ceph_vino(inode).ino, 0,
			     CEPH_CAP_OP_FLUSHSNAP, capsnap->issued, 0,
			     capsnap->dirty, 0, capsnap->flush_tid, 0, mseq,
			     capsnap->size, 0,
			     &capsnap->mtime, &capsnap->atime,
			     capsnap->time_warp_seq,
			     capsnap->uid, capsnap->gid, capsnap->mode,
			     0, NULL,
			     capsnap->follows);

		next_follows = capsnap->follows + 1;
		ceph_put_cap_snap(capsnap);

		spin_lock(&inode->i_lock);
		goto retry;
	}

	/* we flushed them all; remove this inode from the queue */
	spin_lock(&mdsc->snap_flush_lock);
	list_del_init(&ci->i_snap_flush_item);
	spin_unlock(&mdsc->snap_flush_lock);

	if (psession)
		*psession = session;
	else if (session) {
		mutex_unlock(&session->s_mutex);
		ceph_put_mds_session(session);
	}
}
static void ceph_flush_snaps(struct ceph_inode_info *ci)
{
	struct inode *inode = &ci->vfs_inode;

	spin_lock(&inode->i_lock);
	__ceph_flush_snaps(ci, NULL);
	spin_unlock(&inode->i_lock);
}
/*
 * Mark caps dirty.  If inode is newly dirty, add to the global dirty
 * list.
 */
void __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask)
{
	struct ceph_mds_client *mdsc = &ceph_client(ci->vfs_inode.i_sb)->mdsc;
	struct inode *inode = &ci->vfs_inode;
	int was = ci->i_dirty_caps;
	int dirty = 0;

	dout("__mark_dirty_caps %p %s dirty %s -> %s\n", &ci->vfs_inode,
	     ceph_cap_string(mask), ceph_cap_string(was),
	     ceph_cap_string(was | mask));
	ci->i_dirty_caps |= mask;
	if (was == 0) {
		dout(" inode %p now dirty\n", &ci->vfs_inode);
		BUG_ON(!list_empty(&ci->i_dirty_item));
		spin_lock(&mdsc->cap_dirty_lock);
		list_add(&ci->i_dirty_item, &mdsc->cap_dirty);
		spin_unlock(&mdsc->cap_dirty_lock);
		if (ci->i_flushing_caps == 0) {
			igrab(inode);
			dirty |= I_DIRTY_SYNC;
		}
	} else
		BUG_ON(list_empty(&ci->i_dirty_item));
	if (((was | ci->i_flushing_caps) & CEPH_CAP_FILE_BUFFER) &&
	    (mask & CEPH_CAP_FILE_BUFFER))
		dirty |= I_DIRTY_DATASYNC;
	if (dirty)
		__mark_inode_dirty(inode, dirty);
	__cap_delay_requeue(mdsc, ci);
}
/*
 * Add dirty inode to the flushing list.  Assign a seq number so we
 * can wait for caps to flush without starving.
 *
 * Called under i_lock.
 */
static int __mark_caps_flushing(struct inode *inode,
				struct ceph_mds_session *session)
{
	struct ceph_mds_client *mdsc = &ceph_client(inode->i_sb)->mdsc;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int flushing;

	BUG_ON(ci->i_dirty_caps == 0);
	BUG_ON(list_empty(&ci->i_dirty_item));

	flushing = ci->i_dirty_caps;
	dout("__mark_caps_flushing flushing %s, flushing_caps %s -> %s\n",
	     ceph_cap_string(flushing),
	     ceph_cap_string(ci->i_flushing_caps),
	     ceph_cap_string(ci->i_flushing_caps | flushing));
	ci->i_flushing_caps |= flushing;
	ci->i_dirty_caps = 0;
	dout(" inode %p now !dirty\n", inode);

	spin_lock(&mdsc->cap_dirty_lock);
	list_del_init(&ci->i_dirty_item);

	ci->i_cap_flush_seq = ++mdsc->cap_flush_seq;
	if (list_empty(&ci->i_flushing_item)) {
		list_add_tail(&ci->i_flushing_item, &session->s_cap_flushing);
		mdsc->num_cap_flushing++;
		dout(" inode %p now flushing seq %lld\n", inode,
		     ci->i_cap_flush_seq);
	} else {
		list_move_tail(&ci->i_flushing_item, &session->s_cap_flushing);
		dout(" inode %p now flushing (more) seq %lld\n", inode,
		     ci->i_cap_flush_seq);
	}
	spin_unlock(&mdsc->cap_dirty_lock);

	return flushing;
}
/*
 * try to invalidate mapping pages without blocking.
 */
static int mapping_is_empty(struct address_space *mapping)
{
	struct page *page = find_get_page(mapping, 0);

	if (!page)
		return 1;

	put_page(page);
	return 0;
}

static int try_nonblocking_invalidate(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	u32 invalidating_gen = ci->i_rdcache_gen;

	spin_unlock(&inode->i_lock);
	invalidate_mapping_pages(&inode->i_data, 0, -1);
	spin_lock(&inode->i_lock);

	if (mapping_is_empty(&inode->i_data) &&
	    invalidating_gen == ci->i_rdcache_gen) {
		/* success. */
		dout("try_nonblocking_invalidate %p success\n", inode);
		ci->i_rdcache_gen = 0;
		ci->i_rdcache_revoking = 0;
		return 0;
	}
	dout("try_nonblocking_invalidate %p failed\n", inode);
	return -1;
}
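
/*
 * Note (an informal reading of the check above): i_rdcache_gen is
 * sampled before i_lock is dropped and compared again after the
 * invalidate; if it changed, new pages may have been cached in the
 * unlocked window, so the invalidate is not treated as successful.
 */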
/*
 * Swiss army knife function to examine currently used and wanted
 * versus held caps.  Release, flush, ack revoked caps to mds as
 * appropriate.
 *
 *  CHECK_CAPS_NODELAY - caller is delayed work and we should not delay
 *    cap release further.
 *  CHECK_CAPS_AUTHONLY - we should only check the auth cap
 *  CHECK_CAPS_FLUSH - we should flush any dirty caps immediately, without
 *    further delay.
 */
void ceph_check_caps(struct ceph_inode_info *ci, int flags,
		     struct ceph_mds_session *session)
{
	struct ceph_client *client = ceph_inode_to_client(&ci->vfs_inode);
	struct ceph_mds_client *mdsc = &client->mdsc;
	struct inode *inode = &ci->vfs_inode;
	struct ceph_cap *cap;
	int file_wanted, used;
	int took_snap_rwsem = 0;             /* true if mdsc->snap_rwsem held */
	int drop_session_lock = session ? 0 : 1;
	int issued, implemented, want, retain, revoking, flushing = 0;
	int mds = -1;   /* keep track of how far we've gone through i_caps list
			   to avoid an infinite loop on retry */
	struct rb_node *p;
	int tried_invalidate = 0;
	int delayed = 0, sent = 0, force_requeue = 0, num;
	int queue_invalidate = 0;
	int is_delayed = flags & CHECK_CAPS_NODELAY;

	/* if we are unmounting, flush any unused caps immediately. */
	if (mdsc->stopping)
		is_delayed = 1;

	spin_lock(&inode->i_lock);

	if (ci->i_ceph_flags & CEPH_I_FLUSH)
		flags |= CHECK_CAPS_FLUSH;

	/* flush snaps first time around only */
	if (!list_empty(&ci->i_cap_snaps))
		__ceph_flush_snaps(ci, &session);
	goto retry_locked;
retry:
	spin_lock(&inode->i_lock);
retry_locked:
	file_wanted = __ceph_caps_file_wanted(ci);
	used = __ceph_caps_used(ci);
	want = file_wanted | used;
	issued = __ceph_caps_issued(ci, &implemented);
	revoking = implemented & ~issued;

	retain = want | CEPH_CAP_PIN;
	if (!mdsc->stopping && inode->i_nlink > 0) {
		if (want) {
			retain |= CEPH_CAP_ANY;       /* be greedy */
		} else {
			retain |= CEPH_CAP_ANY_SHARED;
			/*
			 * keep RD only if we didn't have the file open RW,
			 * because then the mds would revoke it anyway to
			 * journal max_size=0.
			 */
			if (ci->i_max_size == 0)
				retain |= CEPH_CAP_ANY_RD;
		}
	}

	dout("check_caps %p file_want %s used %s dirty %s flushing %s"
	     " issued %s revoking %s retain %s %s%s%s\n", inode,
	     ceph_cap_string(file_wanted),
	     ceph_cap_string(used), ceph_cap_string(ci->i_dirty_caps),
	     ceph_cap_string(ci->i_flushing_caps),
	     ceph_cap_string(issued), ceph_cap_string(revoking),
	     ceph_cap_string(retain),
	     (flags & CHECK_CAPS_AUTHONLY) ? " AUTHONLY" : "",
	     (flags & CHECK_CAPS_NODELAY) ? " NODELAY" : "",
	     (flags & CHECK_CAPS_FLUSH) ? " FLUSH" : "");

	/*
	 * If we no longer need to hold onto our old caps, and we may
	 * have cached pages, but don't want them, then try to invalidate.
	 * If we fail, it's because pages are locked.... try again later.
	 */
	if ((!is_delayed || mdsc->stopping) &&
	    ci->i_wrbuffer_ref == 0 &&               /* no dirty pages... */
	    ci->i_rdcache_gen &&                     /* may have cached pages */
	    (file_wanted == 0 ||                     /* no open files */
	     (revoking & CEPH_CAP_FILE_CACHE)) &&    /*  or revoking cache */
	    !tried_invalidate) {
		dout("check_caps trying to invalidate on %p\n", inode);
		if (try_nonblocking_invalidate(inode) < 0) {
			if (revoking & CEPH_CAP_FILE_CACHE) {
				dout("check_caps queuing invalidate\n");
				queue_invalidate = 1;
				ci->i_rdcache_revoking = ci->i_rdcache_gen;
			} else {
				dout("check_caps failed to invalidate pages\n");
				/* we failed to invalidate pages.  check these
				   caps again later. */
				force_requeue = 1;
				__cap_set_timeouts(mdsc, ci);
			}
		}
		tried_invalidate = 1;
		goto retry_locked;
	}

	num = 0;
	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		num++;

		/* avoid looping forever */
		if (mds >= cap->mds ||
		    ((flags & CHECK_CAPS_AUTHONLY) && cap != ci->i_auth_cap))
			continue;

		/* NOTE: no side-effects allowed, until we take s_mutex */

		revoking = cap->implemented & ~cap->issued;
		if (revoking)
			dout(" mds%d revoking %s\n", cap->mds,
			     ceph_cap_string(revoking));

		if (cap == ci->i_auth_cap &&
		    (cap->issued & CEPH_CAP_FILE_WR)) {
			/* request larger max_size from MDS? */
			if (ci->i_wanted_max_size > ci->i_max_size &&
			    ci->i_wanted_max_size > ci->i_requested_max_size) {
				dout("requesting new max_size\n");
				goto ack;
			}

			/* approaching file_max? */
			if ((inode->i_size << 1) >= ci->i_max_size &&
			    (ci->i_reported_size << 1) < ci->i_max_size) {
				dout("i_size approaching max_size\n");
				goto ack;
			}
		}
		/* flush anything dirty? */
		if (cap == ci->i_auth_cap && (flags & CHECK_CAPS_FLUSH) &&
		    ci->i_dirty_caps) {
			dout("flushing dirty caps\n");
			goto ack;
		}

		/* completed revocation? going down and there are no caps? */
		if (revoking && (revoking & used) == 0) {
			dout("completed revocation of %s\n",
			     ceph_cap_string(cap->implemented & ~cap->issued));
			goto ack;
		}

		/* want more caps from mds? */
		if (want & ~(cap->mds_wanted | cap->issued))
			goto ack;

		/* things we might delay */
		if ((cap->issued & ~retain) == 0 &&
		    cap->mds_wanted == want)
			continue;     /* nope, all good */

		if (is_delayed)
			goto ack;

		/* delay? */
		if ((ci->i_ceph_flags & CEPH_I_NODELAY) == 0 &&
		    time_before(jiffies, ci->i_hold_caps_max)) {
			dout(" delaying issued %s -> %s, wanted %s -> %s\n",
			     ceph_cap_string(cap->issued),
			     ceph_cap_string(cap->issued & retain),
			     ceph_cap_string(cap->mds_wanted),
			     ceph_cap_string(want));
			delayed++;
			continue;
		}

ack:
		if (session && session != cap->session) {
			dout("oops, wrong session %p mutex\n", session);
			mutex_unlock(&session->s_mutex);
			session = NULL;
		}
		if (!session) {
			session = cap->session;
			if (mutex_trylock(&session->s_mutex) == 0) {
				dout("inverting session/ino locks on %p\n",
				     session);
				spin_unlock(&inode->i_lock);
				if (took_snap_rwsem) {
					up_read(&mdsc->snap_rwsem);
					took_snap_rwsem = 0;
				}
				mutex_lock(&session->s_mutex);
				goto retry;
			}
		}
		/* take snap_rwsem after session mutex */
		if (!took_snap_rwsem) {
			if (down_read_trylock(&mdsc->snap_rwsem) == 0) {
				dout("inverting snap/in locks on %p\n",
				     inode);
				spin_unlock(&inode->i_lock);
				down_read(&mdsc->snap_rwsem);
				took_snap_rwsem = 1;
				goto retry;
			}
			took_snap_rwsem = 1;
		}

		if (cap == ci->i_auth_cap && ci->i_dirty_caps)
			flushing = __mark_caps_flushing(inode, session);

		mds = cap->mds;  /* remember mds, so we don't repeat */
		sent++;

		/* __send_cap drops i_lock */
		delayed += __send_cap(mdsc, cap, CEPH_CAP_OP_UPDATE, used, want,
				      retain, flushing, NULL);
		goto retry; /* retake i_lock and restart our cap scan. */
	}

	/*
	 * Reschedule delayed caps release if we delayed anything,
	 * otherwise cancel.
	 */
	if (delayed && is_delayed)
		force_requeue = 1;   /* __send_cap delayed release; requeue */
	if (!delayed && !is_delayed)
		__cap_delay_cancel(mdsc, ci);
	else if (!is_delayed || force_requeue)
		__cap_delay_requeue(mdsc, ci);

	spin_unlock(&inode->i_lock);

	if (queue_invalidate)
		ceph_queue_invalidate(inode);

	if (session && drop_session_lock)
		mutex_unlock(&session->s_mutex);
	if (took_snap_rwsem)
		up_read(&mdsc->snap_rwsem);
}
/*
 * Try to flush dirty caps back to the auth mds.
 */
static int try_flush_caps(struct inode *inode, struct ceph_mds_session *session,
			  unsigned *flush_tid)
{
	struct ceph_mds_client *mdsc = &ceph_client(inode->i_sb)->mdsc;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int unlock_session = session ? 0 : 1;
	int flushing = 0;

retry:
	spin_lock(&inode->i_lock);
	if (ci->i_dirty_caps && ci->i_auth_cap) {
		struct ceph_cap *cap = ci->i_auth_cap;
		int used = __ceph_caps_used(ci);
		int want = __ceph_caps_wanted(ci);
		int delayed;

		if (!session) {
			spin_unlock(&inode->i_lock);
			session = cap->session;
			mutex_lock(&session->s_mutex);
			goto retry;
		}
		BUG_ON(session != cap->session);
		if (cap->session->s_state < CEPH_MDS_SESSION_OPEN)
			goto out;

		flushing = __mark_caps_flushing(inode, session);

		/* __send_cap drops i_lock */
		delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH, used, want,
				     cap->issued | cap->implemented, flushing,
				     flush_tid);
		if (!delayed)
			goto out_unlocked;

		spin_lock(&inode->i_lock);
		__cap_delay_requeue(mdsc, ci);
	}
out:
	spin_unlock(&inode->i_lock);
out_unlocked:
	if (session && unlock_session)
		mutex_unlock(&session->s_mutex);
	return flushing;
}
/*
 * Return true if we've flushed caps through the given flush_tid.
 */
static int caps_are_flushed(struct inode *inode, unsigned tid)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int dirty, i, ret = 1;

	spin_lock(&inode->i_lock);
	dirty = __ceph_caps_dirty(ci);
	for (i = 0; i < CEPH_CAP_BITS; i++)
		if ((ci->i_flushing_caps & (1 << i)) &&
		    ci->i_cap_flush_tid[i] <= tid) {
			/* still flushing this bit */
			ret = 0;
			break;
		}
	spin_unlock(&inode->i_lock);
	return ret;
}
/*
 * Wait on any unsafe replies for the given inode.  First wait on the
 * newest request, and make that the upper bound.  Then, if there are
 * more requests, keep waiting on the oldest as long as it is still older
 * than the original request.
 */
static void sync_write_wait(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct list_head *head = &ci->i_unsafe_writes;
	struct ceph_osd_request *req;
	u64 last_tid;

	spin_lock(&ci->i_unsafe_lock);
	if (list_empty(head))
		goto out;

	/* set upper bound as _last_ entry in chain */
	req = list_entry(head->prev, struct ceph_osd_request,
			 r_unsafe_item);
	last_tid = req->r_tid;

	do {
		ceph_osdc_get_request(req);
		spin_unlock(&ci->i_unsafe_lock);
		dout("sync_write_wait on tid %llu (until %llu)\n",
		     req->r_tid, last_tid);
		wait_for_completion(&req->r_safe_completion);
		spin_lock(&ci->i_unsafe_lock);
		ceph_osdc_put_request(req);

		/*
		 * from here on look at first entry in chain, since we
		 * only want to wait for anything older than last_tid
		 */
		if (list_empty(head))
			break;
		req = list_entry(head->next, struct ceph_osd_request,
				 r_unsafe_item);
	} while (req->r_tid < last_tid);
out:
	spin_unlock(&ci->i_unsafe_lock);
}
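
/*
 * Worked example (hypothetical tids): with unsafe writes 3, 5 and 7
 * in flight, we first wait on 7 (the newest) and make it the upper
 * bound; we then keep waiting on whatever is oldest (3, then 5) until
 * the head of the list is no longer older than 7.
 */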
int ceph_fsync(struct file *file, struct dentry *dentry, int datasync)
{
	struct inode *inode = dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	unsigned flush_tid;
	int ret;
	int dirty;

	dout("fsync %p%s\n", inode, datasync ? " datasync" : "");
	sync_write_wait(inode);

	ret = filemap_write_and_wait(inode->i_mapping);
	if (ret < 0)
		return ret;

	dirty = try_flush_caps(inode, NULL, &flush_tid);
	dout("fsync dirty caps are %s\n", ceph_cap_string(dirty));

	/*
	 * only wait on non-file metadata writeback (the mds
	 * can recover size and mtime, so we don't need to
	 * wait for that)
	 */
	if (!datasync && (dirty & ~CEPH_CAP_ANY_FILE_WR)) {
		dout("fsync waiting for flush_tid %u\n", flush_tid);
		ret = wait_event_interruptible(ci->i_cap_wq,
				       caps_are_flushed(inode, flush_tid));
	}

	dout("fsync %p%s done\n", inode, datasync ? " datasync" : "");
	return ret;
}
/*
 * Flush any dirty caps back to the mds.  If we aren't asked to wait,
 * queue inode for flush but don't do so immediately, because we can
 * get by with fewer MDS messages if we wait for data writeback to
 * complete first.
 */
int ceph_write_inode(struct inode *inode, int wait)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	unsigned flush_tid;
	int err = 0;
	int dirty;

	dout("write_inode %p wait=%d\n", inode, wait);
	if (wait) {
		dirty = try_flush_caps(inode, NULL, &flush_tid);
		if (dirty)
			err = wait_event_interruptible(ci->i_cap_wq,
				       caps_are_flushed(inode, flush_tid));
	} else {
		struct ceph_mds_client *mdsc = &ceph_client(inode->i_sb)->mdsc;

		spin_lock(&inode->i_lock);
		if (__ceph_caps_dirty(ci))
			__cap_delay_requeue_front(mdsc, ci);
		spin_unlock(&inode->i_lock);
	}
	return err;
}
/*
 * After a recovering MDS goes active, we need to resend any caps
 * we were flushing.
 *
 * Caller holds session->s_mutex.
 */
static void kick_flushing_capsnaps(struct ceph_mds_client *mdsc,
				   struct ceph_mds_session *session)
{
	struct ceph_cap_snap *capsnap;

	dout("kick_flushing_capsnaps mds%d\n", session->s_mds);
	list_for_each_entry(capsnap, &session->s_cap_snaps_flushing,
			    flushing_item) {
		struct ceph_inode_info *ci = capsnap->ci;
		struct inode *inode = &ci->vfs_inode;
		struct ceph_cap *cap;

		spin_lock(&inode->i_lock);
		cap = ci->i_auth_cap;
		if (cap && cap->session == session) {
			dout("kick_flushing_caps %p cap %p capsnap %p\n", inode,
			     cap, capsnap);
			__ceph_flush_snaps(ci, &session);
		} else {
			pr_err("%p auth cap %p not mds%d ???\n", inode,
			       cap, session->s_mds);
			spin_unlock(&inode->i_lock);
		}
	}
}
void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
			     struct ceph_mds_session *session)
{
	struct ceph_inode_info *ci;

	kick_flushing_capsnaps(mdsc, session);

	dout("kick_flushing_caps mds%d\n", session->s_mds);
	list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) {
		struct inode *inode = &ci->vfs_inode;
		struct ceph_cap *cap;
		int delayed = 0;

		spin_lock(&inode->i_lock);
		cap = ci->i_auth_cap;
		if (cap && cap->session == session) {
			dout("kick_flushing_caps %p cap %p %s\n", inode,
			     cap, ceph_cap_string(ci->i_flushing_caps));
			delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH,
					     __ceph_caps_used(ci),
					     __ceph_caps_wanted(ci),
					     cap->issued | cap->implemented,
					     ci->i_flushing_caps, NULL);
			if (delayed) {
				spin_lock(&inode->i_lock);
				__cap_delay_requeue(mdsc, ci);
				spin_unlock(&inode->i_lock);
			}
		} else {
			pr_err("%p auth cap %p not mds%d ???\n", inode,
			       cap, session->s_mds);
			spin_unlock(&inode->i_lock);
		}
	}
}
/*
 * Take references to capabilities we hold, so that we don't release
 * them to the MDS prematurely.
 *
 * Protected by i_lock.
 */
static void __take_cap_refs(struct ceph_inode_info *ci, int got)
{
	if (got & CEPH_CAP_PIN)
		ci->i_pin_ref++;
	if (got & CEPH_CAP_FILE_RD)
		ci->i_rd_ref++;
	if (got & CEPH_CAP_FILE_CACHE)
		ci->i_rdcache_ref++;
	if (got & CEPH_CAP_FILE_WR)
		ci->i_wr_ref++;
	if (got & CEPH_CAP_FILE_BUFFER) {
		if (ci->i_wrbuffer_ref == 0)
			igrab(&ci->vfs_inode);
		ci->i_wrbuffer_ref++;
		dout("__take_cap_refs %p wrbuffer %d -> %d (?)\n",
		     &ci->vfs_inode, ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref);
	}
}
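
/*
 * Note: the igrab() taken for the first FILE_BUFFER ref above is
 * balanced by the iput() in ceph_put_cap_refs() (via its 'put' count)
 * once the last wrbuffer ref is dropped.
 */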
/*
 * Try to grab cap references.  Specify those refs we @want, and the
 * minimal set we @need.  Also include the larger offset we are writing
 * to (when applicable), and check against max_size here as well.
 * Note that caller is responsible for ensuring max_size increases are
 * requested from the MDS.
 */
static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want,
			    int *got, loff_t endoff, int *check_max, int *err)
{
	struct inode *inode = &ci->vfs_inode;
	int ret = 0;
	int have, implemented;

	dout("get_cap_refs %p need %s want %s\n", inode,
	     ceph_cap_string(need), ceph_cap_string(want));
	spin_lock(&inode->i_lock);

	/* make sure we _have_ some caps! */
	if (!__ceph_is_any_caps(ci)) {
		dout("get_cap_refs %p no real caps\n", inode);
		*err = -EBADF;
		ret = 1;
		goto out;
	}

	if (need & CEPH_CAP_FILE_WR) {
		if (endoff >= 0 && endoff > (loff_t)ci->i_max_size) {
			dout("get_cap_refs %p endoff %llu > maxsize %llu\n",
			     inode, endoff, ci->i_max_size);
			if (endoff > ci->i_wanted_max_size) {
				*check_max = 1;
				ret = 1;
			}
			goto out;
		}
		/*
		 * If a sync write is in progress, we must wait, so that we
		 * can get a final snapshot value for size+mtime.
		 */
		if (__ceph_have_pending_cap_snap(ci)) {
			dout("get_cap_refs %p cap_snap_pending\n", inode);
			goto out;
		}
	}
	have = __ceph_caps_issued(ci, &implemented);

	/*
	 * disallow writes while a truncate is pending
	 */
	if (ci->i_truncate_pending)
		have &= ~CEPH_CAP_FILE_WR;

	if ((have & need) == need) {
		/*
		 * Look at (implemented & ~have & not) so that we keep waiting
		 * on transition from wanted -> needed caps.  This is needed
		 * for WRBUFFER|WR -> WR to avoid a new WR sync write from
		 * going before a prior buffered writeback happens.
		 */
		int not = want & ~(have & need);
		int revoking = implemented & ~have;
		dout("get_cap_refs %p have %s but not %s (revoking %s)\n",
		     inode, ceph_cap_string(have), ceph_cap_string(not),
		     ceph_cap_string(revoking));
		if ((revoking & not) == 0) {
			*got = need | (have & want);
			__take_cap_refs(ci, *got);
			ret = 1;
		}
	} else {
		dout("get_cap_refs %p have %s needed %s\n", inode,
		     ceph_cap_string(have), ceph_cap_string(need));
	}
out:
	spin_unlock(&inode->i_lock);
	dout("get_cap_refs %p ret %d got %s\n", inode,
	     ret, ceph_cap_string(*got));
	return ret;
}
1996 * max_size. If necessary, tell the MDS we want to write to
1999 static void check_max_size(struct inode *inode, loff_t endoff)
2001 struct ceph_inode_info *ci = ceph_inode(inode);
2004 /* do we need to explicitly request a larger max_size? */
2005 spin_lock(&inode->i_lock);
2006 if ((endoff >= ci->i_max_size ||
2007 endoff > (inode->i_size << 1)) &&
2008 endoff > ci->i_wanted_max_size) {
2009 dout("write %p at large endoff %llu, req max_size\n",
2011 ci->i_wanted_max_size = endoff;
2014 spin_unlock(&inode->i_lock);
2016 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
/*
 * Wait for caps, and take cap references.  If we can't get a WR cap
 * due to a small max_size, make sure we check_max_size (and possibly
 * ask the mds) so we don't get hung up indefinitely.
 */
int ceph_get_caps(struct ceph_inode_info *ci, int need, int want, int *got,
		  loff_t endoff)
{
	int check_max, ret, err;

retry:
	if (endoff > 0)
		check_max_size(&ci->vfs_inode, endoff);
	check_max = 0;
	err = 0;
	ret = wait_event_interruptible(ci->i_cap_wq,
				       try_get_cap_refs(ci, need, want,
							got, endoff,
							&check_max, &err));
	if (err)
		ret = err;
	if (check_max)
		goto retry;
	return ret;
}
/*
 * Take cap refs.  Caller must already know we hold at least one ref
 * on the caps in question or we don't know this is safe.
 */
void ceph_get_cap_refs(struct ceph_inode_info *ci, int caps)
{
	spin_lock(&ci->vfs_inode.i_lock);
	__take_cap_refs(ci, caps);
	spin_unlock(&ci->vfs_inode.i_lock);
}
/*
 * Release cap refs.
 *
 * If we released the last ref on any given cap, call ceph_check_caps
 * to release (or schedule a release).
 *
 * If we are releasing a WR cap (from a sync write), finalize any affected
 * cap_snap, and wake up any waiters.
 */
void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
{
	struct inode *inode = &ci->vfs_inode;
	int last = 0, put = 0, flushsnaps = 0, wake = 0;
	struct ceph_cap_snap *capsnap;

	spin_lock(&inode->i_lock);
	if (had & CEPH_CAP_PIN)
		--ci->i_pin_ref;
	if (had & CEPH_CAP_FILE_RD)
		if (--ci->i_rd_ref == 0)
			last++;
	if (had & CEPH_CAP_FILE_CACHE)
		if (--ci->i_rdcache_ref == 0)
			last++;
	if (had & CEPH_CAP_FILE_BUFFER) {
		if (--ci->i_wrbuffer_ref == 0) {
			last++;
			put++;
		}
		dout("put_cap_refs %p wrbuffer %d -> %d (?)\n",
		     inode, ci->i_wrbuffer_ref+1, ci->i_wrbuffer_ref);
	}
	if (had & CEPH_CAP_FILE_WR)
		if (--ci->i_wr_ref == 0) {
			last++;
			if (!list_empty(&ci->i_cap_snaps)) {
				capsnap = list_first_entry(&ci->i_cap_snaps,
						     struct ceph_cap_snap,
						     ci_item);
				if (capsnap->writing) {
					capsnap->writing = 0;
					flushsnaps =
						__ceph_finish_cap_snap(ci,
								       capsnap);
					wake = 1;
				}
			}
		}
	spin_unlock(&inode->i_lock);

	dout("put_cap_refs %p had %s %s\n", inode, ceph_cap_string(had),
	     last ? "last" : "");

	if (last && !flushsnaps)
		ceph_check_caps(ci, 0, NULL);
	else if (flushsnaps)
		ceph_flush_snaps(ci);
	if (wake)
		wake_up(&ci->i_cap_wq);
	if (put)
		iput(inode);
}
/*
 * Release @nr WRBUFFER refs on dirty pages for the given @snapc snap
 * context.  Adjust per-snap dirty page accounting as appropriate.
 * Once all dirty data for a cap_snap is flushed, flush snapped file
 * metadata back to the MDS.  If we dropped the last ref, call
 * ceph_check_caps.
 */
void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
				struct ceph_snap_context *snapc)
{
	struct inode *inode = &ci->vfs_inode;
	int last = 0;
	int last_snap = 0;
	int found = 0;
	struct ceph_cap_snap *capsnap = NULL;

	spin_lock(&inode->i_lock);
	ci->i_wrbuffer_ref -= nr;
	last = !ci->i_wrbuffer_ref;

	if (ci->i_head_snapc == snapc) {
		ci->i_wrbuffer_ref_head -= nr;
		if (!ci->i_wrbuffer_ref_head) {
			ceph_put_snap_context(ci->i_head_snapc);
			ci->i_head_snapc = NULL;
		}
		dout("put_wrbuffer_cap_refs on %p head %d/%d -> %d/%d %s\n",
		     inode,
		     ci->i_wrbuffer_ref+nr, ci->i_wrbuffer_ref_head+nr,
		     ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
		     last ? " LAST" : "");
	} else {
		list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
			if (capsnap->context == snapc) {
				found = 1;
				capsnap->dirty_pages -= nr;
				last_snap = !capsnap->dirty_pages;
				break;
			}
		}
		BUG_ON(!found);
		dout("put_wrbuffer_cap_refs on %p cap_snap %p "
		     " snap %lld %d/%d -> %d/%d %s%s\n",
		     inode, capsnap, capsnap->context->seq,
		     ci->i_wrbuffer_ref+nr, capsnap->dirty_pages + nr,
		     ci->i_wrbuffer_ref, capsnap->dirty_pages,
		     last ? " (wrbuffer last)" : "",
		     last_snap ? " (capsnap last)" : "");
	}

	spin_unlock(&inode->i_lock);

	if (last) {
		ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
		iput(inode);
	} else if (last_snap) {
		ceph_flush_snaps(ci);
		wake_up(&ci->i_cap_wq);
	}
}
2181 * Handle a cap GRANT message from the MDS. (Note that a GRANT may
2182 * actually be a revocation if it specifies a smaller cap set.)
2184 * caller holds s_mutex.
2187 * 1 - check_caps on auth cap only (writeback)
2188 * 2 - check_caps (ack revoke)
static int handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
			    struct ceph_mds_session *session,
			    struct ceph_cap *cap,
			    struct ceph_buffer *xattr_buf)
	__releases(inode->i_lock)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int mds = session->s_mds;
	int seq = le32_to_cpu(grant->seq);
	int newcaps = le32_to_cpu(grant->caps);
	int issued, implemented, used, wanted, dirty;
	u64 size = le64_to_cpu(grant->size);
	u64 max_size = le64_to_cpu(grant->max_size);
	struct timespec mtime, atime, ctime;
	int reply = 0;
	int wake = 0;
	int writeback = 0;
	int revoked_rdcache = 0;
	int queue_invalidate = 0;

	dout("handle_cap_grant inode %p cap %p mds%d seq %d %s\n",
	     inode, cap, mds, seq, ceph_cap_string(newcaps));
	dout(" size %llu max_size %llu, i_size %llu\n", size, max_size,
	     inode->i_size);

	/*
	 * If CACHE is being revoked, and we have no dirty buffers,
	 * try to invalidate (once).  (If there are dirty buffers, we
	 * will invalidate _after_ writeback.)
	 */
	if (((cap->issued & ~newcaps) & CEPH_CAP_FILE_CACHE) &&
	    !ci->i_wrbuffer_ref) {
		if (try_nonblocking_invalidate(inode) == 0) {
			revoked_rdcache = 1;
		} else {
			/* there were locked pages.. invalidate later
			   in a separate thread. */
			if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
				queue_invalidate = 1;
				ci->i_rdcache_revoking = ci->i_rdcache_gen;
			}
		}
	}

	/* side effects now are allowed */

	issued = __ceph_caps_issued(ci, &implemented);
	issued |= implemented | __ceph_caps_dirty(ci);

	cap->cap_gen = session->s_cap_gen;

	__check_cap_issue(ci, cap, newcaps);

	if ((issued & CEPH_CAP_AUTH_EXCL) == 0) {
		inode->i_mode = le32_to_cpu(grant->mode);
		inode->i_uid = le32_to_cpu(grant->uid);
		inode->i_gid = le32_to_cpu(grant->gid);
		dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
		     inode->i_uid, inode->i_gid);
	}

	if ((issued & CEPH_CAP_LINK_EXCL) == 0)
		inode->i_nlink = le32_to_cpu(grant->nlink);

	if ((issued & CEPH_CAP_XATTR_EXCL) == 0 && grant->xattr_len) {
		int len = le32_to_cpu(grant->xattr_len);
		u64 version = le64_to_cpu(grant->xattr_version);

		if (version > ci->i_xattrs.version) {
			dout(" got new xattrs v%llu on %p len %d\n",
			     version, inode, len);
			if (ci->i_xattrs.blob)
				ceph_buffer_put(ci->i_xattrs.blob);
			ci->i_xattrs.blob = ceph_buffer_get(xattr_buf);
			ci->i_xattrs.version = version;
		}
	}

	/* size/ctime/mtime/atime? */
	ceph_fill_file_size(inode, issued,
			    le32_to_cpu(grant->truncate_seq),
			    le64_to_cpu(grant->truncate_size), size);
	ceph_decode_timespec(&mtime, &grant->mtime);
	ceph_decode_timespec(&atime, &grant->atime);
	ceph_decode_timespec(&ctime, &grant->ctime);
	ceph_fill_file_time(inode, issued,
			    le32_to_cpu(grant->time_warp_seq), &ctime, &mtime,
			    &atime);

	/* max size increase? */
	if (max_size != ci->i_max_size) {
		dout("max_size %lld -> %llu\n", ci->i_max_size, max_size);
		ci->i_max_size = max_size;
		if (max_size >= ci->i_wanted_max_size) {
			ci->i_wanted_max_size = 0;  /* reset */
			ci->i_requested_max_size = 0;
		}
		wake = 1;
	}

	/* check cap bits */
	wanted = __ceph_caps_wanted(ci);
	used = __ceph_caps_used(ci);
	dirty = __ceph_caps_dirty(ci);
	dout(" my wanted = %s, used = %s, dirty %s\n",
	     ceph_cap_string(wanted),
	     ceph_cap_string(used),
	     ceph_cap_string(dirty));
	if (wanted != le32_to_cpu(grant->wanted)) {
		dout("mds wanted %s -> %s\n",
		     ceph_cap_string(le32_to_cpu(grant->wanted)),
		     ceph_cap_string(wanted));
		grant->wanted = cpu_to_le32(wanted);
	}

	cap->seq = seq;

	/* file layout may have changed */
	ci->i_layout = grant->layout;

	/* revocation, grant, or no-op? */
	if (cap->issued & ~newcaps) {
		dout("revocation: %s -> %s\n", ceph_cap_string(cap->issued),
		     ceph_cap_string(newcaps));
		if ((used & ~newcaps) & CEPH_CAP_FILE_BUFFER)
			writeback = 1; /* will delay ack */
		else if (dirty & ~newcaps)
			reply = 1;     /* initiate writeback in check_caps */
		else if (((used & ~newcaps) & CEPH_CAP_FILE_CACHE) == 0 ||
			 revoked_rdcache)
			reply = 2;     /* send revoke ack in check_caps */
		cap->issued = newcaps;
	} else if (cap->issued == newcaps) {
		dout("caps unchanged: %s -> %s\n",
		     ceph_cap_string(cap->issued), ceph_cap_string(newcaps));
	} else {
		dout("grant: %s -> %s\n", ceph_cap_string(cap->issued),
		     ceph_cap_string(newcaps));
		cap->issued = newcaps;
		cap->implemented |= newcaps;    /* add bits only, to
						 * avoid stepping on a
						 * pending revocation */
	}

	spin_unlock(&inode->i_lock);

	if (writeback)
		/*
		 * queue inode for writeback: we can't actually call
		 * filemap_write_and_wait, etc. from message handler
		 * context.
		 */
		ceph_queue_writeback(inode);
	if (queue_invalidate)
		ceph_queue_invalidate(inode);
	if (wake)
		wake_up(&ci->i_cap_wq);

	return reply;
}
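
/*
 * Worked example (illustrative, not from the original source): suppose
 * cap->issued is FILE_SHARED|FILE_CACHE (plus shared auth/link/xattr
 * bits) and the MDS sends newcaps with everything but FILE_CACHE.
 * Then
 *
 *	cap->issued & ~newcaps == CEPH_CAP_FILE_CACHE
 *
 * so this is a revocation of the page-cache cap: the mapping is
 * invalidated (or the invalidate is queued) and, with nothing dirty or
 * in use, the function returns 2 so the caller acks the revoke via
 * check_caps.  Had newcaps been a superset of cap->issued, the same
 * expression would be 0 and the final branch would simply widen
 * cap->issued and cap->implemented.
 */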

/*
 * Handle FLUSH_ACK from MDS, indicating that metadata we sent to the
 * MDS has been safely committed.
 */
static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
				 struct ceph_mds_caps *m,
				 struct ceph_mds_session *session,
				 struct ceph_cap *cap)
	__releases(inode->i_lock)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_mds_client *mdsc = &ceph_client(inode->i_sb)->mdsc;
	unsigned seq = le32_to_cpu(m->seq);
	int dirty = le32_to_cpu(m->dirty);
	int cleaned = 0;
	int drop = 0;
	int i;

	for (i = 0; i < CEPH_CAP_BITS; i++)
		if ((dirty & (1 << i)) &&
		    flush_tid == ci->i_cap_flush_tid[i])
			cleaned |= 1 << i;

	dout("handle_cap_flush_ack inode %p mds%d seq %d on %s cleaned %s,"
	     " flushing %s -> %s\n",
	     inode, session->s_mds, seq, ceph_cap_string(dirty),
	     ceph_cap_string(cleaned), ceph_cap_string(ci->i_flushing_caps),
	     ceph_cap_string(ci->i_flushing_caps & ~cleaned));

	if (ci->i_flushing_caps == (ci->i_flushing_caps & ~cleaned))
		goto out;

	ci->i_flushing_caps &= ~cleaned;

	spin_lock(&mdsc->cap_dirty_lock);
	if (ci->i_flushing_caps == 0) {
		list_del_init(&ci->i_flushing_item);
		if (!list_empty(&session->s_cap_flushing))
			dout(" mds%d still flushing cap on %p\n",
			     session->s_mds,
			     &list_entry(session->s_cap_flushing.next,
					 struct ceph_inode_info,
					 i_flushing_item)->vfs_inode);
		mdsc->num_cap_flushing--;
		wake_up(&mdsc->cap_flushing_wq);
		dout(" inode %p now !flushing\n", inode);

		if (ci->i_dirty_caps == 0) {
			dout(" inode %p now clean\n", inode);
			BUG_ON(!list_empty(&ci->i_dirty_item));
			drop = 1;
		} else {
			BUG_ON(list_empty(&ci->i_dirty_item));
		}
	}
	spin_unlock(&mdsc->cap_dirty_lock);
	wake_up(&ci->i_cap_wq);

out:
	spin_unlock(&inode->i_lock);
	if (drop)
		iput(inode);
}
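
/*
 * Illustrative note (not from the original source): i_cap_flush_tid[]
 * records, per cap bit, the tid of the flush that last carried that
 * bit to the MDS.  If Fw was flushed with tid 5, then dirtied and
 * flushed again with tid 7, a FLUSH_ACK for tid 5 matches no entry,
 * so the loop above leaves Fw out of 'cleaned' and the bit stays in
 * i_flushing_caps until the tid 7 ack arrives.
 */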

/*
 * Handle FLUSHSNAP_ACK.  MDS has flushed snap data to disk and we can
 * throw away our cap_snap.
 *
 * Caller holds s_mutex.
 */
static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
				     struct ceph_mds_caps *m,
				     struct ceph_mds_session *session)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 follows = le64_to_cpu(m->snap_follows);
	struct ceph_cap_snap *capsnap;
	int drop = 0;

	dout("handle_cap_flushsnap_ack inode %p ci %p mds%d follows %lld\n",
	     inode, ci, session->s_mds, follows);

	spin_lock(&inode->i_lock);
	list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
		if (capsnap->follows == follows) {
			if (capsnap->flush_tid != flush_tid) {
				dout(" cap_snap %p follows %lld tid %lld !="
				     " %lld\n", capsnap, follows,
				     flush_tid, capsnap->flush_tid);
				break;
			}
			WARN_ON(capsnap->dirty_pages || capsnap->writing);
			dout(" removing cap_snap %p follows %lld\n",
			     capsnap, follows);
			ceph_put_snap_context(capsnap->context);
			list_del(&capsnap->ci_item);
			list_del(&capsnap->flushing_item);
			ceph_put_cap_snap(capsnap);
			drop = 1;
			break;
		} else {
			dout(" skipping cap_snap %p follows %lld\n",
			     capsnap, capsnap->follows);
		}
	}
	spin_unlock(&inode->i_lock);
	if (drop)
		iput(inode);
}
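
/*
 * Lifecycle sketch (assumption, not from the original source): a
 * cap_snap is created when cap state changes while an older snap
 * context still has dirty data.  The iput() above pairs with an inode
 * reference taken when the cap_snap was queued, so the inode cannot be
 * evicted while snapped data is still awaiting the MDS ack.
 */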

/*
 * Handle TRUNC from MDS, indicating file truncation.
 *
 * caller holds s_mutex.
 */
static void handle_cap_trunc(struct inode *inode,
			     struct ceph_mds_caps *trunc,
			     struct ceph_mds_session *session)
	__releases(inode->i_lock)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int mds = session->s_mds;
	int seq = le32_to_cpu(trunc->seq);
	u32 truncate_seq = le32_to_cpu(trunc->truncate_seq);
	u64 truncate_size = le64_to_cpu(trunc->truncate_size);
	u64 size = le64_to_cpu(trunc->size);
	int implemented = 0;
	int dirty = __ceph_caps_dirty(ci);
	int issued = __ceph_caps_issued(ceph_inode(inode), &implemented);
	int queue_trunc = 0;

	issued |= implemented | dirty;

	dout("handle_cap_trunc inode %p mds%d seq %d to %lld seq %d\n",
	     inode, mds, seq, truncate_size, truncate_seq);
	queue_trunc = ceph_fill_file_size(inode, issued,
					  truncate_seq, truncate_size, size);
	spin_unlock(&inode->i_lock);

	if (queue_trunc)
		ceph_queue_vmtruncate(inode);
}
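
/*
 * Illustrative note (not from the original source): truncate_seq
 * orders racing size updates.  If the client has already applied a
 * truncate with truncate_seq 8, a TRUNC message still carrying seq 7
 * is stale; ceph_fill_file_size() compares the sequence numbers,
 * ignores the old update, and returns 0, so no vmtruncate work is
 * queued for it.
 */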

/*
 * Handle EXPORT from MDS.  Cap is being migrated _from_ this mds to a
 * different one.  If we are the most recent migration we've seen (as
 * indicated by mseq), make note of the migrating cap bits for the
 * duration (until we see the corresponding IMPORT).
 *
 * caller holds s_mutex
 */
static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
			      struct ceph_mds_session *session)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int mds = session->s_mds;
	unsigned mseq = le32_to_cpu(ex->migrate_seq);
	struct ceph_cap *cap = NULL, *t;
	struct rb_node *p;
	int remember = 1;

	dout("handle_cap_export inode %p ci %p mds%d mseq %d\n",
	     inode, ci, mds, mseq);

	spin_lock(&inode->i_lock);

	/* make sure we haven't seen a higher mseq */
	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		t = rb_entry(p, struct ceph_cap, ci_node);
		if (ceph_seq_cmp(t->mseq, mseq) > 0) {
			dout(" higher mseq on cap from mds%d\n",
			     t->session->s_mds);
			remember = 0;
		}
		if (t->session->s_mds == mds)
			cap = t;
	}

	if (cap) {
		if (remember) {
			/* make note */
			ci->i_cap_exporting_mds = mds;
			ci->i_cap_exporting_mseq = mseq;
			ci->i_cap_exporting_issued = cap->issued;
		}
		__ceph_remove_cap(cap);
	} else {
		WARN_ON(!cap);
	}

	spin_unlock(&inode->i_lock);
}
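
/*
 * Example timeline (illustrative, not from the original source): a cap
 * migrating from mds0 to mds1 produces
 *
 *	EXPORT from mds0 (mseq M)   -> stash issued bits in
 *	                               i_cap_exporting_{mds,mseq,issued}
 *	IMPORT from mds1 (mseq M+1) -> clear the stash, ceph_add_cap()
 *
 * Stashing the bits keeps the issued-caps accounting stable across the
 * window where no struct ceph_cap exists for the inode, so cached data
 * is not thrown away mid-migration.
 */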

/*
 * Handle cap IMPORT.  If there are temp bits from an older EXPORT,
 * clean them up.
 *
 * caller holds s_mutex.
 */
static void handle_cap_import(struct ceph_mds_client *mdsc,
			      struct inode *inode, struct ceph_mds_caps *im,
			      struct ceph_mds_session *session,
			      void *snaptrace, int snaptrace_len)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int mds = session->s_mds;
	unsigned issued = le32_to_cpu(im->caps);
	unsigned wanted = le32_to_cpu(im->wanted);
	unsigned seq = le32_to_cpu(im->seq);
	unsigned mseq = le32_to_cpu(im->migrate_seq);
	u64 realmino = le64_to_cpu(im->realm);
	u64 cap_id = le64_to_cpu(im->cap_id);

	if (ci->i_cap_exporting_mds >= 0 &&
	    ceph_seq_cmp(ci->i_cap_exporting_mseq, mseq) < 0) {
		dout("handle_cap_import inode %p ci %p mds%d mseq %d"
		     " - cleared exporting from mds%d\n",
		     inode, ci, mds, mseq,
		     ci->i_cap_exporting_mds);
		ci->i_cap_exporting_issued = 0;
		ci->i_cap_exporting_mseq = 0;
		ci->i_cap_exporting_mds = -1;
	} else {
		dout("handle_cap_import inode %p ci %p mds%d mseq %d\n",
		     inode, ci, mds, mseq);
	}

	down_write(&mdsc->snap_rwsem);
	ceph_update_snap_trace(mdsc, snaptrace, snaptrace+snaptrace_len,
			       false);
	downgrade_write(&mdsc->snap_rwsem);
	ceph_add_cap(inode, session, cap_id, -1,
		     issued, wanted, seq, mseq, realmino, CEPH_CAP_FLAG_AUTH,
		     NULL /* no caps context */);
	try_flush_caps(inode, session, NULL);
	up_read(&mdsc->snap_rwsem);
}
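
/*
 * Locking note (assumption, not from the original source): decoding
 * the snap trace requires snap_rwsem held for write, while
 * ceph_add_cap() only needs read access to the realm hierarchy.  The
 * downgrade_write() keeps the rwsem held continuously, avoiding a
 * window between decode and add_cap in which the realms could change
 * underneath us.
 */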

/*
 * Handle a caps message from the MDS.
 *
 * Identify the appropriate session, inode, and call the right handler
 * based on the cap op.
 */
void ceph_handle_caps(struct ceph_mds_session *session,
		      struct ceph_msg *msg)
{
	struct ceph_mds_client *mdsc = session->s_mdsc;
	struct super_block *sb = mdsc->client->sb;
	struct inode *inode;
	struct ceph_cap *cap;
	struct ceph_mds_caps *h;
	int mds = session->s_mds;
	int op;
	u32 seq;
	struct ceph_vino vino;
	u64 cap_id;
	u64 size, max_size;
	u64 tid;
	void *snaptrace;
	int check_caps = 0;
	int r;

	dout("handle_caps from mds%d\n", mds);

	/* decode */
	tid = le64_to_cpu(msg->hdr.tid);
	if (msg->front.iov_len < sizeof(*h))
		goto bad;
	h = msg->front.iov_base;
	snaptrace = h + 1;
	op = le32_to_cpu(h->op);
	vino.ino = le64_to_cpu(h->ino);
	vino.snap = CEPH_NOSNAP;
	cap_id = le64_to_cpu(h->cap_id);
	seq = le32_to_cpu(h->seq);
	size = le64_to_cpu(h->size);
	max_size = le64_to_cpu(h->max_size);

	mutex_lock(&session->s_mutex);
	session->s_seq++;
	dout(" mds%d seq %lld cap seq %u\n", session->s_mds, session->s_seq,
	     (unsigned)seq);

	/* lookup ino */
	inode = ceph_find_inode(sb, vino);
	dout(" op %s ino %llx.%llx inode %p\n", ceph_cap_op_name(op), vino.ino,
	     vino.snap, inode);
	if (!inode) {
		dout(" i don't have ino %llx\n", vino.ino);
		goto done;
	}

	/* these will work even if we don't have a cap yet */
	switch (op) {
	case CEPH_CAP_OP_FLUSHSNAP_ACK:
		handle_cap_flushsnap_ack(inode, tid, h, session);
		goto done;
	case CEPH_CAP_OP_EXPORT:
		handle_cap_export(inode, h, session);
		goto done;
	case CEPH_CAP_OP_IMPORT:
		handle_cap_import(mdsc, inode, h, session,
				  snaptrace, le32_to_cpu(h->snap_trace_len));
		check_caps = 1; /* we may have sent a RELEASE to the old auth */
		goto done;
	}

	/* the rest require a cap */
	spin_lock(&inode->i_lock);
	cap = __get_cap_for_mds(ceph_inode(inode), mds);
	if (!cap) {
		dout("no cap on %p ino %llx.%llx from mds%d, releasing\n",
		     inode, ceph_ino(inode), ceph_snap(inode), mds);
		spin_unlock(&inode->i_lock);
		goto done;
	}

	/* note that each of these drops i_lock for us */
	switch (op) {
	case CEPH_CAP_OP_REVOKE:
	case CEPH_CAP_OP_GRANT:
		r = handle_cap_grant(inode, h, session, cap, msg->middle);
		if (r == 1)
			ceph_check_caps(ceph_inode(inode),
					CHECK_CAPS_NODELAY|CHECK_CAPS_AUTHONLY,
					session);
		else if (r == 2)
			ceph_check_caps(ceph_inode(inode),
					CHECK_CAPS_NODELAY,
					session);
		break;
	case CEPH_CAP_OP_FLUSH_ACK:
		handle_cap_flush_ack(inode, tid, h, session, cap);
		break;
	case CEPH_CAP_OP_TRUNC:
		handle_cap_trunc(inode, h, session);
		break;
	default:
		spin_unlock(&inode->i_lock);
		pr_err("ceph_handle_caps: unknown cap op %d %s\n", op,
		       ceph_cap_op_name(op));
	}

done:
	mutex_unlock(&session->s_mutex);
	if (check_caps)
		ceph_check_caps(ceph_inode(inode), CHECK_CAPS_NODELAY, NULL);
	if (inode)
		iput(inode);
	return;

bad:
	pr_err("ceph_handle_caps: corrupt message\n");
	return;
}
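
/*
 * Dispatch summary (illustrative, not from the original source):
 *
 *	op              handler                     needs a local cap?
 *	FLUSHSNAP_ACK   handle_cap_flushsnap_ack    no
 *	EXPORT          handle_cap_export           no
 *	IMPORT          handle_cap_import           no
 *	GRANT/REVOKE    handle_cap_grant            yes
 *	FLUSH_ACK       handle_cap_flush_ack        yes
 *	TRUNC           handle_cap_trunc            yes
 *
 * The first group must work before any struct ceph_cap exists locally,
 * e.g. an IMPORT that arrives ahead of the reply that would have
 * created one.
 */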

/*
 * Delayed work handler to process end of delayed cap release LRU list.
 */
void ceph_check_delayed_caps(struct ceph_mds_client *mdsc)
{
	struct ceph_inode_info *ci;
	int flags = CHECK_CAPS_NODELAY;

	dout("check_delayed_caps\n");
	while (1) {
		spin_lock(&mdsc->cap_delay_lock);
		if (list_empty(&mdsc->cap_delay_list))
			break;
		ci = list_first_entry(&mdsc->cap_delay_list,
				      struct ceph_inode_info,
				      i_cap_delay_list);
		if ((ci->i_ceph_flags & CEPH_I_FLUSH) == 0 &&
		    time_before(jiffies, ci->i_hold_caps_max))
			break;
		list_del_init(&ci->i_cap_delay_list);
		spin_unlock(&mdsc->cap_delay_lock);
		dout("check_delayed_caps on %p\n", &ci->vfs_inode);
		ceph_check_caps(ci, flags, NULL);
	}
	spin_unlock(&mdsc->cap_delay_lock);
}
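
/*
 * Note (assumption, not from the original source): cap_delay_list is
 * kept in (approximate) expiry order because inodes are re-queued at
 * the tail with a fresh i_hold_caps_max.  The loop therefore only ever
 * inspects the head; the first entry that has not yet expired (and is
 * not flagged CEPH_I_FLUSH) means everything behind it can wait for a
 * later run.
 */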

/*
 * Flush all dirty caps to the mds
 */
void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc)
{
	struct ceph_inode_info *ci;
	struct inode *inode;

	dout("flush_dirty_caps\n");
	spin_lock(&mdsc->cap_dirty_lock);
	while (!list_empty(&mdsc->cap_dirty)) {
		ci = list_first_entry(&mdsc->cap_dirty,
				      struct ceph_inode_info,
				      i_dirty_item);
		inode = igrab(&ci->vfs_inode);
		spin_unlock(&mdsc->cap_dirty_lock);
		if (inode) {
			ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_FLUSH,
					NULL);
			iput(inode);
		}
		spin_lock(&mdsc->cap_dirty_lock);
	}
	spin_unlock(&mdsc->cap_dirty_lock);
}
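
/*
 * Usage sketch (illustrative, not from the original source): this is
 * the sync/unmount path; callers just do
 *
 *	ceph_flush_dirty_caps(mdsc);
 *
 * The igrab()/iput() pair keeps each inode alive across the window
 * where cap_dirty_lock is dropped, and CHECK_CAPS_FLUSH makes
 * check_caps push dirty metadata to the MDS rather than merely
 * re-evaluating which caps are wanted.
 */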

/*
 * Drop open file reference.  If we were the last open file,
 * we may need to release capabilities to the MDS (or schedule
 * their delayed release).
 */
void ceph_put_fmode(struct ceph_inode_info *ci, int fmode)
{
	struct inode *inode = &ci->vfs_inode;
	int last = 0;

	spin_lock(&inode->i_lock);
	dout("put_fmode %p fmode %d %d -> %d\n", inode, fmode,
	     ci->i_nr_by_mode[fmode], ci->i_nr_by_mode[fmode]-1);
	BUG_ON(ci->i_nr_by_mode[fmode] == 0);
	if (--ci->i_nr_by_mode[fmode] == 0)
		last++;
	spin_unlock(&inode->i_lock);

	if (last && ci->i_vino.snap == CEPH_NOSNAP)
		ceph_check_caps(ci, 0, NULL);
}
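
/*
 * Pairing sketch (illustrative, not from the original source): open
 * bumps the per-mode count that this drops, roughly
 *
 *	ceph_get_fmode(ci, fmode);   // fh opened: i_nr_by_mode[fmode]++
 *	...
 *	ceph_put_fmode(ci, fmode);   // fh closed: may trigger check_caps
 *
 * so the caps code can derive which file caps (e.g. Fr/Fw) are still
 * wanted from the open modes outstanding.
 */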

/*
 * Helpers for embedding cap and dentry lease releases into mds
 * requests.
 *
 * @force is used by dentry_release (below) to force inclusion of a
 * record for the directory inode, even when there aren't any caps to
 * drop.
 */
int ceph_encode_inode_release(void **p, struct inode *inode,
			      int mds, int drop, int unless, int force)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_cap *cap;
	struct ceph_mds_request_release *rel = *p;
	int ret = 0;

	dout("encode_inode_release %p mds%d drop %s unless %s\n", inode,
	     mds, ceph_cap_string(drop), ceph_cap_string(unless));

	spin_lock(&inode->i_lock);
	cap = __get_cap_for_mds(ci, mds);
	if (cap && __cap_is_valid(cap)) {
		if (force ||
		    ((cap->issued & drop) &&
		     (cap->issued & unless) == 0)) {
			if ((cap->issued & drop) &&
			    (cap->issued & unless) == 0) {
				dout("encode_inode_release %p cap %p %s -> "
				     "%s\n", inode, cap,
				     ceph_cap_string(cap->issued),
				     ceph_cap_string(cap->issued & ~drop));
				cap->issued &= ~drop;
				cap->implemented &= ~drop;
				if (ci->i_ceph_flags & CEPH_I_NODELAY) {
					int wanted = __ceph_caps_wanted(ci);
					dout(" wanted %s -> %s (act %s)\n",
					     ceph_cap_string(cap->mds_wanted),
					     ceph_cap_string(cap->mds_wanted &
							     ~wanted),
					     ceph_cap_string(wanted));
					cap->mds_wanted &= wanted;
				}
			} else {
				dout("encode_inode_release %p cap %p %s"
				     " (force)\n", inode, cap,
				     ceph_cap_string(cap->issued));
			}

			rel->ino = cpu_to_le64(ceph_ino(inode));
			rel->cap_id = cpu_to_le64(cap->cap_id);
			rel->seq = cpu_to_le32(cap->seq);
			rel->issue_seq = cpu_to_le32(cap->issue_seq);
			rel->mseq = cpu_to_le32(cap->mseq);
			rel->caps = cpu_to_le32(cap->issued);
			rel->wanted = cpu_to_le32(cap->mds_wanted);
			rel->dname_len = 0;
			rel->dname_seq = 0;
			*p += sizeof(*rel);
			ret = 1;
		} else {
			dout("encode_inode_release %p cap %p %s\n",
			     inode, cap, ceph_cap_string(cap->issued));
		}
	}
	spin_unlock(&inode->i_lock);
	return ret;
}
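
/*
 * Worked example (illustrative, not from the original source): for an
 * operation that invalidates a directory's contents, a caller might
 * pass drop = CEPH_CAP_FILE_SHARED and unless = CEPH_CAP_FILE_EXCL.
 * If the cap has Fs issued but not Fx, the release record is encoded
 * and the local issued/implemented bits shed Fs, saving the MDS a
 * revoke round trip; if Fx is issued (or Fs is not), nothing is
 * encoded and ret stays 0.
 */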
int ceph_encode_dentry_release(void **p, struct dentry *dentry,
			       int mds, int drop, int unless)
{
	struct inode *dir = dentry->d_parent->d_inode;
	struct ceph_mds_request_release *rel = *p;
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	int force = 0;
	int ret;

	/*
	 * force a record for the directory caps if we have a dentry lease.
	 * this is racy (can't take i_lock and d_lock together), but it
	 * doesn't have to be perfect; the mds will revoke anything we don't
	 * release.
	 */
	spin_lock(&dentry->d_lock);
	if (di->lease_session && di->lease_session->s_mds == mds)
		force = 1;
	spin_unlock(&dentry->d_lock);

	ret = ceph_encode_inode_release(p, dir, mds, drop, unless, force);

	spin_lock(&dentry->d_lock);
	if (ret && di->lease_session && di->lease_session->s_mds == mds) {
		dout("encode_dentry_release %p mds%d seq %d\n",
		     dentry, mds, (int)di->lease_seq);
		rel->dname_len = cpu_to_le32(dentry->d_name.len);
		memcpy(*p, dentry->d_name.name, dentry->d_name.len);
		*p += dentry->d_name.len;
		rel->dname_seq = cpu_to_le32(di->lease_seq);
	}
	spin_unlock(&dentry->d_lock);
	return ret;
}
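
/*
 * Layout note (assumption, not from the original source): the dentry
 * name is appended immediately after the fixed-size release record
 * that ceph_encode_inode_release() wrote, with rel->dname_len telling
 * the MDS how many bytes of name follow:
 *
 *	[struct ceph_mds_request_release][dname bytes...]
 *
 * which is why *p had already been advanced past the struct before
 * the memcpy.
 */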