ceph: fix version check on racing inode updates
[linux-2.6-block.git] fs/ceph/inode.c
#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/namei.h>
#include <linux/writeback.h>
#include <linux/vmalloc.h>
#include <linux/pagevec.h>

#include "super.h"
#include "mds_client.h"
#include <linux/ceph/decode.h>

/*
 * Ceph inode operations
 *
 * Implement basic inode helpers (get, alloc) and inode ops (getattr,
 * setattr, etc.), xattr helpers, and helpers for assimilating
 * metadata returned by the MDS into our cache.
 *
 * Also define helpers for doing asynchronous writeback, invalidation,
 * and truncation for the benefit of those who can't afford to block
 * (typically because they are in the message handler path).
 */

static const struct inode_operations ceph_symlink_iops;

static void ceph_invalidate_work(struct work_struct *work);
static void ceph_writeback_work(struct work_struct *work);
static void ceph_vmtruncate_work(struct work_struct *work);

/*
 * find or create an inode, given the ceph ino number
 */
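/*
 * Note (a sketch of the generic iget5_locked contract, not ceph-specific):
 * the ino_t is only a hash key; matching is done by ceph_ino_compare()
 * against the full vino (ino + snap), and a freshly created inode comes
 * back with I_NEW set after ceph_set_ino_cb() has initialized it from
 * @vino.
 */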
struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino)
{
	struct inode *inode;
	ino_t t = ceph_vino_to_ino(vino);

	inode = iget5_locked(sb, t, ceph_ino_compare, ceph_set_ino_cb, &vino);
	if (inode == NULL)
		return ERR_PTR(-ENOMEM);
	if (inode->i_state & I_NEW) {
		dout("get_inode created new inode %p %llx.%llx ino %llx\n",
		     inode, ceph_vinop(inode), (u64)inode->i_ino);
		unlock_new_inode(inode);
	}

	dout("get_inode on %lu=%llx.%llx got %p\n", inode->i_ino, vino.ino,
	     vino.snap, inode);
	return inode;
}

/*
 * get/construct snapdir inode for a given directory
 */
struct inode *ceph_get_snapdir(struct inode *parent)
{
	struct ceph_vino vino = {
		.ino = ceph_ino(parent),
		.snap = CEPH_SNAPDIR,
	};
	struct inode *inode = ceph_get_inode(parent->i_sb, vino);
	struct ceph_inode_info *ci = ceph_inode(inode);

	BUG_ON(!S_ISDIR(parent->i_mode));
	if (IS_ERR(inode))
		return inode;
	inode->i_mode = parent->i_mode;
	inode->i_uid = parent->i_uid;
	inode->i_gid = parent->i_gid;
	inode->i_op = &ceph_dir_iops;
	inode->i_fop = &ceph_dir_fops;
	ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
	ci->i_rbytes = 0;
	return inode;
}

const struct inode_operations ceph_file_iops = {
	.permission = ceph_permission,
	.setattr = ceph_setattr,
	.getattr = ceph_getattr,
	.setxattr = ceph_setxattr,
	.getxattr = ceph_getxattr,
	.listxattr = ceph_listxattr,
	.removexattr = ceph_removexattr,
};


/*
 * We use a 'frag tree' to keep track of the MDS's directory fragments
 * for a given inode (usually there is just a single fragment).  We
 * need to know when a child frag is delegated to a new MDS, or when
 * it is flagged as replicated, so we can direct our requests
 * accordingly.
 */

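/*
 * Rough sketch of the frag model (the exact bit layout lives in the
 * ceph_frag_* helpers): a frag names a slice of the dentry hash space
 * as a (bits, value) pair.  ceph_frag_make(0, 0) is the root covering
 * all values; a frag split by N has 2^N children, derived via
 * ceph_frag_make_child(), each covering an equal sub-slice.
 * ceph_choose_frag() below walks from the root toward the leaf whose
 * slice contains a given hash value.
 */
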
/*
 * find/create a frag in the tree
 */
static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci,
						    u32 f)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ceph_inode_frag *frag;
	int c;

	p = &ci->i_fragtree.rb_node;
	while (*p) {
		parent = *p;
		frag = rb_entry(parent, struct ceph_inode_frag, node);
		c = ceph_frag_compare(f, frag->frag);
		if (c < 0)
			p = &(*p)->rb_left;
		else if (c > 0)
			p = &(*p)->rb_right;
		else
			return frag;
	}

	frag = kmalloc(sizeof(*frag), GFP_NOFS);
	if (!frag) {
		pr_err("__get_or_create_frag ENOMEM on %p %llx.%llx "
		       "frag %x\n", &ci->vfs_inode,
		       ceph_vinop(&ci->vfs_inode), f);
		return ERR_PTR(-ENOMEM);
	}
	frag->frag = f;
	frag->split_by = 0;
	frag->mds = -1;
	frag->ndist = 0;

	rb_link_node(&frag->node, parent, p);
	rb_insert_color(&frag->node, &ci->i_fragtree);

	dout("get_or_create_frag added %llx.%llx frag %x\n",
	     ceph_vinop(&ci->vfs_inode), f);
	return frag;
}

/*
 * find a specific frag @f
 */
struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci, u32 f)
{
	struct rb_node *n = ci->i_fragtree.rb_node;

	while (n) {
		struct ceph_inode_frag *frag =
			rb_entry(n, struct ceph_inode_frag, node);
		int c = ceph_frag_compare(f, frag->frag);
		if (c < 0)
			n = n->rb_left;
		else if (c > 0)
			n = n->rb_right;
		else
			return frag;
	}
	return NULL;
}

/*
 * Choose frag containing the given value @v.  If @pfrag is
 * specified, copy the frag delegation info to the caller if
 * it is present.
 */
u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
		     struct ceph_inode_frag *pfrag,
		     int *found)
{
	u32 t = ceph_frag_make(0, 0);
	struct ceph_inode_frag *frag;
	unsigned nway, i;
	u32 n;

	if (found)
		*found = 0;

	mutex_lock(&ci->i_fragtree_mutex);
	while (1) {
		WARN_ON(!ceph_frag_contains_value(t, v));
		frag = __ceph_find_frag(ci, t);
		if (!frag)
			break; /* t is a leaf */
		if (frag->split_by == 0) {
			if (pfrag)
				memcpy(pfrag, frag, sizeof(*pfrag));
			if (found)
				*found = 1;
			break;
		}

		/* choose child */
		nway = 1 << frag->split_by;
		dout("choose_frag(%x) %x splits by %d (%d ways)\n", v, t,
		     frag->split_by, nway);
		for (i = 0; i < nway; i++) {
			n = ceph_frag_make_child(t, frag->split_by, i);
			if (ceph_frag_contains_value(n, v)) {
				t = n;
				break;
			}
		}
		BUG_ON(i == nway);
	}
	dout("choose_frag(%x) = %x\n", v, t);

	mutex_unlock(&ci->i_fragtree_mutex);
	return t;
}

/*
 * Process dirfrag (delegation) info from the mds.  Include leaf
 * fragment in tree ONLY if ndist > 0.  Otherwise, only
 * branches/splits are included in i_fragtree.
 */
static int ceph_fill_dirfrag(struct inode *inode,
			     struct ceph_mds_reply_dirfrag *dirinfo)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_frag *frag;
	u32 id = le32_to_cpu(dirinfo->frag);
	int mds = le32_to_cpu(dirinfo->auth);
	int ndist = le32_to_cpu(dirinfo->ndist);
	int i;
	int err = 0;

	mutex_lock(&ci->i_fragtree_mutex);
	if (ndist == 0) {
		/* no delegation info needed. */
		frag = __ceph_find_frag(ci, id);
		if (!frag)
			goto out;
		if (frag->split_by == 0) {
			/* tree leaf, remove */
			dout("fill_dirfrag removed %llx.%llx frag %x"
			     " (no ref)\n", ceph_vinop(inode), id);
			rb_erase(&frag->node, &ci->i_fragtree);
			kfree(frag);
		} else {
			/* tree branch, keep and clear */
			dout("fill_dirfrag cleared %llx.%llx frag %x"
			     " referral\n", ceph_vinop(inode), id);
			frag->mds = -1;
			frag->ndist = 0;
		}
		goto out;
	}

	/* find/add this frag to store mds delegation info */
	frag = __get_or_create_frag(ci, id);
	if (IS_ERR(frag)) {
		/* this is not the end of the world; we can continue
		   with bad/inaccurate delegation info */
		pr_err("fill_dirfrag ENOMEM on mds ref %llx.%llx fg %x\n",
		       ceph_vinop(inode), le32_to_cpu(dirinfo->frag));
		err = -ENOMEM;
		goto out;
	}

	frag->mds = mds;
	frag->ndist = min_t(u32, ndist, CEPH_MAX_DIRFRAG_REP);
	for (i = 0; i < frag->ndist; i++)
		frag->dist[i] = le32_to_cpu(dirinfo->dist[i]);
	dout("fill_dirfrag %llx.%llx frag %x ndist=%d\n",
	     ceph_vinop(inode), frag->frag, frag->ndist);

out:
	mutex_unlock(&ci->i_fragtree_mutex);
	return err;
}


/*
 * initialize a newly allocated inode.
 */
struct inode *ceph_alloc_inode(struct super_block *sb)
{
	struct ceph_inode_info *ci;
	int i;

	ci = kmem_cache_alloc(ceph_inode_cachep, GFP_NOFS);
	if (!ci)
		return NULL;

	dout("alloc_inode %p\n", &ci->vfs_inode);

	ci->i_version = 0;
	ci->i_time_warp_seq = 0;
	ci->i_ceph_flags = 0;
	ci->i_release_count = 0;
	ci->i_symlink = NULL;

	ci->i_fragtree = RB_ROOT;
	mutex_init(&ci->i_fragtree_mutex);

	ci->i_xattrs.blob = NULL;
	ci->i_xattrs.prealloc_blob = NULL;
	ci->i_xattrs.dirty = false;
	ci->i_xattrs.index = RB_ROOT;
	ci->i_xattrs.count = 0;
	ci->i_xattrs.names_size = 0;
	ci->i_xattrs.vals_size = 0;
	ci->i_xattrs.version = 0;
	ci->i_xattrs.index_version = 0;

	ci->i_caps = RB_ROOT;
	ci->i_auth_cap = NULL;
	ci->i_dirty_caps = 0;
	ci->i_flushing_caps = 0;
	INIT_LIST_HEAD(&ci->i_dirty_item);
	INIT_LIST_HEAD(&ci->i_flushing_item);
	ci->i_cap_flush_seq = 0;
	ci->i_cap_flush_last_tid = 0;
	memset(&ci->i_cap_flush_tid, 0, sizeof(ci->i_cap_flush_tid));
	init_waitqueue_head(&ci->i_cap_wq);
	ci->i_hold_caps_min = 0;
	ci->i_hold_caps_max = 0;
	INIT_LIST_HEAD(&ci->i_cap_delay_list);
	ci->i_cap_exporting_mds = 0;
	ci->i_cap_exporting_mseq = 0;
	ci->i_cap_exporting_issued = 0;
	INIT_LIST_HEAD(&ci->i_cap_snaps);
	ci->i_head_snapc = NULL;
	ci->i_snap_caps = 0;

	for (i = 0; i < CEPH_FILE_MODE_NUM; i++)
		ci->i_nr_by_mode[i] = 0;

	ci->i_truncate_seq = 0;
	ci->i_truncate_size = 0;
	ci->i_truncate_pending = 0;

	ci->i_max_size = 0;
	ci->i_reported_size = 0;
	ci->i_wanted_max_size = 0;
	ci->i_requested_max_size = 0;

	ci->i_pin_ref = 0;
	ci->i_rd_ref = 0;
	ci->i_rdcache_ref = 0;
	ci->i_wr_ref = 0;
	ci->i_wrbuffer_ref = 0;
	ci->i_wrbuffer_ref_head = 0;
	ci->i_shared_gen = 0;
	ci->i_rdcache_gen = 0;
	ci->i_rdcache_revoking = 0;

	INIT_LIST_HEAD(&ci->i_unsafe_writes);
	INIT_LIST_HEAD(&ci->i_unsafe_dirops);
	spin_lock_init(&ci->i_unsafe_lock);

	ci->i_snap_realm = NULL;
	INIT_LIST_HEAD(&ci->i_snap_realm_item);
	INIT_LIST_HEAD(&ci->i_snap_flush_item);

	INIT_WORK(&ci->i_wb_work, ceph_writeback_work);
	INIT_WORK(&ci->i_pg_inv_work, ceph_invalidate_work);

	INIT_WORK(&ci->i_vmtruncate_work, ceph_vmtruncate_work);

	return &ci->vfs_inode;
}

void ceph_destroy_inode(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_frag *frag;
	struct rb_node *n;

	dout("destroy_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode));

	ceph_queue_caps_release(inode);

	/*
	 * we may still have a snap_realm reference if there are stray
	 * caps in i_cap_exporting_issued or i_snap_caps.
	 */
	if (ci->i_snap_realm) {
		struct ceph_mds_client *mdsc =
			ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
		struct ceph_snap_realm *realm = ci->i_snap_realm;

		dout(" dropping residual ref to snap realm %p\n", realm);
		spin_lock(&realm->inodes_with_caps_lock);
		list_del_init(&ci->i_snap_realm_item);
		spin_unlock(&realm->inodes_with_caps_lock);
		ceph_put_snap_realm(mdsc, realm);
	}

	kfree(ci->i_symlink);
	while ((n = rb_first(&ci->i_fragtree)) != NULL) {
		frag = rb_entry(n, struct ceph_inode_frag, node);
		rb_erase(n, &ci->i_fragtree);
		kfree(frag);
	}

	__ceph_destroy_xattrs(ci);
	if (ci->i_xattrs.blob)
		ceph_buffer_put(ci->i_xattrs.blob);
	if (ci->i_xattrs.prealloc_blob)
		ceph_buffer_put(ci->i_xattrs.prealloc_blob);

	kmem_cache_free(ceph_inode_cachep, ci);
}


/*
 * Helpers to fill in size, ctime, mtime, and atime.  We have to be
 * careful because either the client or MDS may have more up to date
 * info, depending on which capabilities are held, and whether
 * time_warp_seq or truncate_seq have increased.  (Ordinarily, mtime
 * and size are monotonically increasing, except when utimes() or
 * truncate() increments the corresponding _seq values.)
 */
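/*
 * (ceph_seq_cmp() is assumed here to be a wraparound-safe u32
 * comparison, signed-difference style, so a sequence number that has
 * recently wrapped past zero still compares as newer than one just
 * below the wrap point.)
 */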
int ceph_fill_file_size(struct inode *inode, int issued,
			u32 truncate_seq, u64 truncate_size, u64 size)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int queue_trunc = 0;

	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 ||
	    (truncate_seq == ci->i_truncate_seq && size > inode->i_size)) {
		dout("size %lld -> %llu\n", inode->i_size, size);
		inode->i_size = size;
		inode->i_blocks = (size + (1<<9) - 1) >> 9;
		ci->i_reported_size = size;
		if (truncate_seq != ci->i_truncate_seq) {
			dout("truncate_seq %u -> %u\n",
			     ci->i_truncate_seq, truncate_seq);
			ci->i_truncate_seq = truncate_seq;
			/*
			 * If we hold relevant caps, or in the case where we're
			 * not the only client referencing this file and we
			 * don't hold those caps, then we need to check whether
			 * the file is either opened or mmapped
			 */
			if ((issued & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_RD|
				       CEPH_CAP_FILE_WR|CEPH_CAP_FILE_BUFFER|
				       CEPH_CAP_FILE_EXCL|
				       CEPH_CAP_FILE_LAZYIO)) ||
			    mapping_mapped(inode->i_mapping) ||
			    __ceph_caps_file_wanted(ci)) {
				ci->i_truncate_pending++;
				queue_trunc = 1;
			}
		}
	}
	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) >= 0 &&
	    ci->i_truncate_size != truncate_size) {
		dout("truncate_size %lld -> %llu\n", ci->i_truncate_size,
		     truncate_size);
		ci->i_truncate_size = truncate_size;
	}
	return queue_trunc;
}

void ceph_fill_file_time(struct inode *inode, int issued,
			 u64 time_warp_seq, struct timespec *ctime,
			 struct timespec *mtime, struct timespec *atime)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int warn = 0;

	if (issued & (CEPH_CAP_FILE_EXCL|
		      CEPH_CAP_FILE_WR|
		      CEPH_CAP_FILE_BUFFER)) {
		if (timespec_compare(ctime, &inode->i_ctime) > 0) {
			dout("ctime %ld.%09ld -> %ld.%09ld inc w/ cap\n",
			     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
			     ctime->tv_sec, ctime->tv_nsec);
			inode->i_ctime = *ctime;
		}
		if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
			/* the MDS did a utimes() */
			dout("mtime %ld.%09ld -> %ld.%09ld "
			     "tw %d -> %d\n",
			     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
			     mtime->tv_sec, mtime->tv_nsec,
			     ci->i_time_warp_seq, (int)time_warp_seq);

			inode->i_mtime = *mtime;
			inode->i_atime = *atime;
			ci->i_time_warp_seq = time_warp_seq;
		} else if (time_warp_seq == ci->i_time_warp_seq) {
			/* nobody did utimes(); take the max */
			if (timespec_compare(mtime, &inode->i_mtime) > 0) {
				dout("mtime %ld.%09ld -> %ld.%09ld inc\n",
				     inode->i_mtime.tv_sec,
				     inode->i_mtime.tv_nsec,
				     mtime->tv_sec, mtime->tv_nsec);
				inode->i_mtime = *mtime;
			}
			if (timespec_compare(atime, &inode->i_atime) > 0) {
				dout("atime %ld.%09ld -> %ld.%09ld inc\n",
				     inode->i_atime.tv_sec,
				     inode->i_atime.tv_nsec,
				     atime->tv_sec, atime->tv_nsec);
				inode->i_atime = *atime;
			}
		} else if (issued & CEPH_CAP_FILE_EXCL) {
			/* we did a utimes(); ignore mds values */
		} else {
			warn = 1;
		}
	} else {
		/* we have no write caps; whatever the MDS says is true */
		if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) {
			inode->i_ctime = *ctime;
			inode->i_mtime = *mtime;
			inode->i_atime = *atime;
			ci->i_time_warp_seq = time_warp_seq;
		} else {
			warn = 1;
		}
	}
	if (warn) /* time_warp_seq shouldn't go backwards */
		dout("%p mds time_warp_seq %llu < %u\n",
		     inode, time_warp_seq, ci->i_time_warp_seq);
}

/*
 * Populate an inode based on info from mds.  May be called on new or
 * existing inodes.
 */
static int fill_inode(struct inode *inode,
		      struct ceph_mds_reply_info_in *iinfo,
		      struct ceph_mds_reply_dirfrag *dirinfo,
		      struct ceph_mds_session *session,
		      unsigned long ttl_from, int cap_fmode,
		      struct ceph_cap_reservation *caps_reservation)
{
	struct ceph_mds_reply_inode *info = iinfo->in;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int i;
	int issued, implemented;
	struct timespec mtime, atime, ctime;
	u32 nsplits;
	struct ceph_buffer *xattr_blob = NULL;
	int err = 0;
	int queue_trunc = 0;

	dout("fill_inode %p ino %llx.%llx v %llu had %llu\n",
	     inode, ceph_vinop(inode), le64_to_cpu(info->version),
	     ci->i_version);

	/*
	 * prealloc xattr data, if it looks like we'll need it.  only
	 * if len > 4 (meaning there are actually xattrs; the first 4
	 * bytes are the xattr count).
	 */
	if (iinfo->xattr_len > 4) {
		xattr_blob = ceph_buffer_new(iinfo->xattr_len, GFP_NOFS);
		if (!xattr_blob)
			pr_err("fill_inode ENOMEM xattr blob %d bytes\n",
			       iinfo->xattr_len);
	}

	spin_lock(&inode->i_lock);

	/*
	 * provided version will be odd if inode value is projected,
	 * even if stable.  skip the update if we have newer stable
	 * info (ours>=theirs, e.g. due to racing mds replies), unless
	 * we are getting projected (unstable) info (in which case the
	 * version is odd, and we want ours>theirs).
	 *   us   them
	 *   2    2     skip
	 *   3    2     skip
	 *   3    3     update
	 */
	if (le64_to_cpu(info->version) > 0 &&
	    (ci->i_version & ~1) >= le64_to_cpu(info->version))
		goto no_change;
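	/*
	 * worked example of the check above: the low (projected) bit is
	 * masked off of our version, so us=3, them=2 gives (3 & ~1) = 2
	 * >= 2 -> skip, while us=3, them=3 gives 2 >= 3 false -> update,
	 * matching the table in the comment.
	 */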

	issued = __ceph_caps_issued(ci, &implemented);
	issued |= implemented | __ceph_caps_dirty(ci);

	/* update inode */
	ci->i_version = le64_to_cpu(info->version);
	inode->i_version++;
	inode->i_rdev = le32_to_cpu(info->rdev);

	if ((issued & CEPH_CAP_AUTH_EXCL) == 0) {
		inode->i_mode = le32_to_cpu(info->mode);
		inode->i_uid = le32_to_cpu(info->uid);
		inode->i_gid = le32_to_cpu(info->gid);
		dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
		     inode->i_uid, inode->i_gid);
	}

	if ((issued & CEPH_CAP_LINK_EXCL) == 0)
		inode->i_nlink = le32_to_cpu(info->nlink);

	/* be careful with mtime, atime, size */
	ceph_decode_timespec(&atime, &info->atime);
	ceph_decode_timespec(&mtime, &info->mtime);
	ceph_decode_timespec(&ctime, &info->ctime);
	queue_trunc = ceph_fill_file_size(inode, issued,
					  le32_to_cpu(info->truncate_seq),
					  le64_to_cpu(info->truncate_size),
					  le64_to_cpu(info->size));
	ceph_fill_file_time(inode, issued,
			    le32_to_cpu(info->time_warp_seq),
			    &ctime, &mtime, &atime);

	/* only update max_size on auth cap */
	if ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
	    ci->i_max_size != le64_to_cpu(info->max_size)) {
		dout("max_size %lld -> %llu\n", ci->i_max_size,
		     le64_to_cpu(info->max_size));
		ci->i_max_size = le64_to_cpu(info->max_size);
	}

	ci->i_layout = info->layout;
	inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;

	/* xattrs */
	/* note that if i_xattrs.len <= 4, i_xattrs.data will still be NULL. */
	if ((issued & CEPH_CAP_XATTR_EXCL) == 0 &&
	    le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
		if (ci->i_xattrs.blob)
			ceph_buffer_put(ci->i_xattrs.blob);
		ci->i_xattrs.blob = xattr_blob;
		if (xattr_blob)
			memcpy(ci->i_xattrs.blob->vec.iov_base,
			       iinfo->xattr_data, iinfo->xattr_len);
		ci->i_xattrs.version = le64_to_cpu(info->xattr_version);
		xattr_blob = NULL;
	}

	inode->i_mapping->a_ops = &ceph_aops;
	inode->i_mapping->backing_dev_info =
		&ceph_sb_to_client(inode->i_sb)->backing_dev_info;

	switch (inode->i_mode & S_IFMT) {
	case S_IFIFO:
	case S_IFBLK:
	case S_IFCHR:
	case S_IFSOCK:
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
		inode->i_op = &ceph_file_iops;
		break;
	case S_IFREG:
		inode->i_op = &ceph_file_iops;
		inode->i_fop = &ceph_file_fops;
		break;
	case S_IFLNK:
		inode->i_op = &ceph_symlink_iops;
		if (!ci->i_symlink) {
			int symlen = iinfo->symlink_len;
			char *sym;

			BUG_ON(symlen != inode->i_size);
			spin_unlock(&inode->i_lock);

			err = -ENOMEM;
			sym = kmalloc(symlen+1, GFP_NOFS);
			if (!sym)
				goto out;
			memcpy(sym, iinfo->symlink, symlen);
			sym[symlen] = 0;

			spin_lock(&inode->i_lock);
			if (!ci->i_symlink)
				ci->i_symlink = sym;
			else
				kfree(sym); /* lost a race */
		}
		break;
	case S_IFDIR:
		inode->i_op = &ceph_dir_iops;
		inode->i_fop = &ceph_dir_fops;

		ci->i_files = le64_to_cpu(info->files);
		ci->i_subdirs = le64_to_cpu(info->subdirs);
		ci->i_rbytes = le64_to_cpu(info->rbytes);
		ci->i_rfiles = le64_to_cpu(info->rfiles);
		ci->i_rsubdirs = le64_to_cpu(info->rsubdirs);
		ceph_decode_timespec(&ci->i_rctime, &info->rctime);

		/* set dir completion flag? */
		if (ci->i_files == 0 && ci->i_subdirs == 0 &&
		    ceph_snap(inode) == CEPH_NOSNAP &&
		    (le32_to_cpu(info->cap.caps) & CEPH_CAP_FILE_SHARED) &&
		    (issued & CEPH_CAP_FILE_EXCL) == 0 &&
		    (ci->i_ceph_flags & CEPH_I_COMPLETE) == 0) {
			dout(" marking %p complete (empty)\n", inode);
			ci->i_ceph_flags |= CEPH_I_COMPLETE;
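			/*
			 * readdir offsets 0 and 1 are reserved for "." and
			 * "..", so the first real entry in this (empty)
			 * dir will be handed offset 2.
			 */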
			ci->i_max_offset = 2;
		}

		/* it may be better to set st_size in getattr instead? */
		if (ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), RBYTES))
			inode->i_size = ci->i_rbytes;
		break;
	default:
		pr_err("fill_inode %llx.%llx BAD mode 0%o\n",
		       ceph_vinop(inode), inode->i_mode);
	}

no_change:
	spin_unlock(&inode->i_lock);

	/* queue truncate if we saw i_size decrease */
	if (queue_trunc)
		ceph_queue_vmtruncate(inode);

	/* populate frag tree */
	/* FIXME: move me up, if/when version reflects fragtree changes */
	nsplits = le32_to_cpu(info->fragtree.nsplits);
	mutex_lock(&ci->i_fragtree_mutex);
	for (i = 0; i < nsplits; i++) {
		u32 id = le32_to_cpu(info->fragtree.splits[i].frag);
		struct ceph_inode_frag *frag = __get_or_create_frag(ci, id);

		if (IS_ERR(frag))
			continue;
		frag->split_by = le32_to_cpu(info->fragtree.splits[i].by);
		dout(" frag %x split by %d\n", frag->frag, frag->split_by);
	}
	mutex_unlock(&ci->i_fragtree_mutex);

	/* were we issued a capability? */
	if (info->cap.caps) {
		if (ceph_snap(inode) == CEPH_NOSNAP) {
			ceph_add_cap(inode, session,
				     le64_to_cpu(info->cap.cap_id),
				     cap_fmode,
				     le32_to_cpu(info->cap.caps),
				     le32_to_cpu(info->cap.wanted),
				     le32_to_cpu(info->cap.seq),
				     le32_to_cpu(info->cap.mseq),
				     le64_to_cpu(info->cap.realm),
				     info->cap.flags,
				     caps_reservation);
		} else {
			spin_lock(&inode->i_lock);
			dout(" %p got snap_caps %s\n", inode,
			     ceph_cap_string(le32_to_cpu(info->cap.caps)));
			ci->i_snap_caps |= le32_to_cpu(info->cap.caps);
			if (cap_fmode >= 0)
				__ceph_get_fmode(ci, cap_fmode);
			spin_unlock(&inode->i_lock);
		}
	} else if (cap_fmode >= 0) {
		pr_warning("mds issued no caps on %llx.%llx\n",
			   ceph_vinop(inode));
		__ceph_get_fmode(ci, cap_fmode);
	}

	/* update delegation info? */
	if (dirinfo)
		ceph_fill_dirfrag(inode, dirinfo);

	err = 0;

out:
	if (xattr_blob)
		ceph_buffer_put(xattr_blob);
	return err;
}

/*
 * caller should hold session s_mutex.
 */
static void update_dentry_lease(struct dentry *dentry,
				struct ceph_mds_reply_lease *lease,
				struct ceph_mds_session *session,
				unsigned long from_time)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	long unsigned duration = le32_to_cpu(lease->duration_ms);
	long unsigned ttl = from_time + (duration * HZ) / 1000;
	long unsigned half_ttl = from_time + (duration * HZ / 2) / 1000;
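	/*
	 * duration_ms is converted to jiffies above: e.g. with HZ=250 and
	 * a 30000 ms lease, ttl = from_time + 7500 jiffies, and we aim to
	 * renew from the halfway point (from_time + 3750).
	 */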
	struct inode *dir;

	/* only track leases on regular dentries */
	if (dentry->d_op != &ceph_dentry_ops)
		return;

	spin_lock(&dentry->d_lock);
	dout("update_dentry_lease %p mask %d duration %lu ms ttl %lu\n",
	     dentry, le16_to_cpu(lease->mask), duration, ttl);

	/* make lease_rdcache_gen match directory */
	dir = dentry->d_parent->d_inode;
	di->lease_shared_gen = ceph_inode(dir)->i_shared_gen;

	if (lease->mask == 0)
		goto out_unlock;

	if (di->lease_gen == session->s_cap_gen &&
	    time_before(ttl, dentry->d_time))
		goto out_unlock;  /* we already have a newer lease. */

	if (di->lease_session && di->lease_session != session)
		goto out_unlock;

	ceph_dentry_lru_touch(dentry);

	if (!di->lease_session)
		di->lease_session = ceph_get_mds_session(session);
	di->lease_gen = session->s_cap_gen;
	di->lease_seq = le32_to_cpu(lease->seq);
	di->lease_renew_after = half_ttl;
	di->lease_renew_from = 0;
	dentry->d_time = ttl;
out_unlock:
	spin_unlock(&dentry->d_lock);
	return;
}

/*
 * Set dentry's directory position based on the current dir's max, and
 * order it in d_subdirs, so that dcache_readdir behaves.
 */
static void ceph_set_dentry_offset(struct dentry *dn)
{
	struct dentry *dir = dn->d_parent;
	struct inode *inode = dn->d_parent->d_inode;
	struct ceph_dentry_info *di;

	BUG_ON(!inode);

	di = ceph_dentry(dn);

	spin_lock(&inode->i_lock);
	if ((ceph_inode(inode)->i_ceph_flags & CEPH_I_COMPLETE) == 0) {
		spin_unlock(&inode->i_lock);
		return;
	}
	di->offset = ceph_inode(inode)->i_max_offset++;
	spin_unlock(&inode->i_lock);

	spin_lock(&dcache_lock);
	spin_lock(&dn->d_lock);
	list_move(&dn->d_u.d_child, &dir->d_subdirs);
	dout("set_dentry_offset %p %lld (%p %p)\n", dn, di->offset,
	     dn->d_u.d_child.prev, dn->d_u.d_child.next);
	spin_unlock(&dn->d_lock);
	spin_unlock(&dcache_lock);
}

/*
 * splice a dentry to an inode.
 * caller must hold directory i_mutex for this to be safe.
 *
 * we will only rehash the resulting dentry if @prehash is
 * true; @prehash will be set to false (for the benefit of
 * the caller) if we fail.
 */
static struct dentry *splice_dentry(struct dentry *dn, struct inode *in,
				    bool *prehash, bool set_offset)
{
	struct dentry *realdn;

	BUG_ON(dn->d_inode);

	/* dn must be unhashed */
	if (!d_unhashed(dn))
		d_drop(dn);
	realdn = d_materialise_unique(dn, in);
	if (IS_ERR(realdn)) {
		pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n",
		       PTR_ERR(realdn), dn, in, ceph_vinop(in));
		if (prehash)
			*prehash = false; /* don't rehash on error */
		dn = realdn; /* note realdn contains the error */
		goto out;
	} else if (realdn) {
		dout("dn %p (%d) spliced with %p (%d) "
		     "inode %p ino %llx.%llx\n",
		     dn, atomic_read(&dn->d_count),
		     realdn, atomic_read(&realdn->d_count),
		     realdn->d_inode, ceph_vinop(realdn->d_inode));
		dput(dn);
		dn = realdn;
	} else {
		BUG_ON(!ceph_dentry(dn));
		dout("dn %p attached to %p ino %llx.%llx\n",
		     dn, dn->d_inode, ceph_vinop(dn->d_inode));
	}
	if ((!prehash || *prehash) && d_unhashed(dn))
		d_rehash(dn);
	if (set_offset)
		ceph_set_dentry_offset(dn);
out:
	return dn;
}

/*
 * Incorporate results into the local cache.  This is either just
 * one inode, or a directory, dentry, and possibly linked-to inode (e.g.,
 * after a lookup).
 *
 * A reply may contain
 *         a directory inode along with a dentry.
 *   and/or a target inode
 *
 * Called with snap_rwsem (read).
 */
int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
		    struct ceph_mds_session *session)
{
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	struct inode *in = NULL;
	struct ceph_mds_reply_inode *ininfo;
	struct ceph_vino vino;
	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
	int i = 0;
	int err = 0;

	dout("fill_trace %p is_dentry %d is_target %d\n", req,
	     rinfo->head->is_dentry, rinfo->head->is_target);

#if 0
	/*
	 * Debugging hook:
	 *
	 * If we resend completed ops to a recovering mds, we get no
	 * trace.  Since that is very rare, pretend this is the case
	 * to ensure the 'no trace' handlers in the callers behave.
	 *
	 * Fill in inodes unconditionally to avoid breaking cap
	 * invariants.
	 */
	if (rinfo->head->op & CEPH_MDS_OP_WRITE) {
		pr_info("fill_trace faking empty trace on %lld %s\n",
			req->r_tid, ceph_mds_op_name(rinfo->head->op));
		if (rinfo->head->is_dentry) {
			rinfo->head->is_dentry = 0;
			err = fill_inode(req->r_locked_dir,
					 &rinfo->diri, rinfo->dirfrag,
					 session, req->r_request_started, -1);
		}
		if (rinfo->head->is_target) {
			rinfo->head->is_target = 0;
			ininfo = rinfo->targeti.in;
			vino.ino = le64_to_cpu(ininfo->ino);
			vino.snap = le64_to_cpu(ininfo->snapid);
			in = ceph_get_inode(sb, vino);
			err = fill_inode(in, &rinfo->targeti, NULL,
					 session, req->r_request_started,
					 req->r_fmode);
			iput(in);
		}
	}
#endif

	if (!rinfo->head->is_target && !rinfo->head->is_dentry) {
		dout("fill_trace reply is empty!\n");
		if (rinfo->head->result == 0 && req->r_locked_dir)
			ceph_invalidate_dir_request(req);
		return 0;
	}

	if (rinfo->head->is_dentry) {
		struct inode *dir = req->r_locked_dir;

		err = fill_inode(dir, &rinfo->diri, rinfo->dirfrag,
				 session, req->r_request_started, -1,
				 &req->r_caps_reservation);
		if (err < 0)
			return err;
	}

	/*
	 * ignore null lease/binding on snapdir ENOENT, or else we
	 * will have trouble splicing in the virtual snapdir later
	 */
	if (rinfo->head->is_dentry && !req->r_aborted &&
	    (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name,
					       fsc->mount_options->snapdir_name,
					       req->r_dentry->d_name.len))) {
		/*
		 * lookup link rename   : null -> possibly existing inode
		 * mknod symlink mkdir  : null -> new inode
		 * unlink               : linked -> null
		 */
		struct inode *dir = req->r_locked_dir;
		struct dentry *dn = req->r_dentry;
		bool have_dir_cap, have_lease;

		BUG_ON(!dn);
		BUG_ON(!dir);
		BUG_ON(dn->d_parent->d_inode != dir);
		BUG_ON(ceph_ino(dir) !=
		       le64_to_cpu(rinfo->diri.in->ino));
		BUG_ON(ceph_snap(dir) !=
		       le64_to_cpu(rinfo->diri.in->snapid));

		/* do we have a lease on the whole dir? */
		have_dir_cap =
			(le32_to_cpu(rinfo->diri.in->cap.caps) &
			 CEPH_CAP_FILE_SHARED);

		/* do we have a dn lease? */
		have_lease = have_dir_cap ||
			(le16_to_cpu(rinfo->dlease->mask) &
			 CEPH_LOCK_DN);

		if (!have_lease)
			dout("fill_trace  no dentry lease or dir cap\n");

		/* rename? */
		if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) {
			dout(" src %p '%.*s' dst %p '%.*s'\n",
			     req->r_old_dentry,
			     req->r_old_dentry->d_name.len,
			     req->r_old_dentry->d_name.name,
			     dn, dn->d_name.len, dn->d_name.name);
			dout("fill_trace doing d_move %p -> %p\n",
			     req->r_old_dentry, dn);

			/* d_move screws up d_subdirs order */
			ceph_i_clear(dir, CEPH_I_COMPLETE);

			d_move(req->r_old_dentry, dn);
			dout(" src %p '%.*s' dst %p '%.*s'\n",
			     req->r_old_dentry,
			     req->r_old_dentry->d_name.len,
			     req->r_old_dentry->d_name.name,
			     dn, dn->d_name.len, dn->d_name.name);

			/* ensure target dentry is invalidated, despite
			   rehashing bug in vfs_rename_dir */
			ceph_invalidate_dentry_lease(dn);

			/* take overwritten dentry's readdir offset */
			dout("dn %p gets %p offset %lld (old offset %lld)\n",
			     req->r_old_dentry, dn, ceph_dentry(dn)->offset,
			     ceph_dentry(req->r_old_dentry)->offset);
			ceph_dentry(req->r_old_dentry)->offset =
				ceph_dentry(dn)->offset;

			dn = req->r_old_dentry;  /* use old_dentry */
			in = dn->d_inode;
		}

		/* null dentry? */
		if (!rinfo->head->is_target) {
			dout("fill_trace null dentry\n");
			if (dn->d_inode) {
				dout("d_delete %p\n", dn);
				d_delete(dn);
			} else {
				dout("d_instantiate %p NULL\n", dn);
				d_instantiate(dn, NULL);
				if (have_lease && d_unhashed(dn))
					d_rehash(dn);
				update_dentry_lease(dn, rinfo->dlease,
						    session,
						    req->r_request_started);
			}
			goto done;
		}

		/* attach proper inode */
		ininfo = rinfo->targeti.in;
		vino.ino = le64_to_cpu(ininfo->ino);
		vino.snap = le64_to_cpu(ininfo->snapid);
		in = dn->d_inode;
		if (!in) {
			in = ceph_get_inode(sb, vino);
			if (IS_ERR(in)) {
				pr_err("fill_trace bad get_inode "
				       "%llx.%llx\n", vino.ino, vino.snap);
				err = PTR_ERR(in);
				d_delete(dn);
				goto done;
			}
			dn = splice_dentry(dn, in, &have_lease, true);
			if (IS_ERR(dn)) {
				err = PTR_ERR(dn);
				goto done;
			}
			req->r_dentry = dn;  /* may have spliced */
			igrab(in);
		} else if (ceph_ino(in) == vino.ino &&
			   ceph_snap(in) == vino.snap) {
			igrab(in);
		} else {
			dout(" %p links to %p %llx.%llx, not %llx.%llx\n",
			     dn, in, ceph_ino(in), ceph_snap(in),
			     vino.ino, vino.snap);
			have_lease = false;
			in = NULL;
		}

		if (have_lease)
			update_dentry_lease(dn, rinfo->dlease, session,
					    req->r_request_started);
		dout(" final dn %p\n", dn);
		i++;
	} else if (req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
		   req->r_op == CEPH_MDS_OP_MKSNAP) {
		struct dentry *dn = req->r_dentry;

		/* fill out a snapdir LOOKUPSNAP dentry */
		BUG_ON(!dn);
		BUG_ON(!req->r_locked_dir);
		BUG_ON(ceph_snap(req->r_locked_dir) != CEPH_SNAPDIR);
		ininfo = rinfo->targeti.in;
		vino.ino = le64_to_cpu(ininfo->ino);
		vino.snap = le64_to_cpu(ininfo->snapid);
		in = ceph_get_inode(sb, vino);
		if (IS_ERR(in)) {
			pr_err("fill_inode get_inode badness %llx.%llx\n",
			       vino.ino, vino.snap);
			err = PTR_ERR(in);
			d_delete(dn);
			goto done;
		}
		dout(" linking snapped dir %p to dn %p\n", in, dn);
		dn = splice_dentry(dn, in, NULL, true);
		if (IS_ERR(dn)) {
			err = PTR_ERR(dn);
			goto done;
		}
		req->r_dentry = dn;  /* may have spliced */
		igrab(in);
		rinfo->head->is_dentry = 1;  /* fool notrace handlers */
	}

	if (rinfo->head->is_target) {
		vino.ino = le64_to_cpu(rinfo->targeti.in->ino);
		vino.snap = le64_to_cpu(rinfo->targeti.in->snapid);

		if (in == NULL || ceph_ino(in) != vino.ino ||
		    ceph_snap(in) != vino.snap) {
			in = ceph_get_inode(sb, vino);
			if (IS_ERR(in)) {
				err = PTR_ERR(in);
				goto done;
			}
		}
		req->r_target_inode = in;

		err = fill_inode(in,
				 &rinfo->targeti, NULL,
				 session, req->r_request_started,
				 (le32_to_cpu(rinfo->head->result) == 0) ?
				 req->r_fmode : -1,
				 &req->r_caps_reservation);
		if (err < 0) {
			pr_err("fill_inode badness %p %llx.%llx\n",
			       in, ceph_vinop(in));
			goto done;
		}
	}

done:
	dout("fill_trace done err=%d\n", err);
	return err;
}

/*
 * Prepopulate our cache with readdir results, leases, etc.
 */
int ceph_readdir_prepopulate(struct ceph_mds_request *req,
			     struct ceph_mds_session *session)
{
	struct dentry *parent = req->r_dentry;
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	struct qstr dname;
	struct dentry *dn;
	struct inode *in;
	int err = 0, i;
	struct inode *snapdir = NULL;
	struct ceph_mds_request_head *rhead = req->r_request->front.iov_base;
	u64 frag = le32_to_cpu(rhead->args.readdir.frag);
	struct ceph_dentry_info *di;

	if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) {
		snapdir = ceph_get_snapdir(parent->d_inode);
		parent = d_find_alias(snapdir);
		dout("readdir_prepopulate %d items under SNAPDIR dn %p\n",
		     rinfo->dir_nr, parent);
	} else {
		dout("readdir_prepopulate %d items under dn %p\n",
		     rinfo->dir_nr, parent);
		if (rinfo->dir_dir)
			ceph_fill_dirfrag(parent->d_inode, rinfo->dir_dir);
	}

	for (i = 0; i < rinfo->dir_nr; i++) {
		struct ceph_vino vino;

		dname.name = rinfo->dir_dname[i];
		dname.len = rinfo->dir_dname_len[i];
		dname.hash = full_name_hash(dname.name, dname.len);

		vino.ino = le64_to_cpu(rinfo->dir_in[i].in->ino);
		vino.snap = le64_to_cpu(rinfo->dir_in[i].in->snapid);

retry_lookup:
		dn = d_lookup(parent, &dname);
		dout("d_lookup on parent=%p name=%.*s got %p\n",
		     parent, dname.len, dname.name, dn);

		if (!dn) {
			dn = d_alloc(parent, &dname);
			dout("d_alloc %p '%.*s' = %p\n", parent,
			     dname.len, dname.name, dn);
			if (dn == NULL) {
				dout("d_alloc badness\n");
				err = -ENOMEM;
				goto out;
			}
			err = ceph_init_dentry(dn);
			if (err < 0) {
				dput(dn);
				goto out;
			}
		} else if (dn->d_inode &&
			   (ceph_ino(dn->d_inode) != vino.ino ||
			    ceph_snap(dn->d_inode) != vino.snap)) {
			dout(" dn %p points to wrong inode %p\n",
			     dn, dn->d_inode);
			d_delete(dn);
			dput(dn);
			goto retry_lookup;
		} else {
			/* reorder parent's d_subdirs */
			spin_lock(&dcache_lock);
			spin_lock(&dn->d_lock);
			list_move(&dn->d_u.d_child, &parent->d_subdirs);
			spin_unlock(&dn->d_lock);
			spin_unlock(&dcache_lock);
		}

		di = dn->d_fsdata;
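		/*
		 * readdir position: ceph_make_fpos() is assumed to pack the
		 * frag into the high bits and the within-frag offset into
		 * the low bits, biased by entries already returned in
		 * earlier replies for this frag (r_readdir_offset).
		 */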
		di->offset = ceph_make_fpos(frag, i + req->r_readdir_offset);

		/* inode */
		if (dn->d_inode) {
			in = dn->d_inode;
		} else {
			in = ceph_get_inode(parent->d_sb, vino);
			if (IS_ERR(in)) {
				dout("new_inode badness\n");
				d_delete(dn);
				dput(dn);
				err = PTR_ERR(in);
				goto out;
			}
			dn = splice_dentry(dn, in, NULL, false);
			if (IS_ERR(dn))
				dn = NULL;
		}

		if (fill_inode(in, &rinfo->dir_in[i], NULL, session,
			       req->r_request_started, -1,
			       &req->r_caps_reservation) < 0) {
			pr_err("fill_inode badness on %p\n", in);
			goto next_item;
		}
		if (dn)
			update_dentry_lease(dn, rinfo->dir_dlease[i],
					    req->r_session,
					    req->r_request_started);
next_item:
		if (dn)
			dput(dn);
	}
	req->r_did_prepopulate = true;

out:
	if (snapdir) {
		iput(snapdir);
		dput(parent);
	}
	dout("readdir_prepopulate done\n");
	return err;
}

int ceph_inode_set_size(struct inode *inode, loff_t size)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret = 0;

	spin_lock(&inode->i_lock);
	dout("set_size %p %llu -> %llu\n", inode, inode->i_size, size);
	inode->i_size = size;
	inode->i_blocks = (size + (1 << 9) - 1) >> 9;

	/* tell the MDS if we are approaching max_size */
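	/*
	 * e.g. with max_size = 4 MB: once i_size crosses 2 MB we return 1
	 * (until i_reported_size catches up), prompting the caller to let
	 * the MDS know before we actually hit the limit.
	 */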
	if ((size << 1) >= ci->i_max_size &&
	    (ci->i_reported_size << 1) < ci->i_max_size)
		ret = 1;

	spin_unlock(&inode->i_lock);
	return ret;
}

/*
 * Write back inode data in a worker thread.  (This can't be done
 * in the message handler context.)
 */
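/*
 * Note on refcounting in the queue_* helpers below: a successful
 * queue_work() is paired with an igrab(), and the matching iput() is
 * done at the end of the worker, so the inode can't be evicted while
 * work is pending.  If queue_work() returns 0 the work was already
 * queued and already holds its reference.
 */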
void ceph_queue_writeback(struct inode *inode)
{
	if (queue_work(ceph_inode_to_client(inode)->wb_wq,
		       &ceph_inode(inode)->i_wb_work)) {
		dout("ceph_queue_writeback %p\n", inode);
		igrab(inode);
	} else {
		dout("ceph_queue_writeback %p failed\n", inode);
	}
}

static void ceph_writeback_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_wb_work);
	struct inode *inode = &ci->vfs_inode;

	dout("writeback %p\n", inode);
	filemap_fdatawrite(&inode->i_data);
	iput(inode);
}

/*
 * queue an async invalidation
 */
void ceph_queue_invalidate(struct inode *inode)
{
	if (queue_work(ceph_inode_to_client(inode)->pg_inv_wq,
		       &ceph_inode(inode)->i_pg_inv_work)) {
		dout("ceph_queue_invalidate %p\n", inode);
		igrab(inode);
	} else {
		dout("ceph_queue_invalidate %p failed\n", inode);
	}
}

/*
 * invalidate any pages that are not dirty or under writeback.  this
 * includes pages that are clean and mapped.
 */
static void ceph_invalidate_nondirty_pages(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t next = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t index;
			int skip_page =
				(PageDirty(page) || PageWriteback(page));

			if (!skip_page)
				skip_page = !trylock_page(page);

			/*
			 * We really shouldn't be looking at the ->index of an
			 * unlocked page.  But we're not allowed to lock these
			 * pages.  So we rely upon nobody altering the ->index
			 * of this (pinned-by-us) page.
			 */
			index = page->index;
			if (index > next)
				next = index;
			next++;

			if (skip_page)
				continue;

			generic_error_remove_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
}

/*
 * Invalidate inode pages in a worker thread.  (This can't be done
 * in the message handler context.)
 */
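/*
 * Sketch of the generation protocol as used here: i_rdcache_revoking
 * is set equal to i_rdcache_gen when a cap revocation asks us to drop
 * the page cache.  If the two still match when this worker runs, the
 * invalidation is still wanted; on success we decrement
 * i_rdcache_revoking to record that it was carried out.
 */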
static void ceph_invalidate_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_pg_inv_work);
	struct inode *inode = &ci->vfs_inode;
	u32 orig_gen;
	int check = 0;

	spin_lock(&inode->i_lock);
	dout("invalidate_pages %p gen %d revoking %d\n", inode,
	     ci->i_rdcache_gen, ci->i_rdcache_revoking);
	if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
		/* nevermind! */
		spin_unlock(&inode->i_lock);
		goto out;
	}
	orig_gen = ci->i_rdcache_gen;
	spin_unlock(&inode->i_lock);

	ceph_invalidate_nondirty_pages(inode->i_mapping);

	spin_lock(&inode->i_lock);
	if (orig_gen == ci->i_rdcache_gen &&
	    orig_gen == ci->i_rdcache_revoking) {
		dout("invalidate_pages %p gen %d successful\n", inode,
		     ci->i_rdcache_gen);
		ci->i_rdcache_revoking--;
		check = 1;
	} else {
		dout("invalidate_pages %p gen %d raced, now %d revoking %d\n",
		     inode, orig_gen, ci->i_rdcache_gen,
		     ci->i_rdcache_revoking);
	}
	spin_unlock(&inode->i_lock);

	if (check)
		ceph_check_caps(ci, 0, NULL);
out:
	iput(inode);
}


/*
 * called by trunc_wq; take i_mutex ourselves
 *
 * We also truncate in a separate thread as well.
 */
static void ceph_vmtruncate_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_vmtruncate_work);
	struct inode *inode = &ci->vfs_inode;

	dout("vmtruncate_work %p\n", inode);
	mutex_lock(&inode->i_mutex);
	__ceph_do_pending_vmtruncate(inode);
	mutex_unlock(&inode->i_mutex);
	iput(inode);
}

/*
 * Queue an async vmtruncate.  If we fail to queue work, we will handle
 * the truncation the next time we call __ceph_do_pending_vmtruncate.
 */
void ceph_queue_vmtruncate(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	if (queue_work(ceph_sb_to_client(inode->i_sb)->trunc_wq,
		       &ci->i_vmtruncate_work)) {
		dout("ceph_queue_vmtruncate %p\n", inode);
		igrab(inode);
	} else {
		dout("ceph_queue_vmtruncate %p failed, pending=%d\n",
		     inode, ci->i_truncate_pending);
	}
}

/*
 * called with i_mutex held.
 *
 * Make sure any pending truncation is applied before doing anything
 * that may depend on it.
 */
void __ceph_do_pending_vmtruncate(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 to;
	int wrbuffer_refs, wake = 0;

retry:
	spin_lock(&inode->i_lock);
	if (ci->i_truncate_pending == 0) {
		dout("__do_pending_vmtruncate %p none pending\n", inode);
		spin_unlock(&inode->i_lock);
		return;
	}

	/*
	 * make sure any dirty snapped pages are flushed before we
	 * possibly truncate them.. so write AND block!
	 */
	if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) {
		dout("__do_pending_vmtruncate %p flushing snaps first\n",
		     inode);
		spin_unlock(&inode->i_lock);
		filemap_write_and_wait_range(&inode->i_data, 0,
					     inode->i_sb->s_maxbytes);
		goto retry;
	}

	to = ci->i_truncate_size;
	wrbuffer_refs = ci->i_wrbuffer_ref;
	dout("__do_pending_vmtruncate %p (%d) to %lld\n", inode,
	     ci->i_truncate_pending, to);
	spin_unlock(&inode->i_lock);

	truncate_inode_pages(inode->i_mapping, to);

	spin_lock(&inode->i_lock);
	ci->i_truncate_pending--;
	if (ci->i_truncate_pending == 0)
		wake = 1;
	spin_unlock(&inode->i_lock);

	if (wrbuffer_refs == 0)
		ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
	if (wake)
		wake_up_all(&ci->i_cap_wq);
}


/*
 * symlinks
 */
static void *ceph_sym_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	struct ceph_inode_info *ci = ceph_inode(dentry->d_inode);
	nd_set_link(nd, ci->i_symlink);
	return NULL;
}

static const struct inode_operations ceph_symlink_iops = {
	.readlink = generic_readlink,
	.follow_link = ceph_sym_follow_link,
};

/*
 * setattr
 */
int ceph_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct inode *parent_inode = dentry->d_parent->d_inode;
	const unsigned int ia_valid = attr->ia_valid;
	struct ceph_mds_request *req;
	struct ceph_mds_client *mdsc = ceph_sb_to_client(dentry->d_sb)->mdsc;
	int issued;
	int release = 0, dirtied = 0;
	int mask = 0;
	int err = 0;

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

	__ceph_do_pending_vmtruncate(inode);

	err = inode_change_ok(inode, attr);
	if (err != 0)
		return err;

	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETATTR,
				       USE_AUTH_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);

	spin_lock(&inode->i_lock);
	issued = __ceph_caps_issued(ci, NULL);
	dout("setattr %p issued %s\n", inode, ceph_cap_string(issued));

	if (ia_valid & ATTR_UID) {
		dout("setattr %p uid %d -> %d\n", inode,
		     inode->i_uid, attr->ia_uid);
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_uid = attr->ia_uid;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   attr->ia_uid != inode->i_uid) {
			req->r_args.setattr.uid = cpu_to_le32(attr->ia_uid);
			mask |= CEPH_SETATTR_UID;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}
	if (ia_valid & ATTR_GID) {
		dout("setattr %p gid %d -> %d\n", inode,
		     inode->i_gid, attr->ia_gid);
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_gid = attr->ia_gid;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   attr->ia_gid != inode->i_gid) {
			req->r_args.setattr.gid = cpu_to_le32(attr->ia_gid);
			mask |= CEPH_SETATTR_GID;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}
	if (ia_valid & ATTR_MODE) {
		dout("setattr %p mode 0%o -> 0%o\n", inode, inode->i_mode,
		     attr->ia_mode);
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_mode = attr->ia_mode;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   attr->ia_mode != inode->i_mode) {
			req->r_args.setattr.mode = cpu_to_le32(attr->ia_mode);
			mask |= CEPH_SETATTR_MODE;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}

	if (ia_valid & ATTR_ATIME) {
		dout("setattr %p atime %ld.%ld -> %ld.%ld\n", inode,
		     inode->i_atime.tv_sec, inode->i_atime.tv_nsec,
		     attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec);
		if (issued & CEPH_CAP_FILE_EXCL) {
			ci->i_time_warp_seq++;
			inode->i_atime = attr->ia_atime;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_WR) &&
			   timespec_compare(&inode->i_atime,
					    &attr->ia_atime) < 0) {
			inode->i_atime = attr->ia_atime;
			dirtied |= CEPH_CAP_FILE_WR;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   !timespec_equal(&inode->i_atime, &attr->ia_atime)) {
			ceph_encode_timespec(&req->r_args.setattr.atime,
					     &attr->ia_atime);
			mask |= CEPH_SETATTR_ATIME;
			release |= CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}
	if (ia_valid & ATTR_MTIME) {
		dout("setattr %p mtime %ld.%ld -> %ld.%ld\n", inode,
		     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
		     attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec);
		if (issued & CEPH_CAP_FILE_EXCL) {
			ci->i_time_warp_seq++;
			inode->i_mtime = attr->ia_mtime;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_WR) &&
			   timespec_compare(&inode->i_mtime,
					    &attr->ia_mtime) < 0) {
			inode->i_mtime = attr->ia_mtime;
			dirtied |= CEPH_CAP_FILE_WR;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   !timespec_equal(&inode->i_mtime, &attr->ia_mtime)) {
			ceph_encode_timespec(&req->r_args.setattr.mtime,
					     &attr->ia_mtime);
			mask |= CEPH_SETATTR_MTIME;
			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}
	if (ia_valid & ATTR_SIZE) {
		dout("setattr %p size %lld -> %lld\n", inode,
		     inode->i_size, attr->ia_size);
		if (attr->ia_size > inode->i_sb->s_maxbytes) {
			err = -EINVAL;
			goto out;
		}
		if ((issued & CEPH_CAP_FILE_EXCL) &&
		    attr->ia_size > inode->i_size) {
			inode->i_size = attr->ia_size;
			inode->i_blocks =
				(attr->ia_size + (1 << 9) - 1) >> 9;
			inode->i_ctime = attr->ia_ctime;
			ci->i_reported_size = attr->ia_size;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   attr->ia_size != inode->i_size) {
			req->r_args.setattr.size = cpu_to_le64(attr->ia_size);
			req->r_args.setattr.old_size =
				cpu_to_le64(inode->i_size);
			mask |= CEPH_SETATTR_SIZE;
			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}

	/* these do nothing */
	if (ia_valid & ATTR_CTIME) {
		bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME|
					 ATTR_MODE|ATTR_UID|ATTR_GID)) == 0;
		dout("setattr %p ctime %ld.%ld -> %ld.%ld (%s)\n", inode,
		     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
		     attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
		     only ? "ctime only" : "ignored");
		inode->i_ctime = attr->ia_ctime;
1692 /*
1693 * if kernel wants to dirty ctime but nothing else,
1694 * we need to choose a cap to dirty under, or do
1695 * a almost-no-op setattr
1696 */
			if (issued & CEPH_CAP_AUTH_EXCL)
				dirtied |= CEPH_CAP_AUTH_EXCL;
			else if (issued & CEPH_CAP_FILE_EXCL)
				dirtied |= CEPH_CAP_FILE_EXCL;
			else if (issued & CEPH_CAP_XATTR_EXCL)
				dirtied |= CEPH_CAP_XATTR_EXCL;
			else
				mask |= CEPH_SETATTR_CTIME;
		}
	}
	if (ia_valid & ATTR_FILE)
		dout("setattr %p ATTR_FILE ... hrm!\n", inode);

	if (dirtied) {
		__ceph_mark_dirty_caps(ci, dirtied);
		inode->i_ctime = CURRENT_TIME;
	}

	release &= issued;
	spin_unlock(&inode->i_lock);

	if (mask) {
		req->r_inode = igrab(inode);
		req->r_inode_drop = release;
		req->r_args.setattr.mask = cpu_to_le32(mask);
		req->r_num_caps = 1;
		err = ceph_mdsc_do_request(mdsc, parent_inode, req);
	}
	dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err,
	     ceph_cap_string(dirtied), mask);

	ceph_mdsc_put_request(req);
	__ceph_do_pending_vmtruncate(inode);
	return err;
out:
	spin_unlock(&inode->i_lock);
	ceph_mdsc_put_request(req);
	return err;
}

/*
 * Verify that we have a lease on the given mask.  If not,
 * do a getattr against an mds.
 */
int ceph_do_getattr(struct inode *inode, int mask)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(inode) == CEPH_SNAPDIR) {
		dout("do_getattr inode %p SNAPDIR\n", inode);
		return 0;
	}

	dout("do_getattr inode %p mask %s\n", inode, ceph_cap_string(mask));
	if (ceph_caps_issued_mask(ceph_inode(inode), mask, 1))
		return 0;

	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->r_inode = igrab(inode);
	req->r_num_caps = 1;
	req->r_args.getattr.mask = cpu_to_le32(mask);
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	ceph_mdsc_put_request(req);
	dout("do_getattr result=%d\n", err);
	return err;
}


/*
 * Check inode permissions.  We verify we have a valid value for
 * the AUTH cap, then call the generic handler.
 */
int ceph_permission(struct inode *inode, int mask)
{
	int err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED);

	if (!err)
		err = generic_permission(inode, mask, NULL);
	return err;
}

/*
 * Get all attributes.  Hopefully someday we'll have a statlite()
 * and can limit the fields we require to be accurate.
 */
int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry,
		 struct kstat *stat)
{
	struct inode *inode = dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int err;

	err = ceph_do_getattr(inode, CEPH_STAT_CAP_INODE_ALL);
	if (!err) {
		generic_fillattr(inode, stat);
		stat->ino = inode->i_ino;
		if (ceph_snap(inode) != CEPH_NOSNAP)
			stat->dev = ceph_snap(inode);
		else
			stat->dev = 0;
		if (S_ISDIR(inode->i_mode)) {
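			/*
			 * for directories, report the recursive byte count
			 * (rbytes, the total of everything beneath the dir)
			 * as the size.
			 */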
			stat->size = ci->i_rbytes;
			stat->blocks = 0;
			stat->blksize = 65536;
		}
	}
	return err;
}