#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/vmalloc.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/random.h>
#include <linux/sort.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
#include <linux/ceph/decode.h>

/*
 * Ceph inode operations
 *
 * Implement basic inode helpers (get, alloc) and inode ops (getattr,
 * setattr, etc.), xattr helpers, and helpers for assimilating
 * metadata returned by the MDS into our cache.
 *
 * Also define helpers for doing asynchronous writeback, invalidation,
 * and truncation for the benefit of those who can't afford to block
 * (typically because they are in the message handler path).
 */

static const struct inode_operations ceph_symlink_iops;

static void ceph_invalidate_work(struct work_struct *work);
static void ceph_writeback_work(struct work_struct *work);
static void ceph_vmtruncate_work(struct work_struct *work);

/*
 * find or create an inode, given the ceph ino number
 */
static int ceph_set_ino_cb(struct inode *inode, void *data)
{
	ceph_inode(inode)->i_vino = *(struct ceph_vino *)data;
	inode->i_ino = ceph_vino_to_ino(*(struct ceph_vino *)data);
	return 0;
}

struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino)
{
	struct inode *inode;
	ino_t t = ceph_vino_to_ino(vino);

	inode = iget5_locked(sb, t, ceph_ino_compare, ceph_set_ino_cb, &vino);
	if (inode == NULL)
		return ERR_PTR(-ENOMEM);
	if (inode->i_state & I_NEW) {
		dout("get_inode created new inode %p %llx.%llx ino %llx\n",
		     inode, ceph_vinop(inode), (u64)inode->i_ino);
		unlock_new_inode(inode);
	}

	dout("get_inode on %lu=%llx.%llx got %p\n", inode->i_ino, vino.ino,
	     vino.snap, inode);
	return inode;
}

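/*
 * Usage sketch (illustrative only; it mirrors how ceph_fill_trace()
 * below builds a vino from an MDS reply, and is not a new call site):
 *
 *	struct ceph_vino vino = {
 *		.ino  = le64_to_cpu(ininfo->ino),
 *		.snap = le64_to_cpu(ininfo->snapid),
 *	};
 *	struct inode *in = ceph_get_inode(sb, vino);
 *	if (IS_ERR(in))
 *		return PTR_ERR(in);
 */
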
/*
 * get/construct snapdir inode for a given directory
 */
struct inode *ceph_get_snapdir(struct inode *parent)
{
	struct ceph_vino vino = {
		.ino = ceph_ino(parent),
		.snap = CEPH_SNAPDIR,
	};
	struct inode *inode = ceph_get_inode(parent->i_sb, vino);
	struct ceph_inode_info *ci = ceph_inode(inode);

	BUG_ON(!S_ISDIR(parent->i_mode));
	if (IS_ERR(inode))
		return inode;
	inode->i_mode = parent->i_mode;
	inode->i_uid = parent->i_uid;
	inode->i_gid = parent->i_gid;
	inode->i_op = &ceph_snapdir_iops;
	inode->i_fop = &ceph_snapdir_fops;
	ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
	ci->i_rbytes = 0;
	return inode;
}

const struct inode_operations ceph_file_iops = {
	.permission = ceph_permission,
	.setattr = ceph_setattr,
	.getattr = ceph_getattr,
	.listxattr = ceph_listxattr,
	.get_acl = ceph_get_acl,
	.set_acl = ceph_set_acl,
};


/*
 * We use a 'frag tree' to keep track of the MDS's directory fragments
 * for a given inode (usually there is just a single fragment).  We
 * need to know when a child frag is delegated to a new MDS, or when
 * it is flagged as replicated, so we can direct our requests
 * accordingly.
 */
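
/*
 * Illustrative example (informal; see the ceph_frag_* helpers in
 * include/linux/ceph/ceph_frag.h): a frag is a (bits, value) pair
 * identifying all dentry hash values whose top 'bits' bits equal
 * 'value'.  ceph_frag_make(0, 0) covers the whole directory; after a
 * 1-bit split, ceph_frag_make(1, 0x000000) and ceph_frag_make(1,
 * 0x800000) each cover half of the 24-bit hash space.
 */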

/*
 * find/create a frag in the tree
 */
static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci,
						    u32 f)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ceph_inode_frag *frag;
	int c;

	p = &ci->i_fragtree.rb_node;
	while (*p) {
		parent = *p;
		frag = rb_entry(parent, struct ceph_inode_frag, node);
		c = ceph_frag_compare(f, frag->frag);
		if (c < 0)
			p = &(*p)->rb_left;
		else if (c > 0)
			p = &(*p)->rb_right;
		else
			return frag;
	}

	frag = kmalloc(sizeof(*frag), GFP_NOFS);
	if (!frag) {
		pr_err("__get_or_create_frag ENOMEM on %p %llx.%llx "
		       "frag %x\n", &ci->vfs_inode,
		       ceph_vinop(&ci->vfs_inode), f);
		return ERR_PTR(-ENOMEM);
	}
	frag->frag = f;
	frag->split_by = 0;
	frag->mds = -1;
	frag->ndist = 0;

	rb_link_node(&frag->node, parent, p);
	rb_insert_color(&frag->node, &ci->i_fragtree);

	dout("get_or_create_frag added %llx.%llx frag %x\n",
	     ceph_vinop(&ci->vfs_inode), f);
	return frag;
}

/*
 * find a specific frag @f
 */
struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci, u32 f)
{
	struct rb_node *n = ci->i_fragtree.rb_node;

	while (n) {
		struct ceph_inode_frag *frag =
			rb_entry(n, struct ceph_inode_frag, node);
		int c = ceph_frag_compare(f, frag->frag);
		if (c < 0)
			n = n->rb_left;
		else if (c > 0)
			n = n->rb_right;
		else
			return frag;
	}
	return NULL;
}

/*
 * Choose frag containing the given value @v.  If @pfrag is
 * specified, copy the frag delegation info to the caller if
 * it is present.
 */
static u32 __ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
			      struct ceph_inode_frag *pfrag, int *found)
{
	u32 t = ceph_frag_make(0, 0);
	struct ceph_inode_frag *frag;
	unsigned nway, i;
	u32 n;

	if (found)
		*found = 0;

	while (1) {
		WARN_ON(!ceph_frag_contains_value(t, v));
		frag = __ceph_find_frag(ci, t);
		if (!frag)
			break; /* t is a leaf */
		if (frag->split_by == 0) {
			if (pfrag)
				memcpy(pfrag, frag, sizeof(*pfrag));
			if (found)
				*found = 1;
			break;
		}

		/* choose child */
		nway = 1 << frag->split_by;
		dout("choose_frag(%x) %x splits by %d (%d ways)\n", v, t,
		     frag->split_by, nway);
		for (i = 0; i < nway; i++) {
			n = ceph_frag_make_child(t, frag->split_by, i);
			if (ceph_frag_contains_value(n, v)) {
				t = n;
				break;
			}
		}
		BUG_ON(i == nway);
	}
	dout("choose_frag(%x) = %x\n", v, t);

	return t;
}

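/*
 * Locked wrapper around __ceph_choose_frag() for callers that do not
 * already hold i_fragtree_mutex.
 */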
u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
		     struct ceph_inode_frag *pfrag, int *found)
{
	u32 ret;
	mutex_lock(&ci->i_fragtree_mutex);
	ret = __ceph_choose_frag(ci, v, pfrag, found);
	mutex_unlock(&ci->i_fragtree_mutex);
	return ret;
}

/*
 * Process dirfrag (delegation) info from the mds.  Include leaf
 * fragment in tree ONLY if ndist > 0.  Otherwise, only
 * branches/splits are included in i_fragtree.
 */
static int ceph_fill_dirfrag(struct inode *inode,
			     struct ceph_mds_reply_dirfrag *dirinfo)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_frag *frag;
	u32 id = le32_to_cpu(dirinfo->frag);
	int mds = le32_to_cpu(dirinfo->auth);
	int ndist = le32_to_cpu(dirinfo->ndist);
	int diri_auth = -1;
	int i;
	int err = 0;

	spin_lock(&ci->i_ceph_lock);
	if (ci->i_auth_cap)
		diri_auth = ci->i_auth_cap->mds;
	spin_unlock(&ci->i_ceph_lock);

	if (mds == -1) /* CDIR_AUTH_PARENT */
		mds = diri_auth;

	mutex_lock(&ci->i_fragtree_mutex);
	if (ndist == 0 && mds == diri_auth) {
		/* no delegation info needed. */
		frag = __ceph_find_frag(ci, id);
		if (!frag)
			goto out;
		if (frag->split_by == 0) {
			/* tree leaf, remove */
			dout("fill_dirfrag removed %llx.%llx frag %x"
			     " (no ref)\n", ceph_vinop(inode), id);
			rb_erase(&frag->node, &ci->i_fragtree);
			kfree(frag);
		} else {
			/* tree branch, keep and clear */
			dout("fill_dirfrag cleared %llx.%llx frag %x"
			     " referral\n", ceph_vinop(inode), id);
			frag->mds = -1;
			frag->ndist = 0;
		}
		goto out;
	}


	/* find/add this frag to store mds delegation info */
	frag = __get_or_create_frag(ci, id);
	if (IS_ERR(frag)) {
		/* this is not the end of the world; we can continue
		   with bad/inaccurate delegation info */
		pr_err("fill_dirfrag ENOMEM on mds ref %llx.%llx fg %x\n",
		       ceph_vinop(inode), le32_to_cpu(dirinfo->frag));
		err = -ENOMEM;
		goto out;
	}

	frag->mds = mds;
	frag->ndist = min_t(u32, ndist, CEPH_MAX_DIRFRAG_REP);
	for (i = 0; i < frag->ndist; i++)
		frag->dist[i] = le32_to_cpu(dirinfo->dist[i]);
	dout("fill_dirfrag %llx.%llx frag %x ndist=%d\n",
	     ceph_vinop(inode), frag->frag, frag->ndist);

out:
	mutex_unlock(&ci->i_fragtree_mutex);
	return err;
}

static int frag_tree_split_cmp(const void *l, const void *r)
{
	struct ceph_frag_tree_split *ls = (struct ceph_frag_tree_split*)l;
	struct ceph_frag_tree_split *rs = (struct ceph_frag_tree_split*)r;
	return ceph_frag_compare(ls->frag, rs->frag);
}

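/*
 * Is @f an immediate child of @frag (or, when @frag is NULL, the root
 * fragment)?  Used below to decide which fragtree nodes are stale.
 */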
static bool is_frag_child(u32 f, struct ceph_inode_frag *frag)
{
	if (!frag)
		return f == ceph_frag_make(0, 0);
	if (ceph_frag_bits(f) != ceph_frag_bits(frag->frag) + frag->split_by)
		return false;
	return ceph_frag_contains_value(frag->frag, ceph_frag_value(f));
}

static int ceph_fill_fragtree(struct inode *inode,
			      struct ceph_frag_tree_head *fragtree,
			      struct ceph_mds_reply_dirfrag *dirinfo)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_frag *frag, *prev_frag = NULL;
	struct rb_node *rb_node;
	unsigned i, split_by, nsplits;
	u32 id;
	bool update = false;

	mutex_lock(&ci->i_fragtree_mutex);
	nsplits = le32_to_cpu(fragtree->nsplits);
	if (nsplits != ci->i_fragtree_nsplits) {
		update = true;
	} else if (nsplits) {
		i = prandom_u32() % nsplits;
		id = le32_to_cpu(fragtree->splits[i].frag);
		if (!__ceph_find_frag(ci, id))
			update = true;
	} else if (!RB_EMPTY_ROOT(&ci->i_fragtree)) {
		rb_node = rb_first(&ci->i_fragtree);
		frag = rb_entry(rb_node, struct ceph_inode_frag, node);
		if (frag->frag != ceph_frag_make(0, 0) || rb_next(rb_node))
			update = true;
	}
	if (!update && dirinfo) {
		id = le32_to_cpu(dirinfo->frag);
		if (id != __ceph_choose_frag(ci, id, NULL, NULL))
			update = true;
	}
	if (!update)
		goto out_unlock;

	if (nsplits > 1) {
		sort(fragtree->splits, nsplits, sizeof(fragtree->splits[0]),
		     frag_tree_split_cmp, NULL);
	}

	dout("fill_fragtree %llx.%llx\n", ceph_vinop(inode));
	rb_node = rb_first(&ci->i_fragtree);
	for (i = 0; i < nsplits; i++) {
		id = le32_to_cpu(fragtree->splits[i].frag);
		split_by = le32_to_cpu(fragtree->splits[i].by);
		if (split_by == 0 || ceph_frag_bits(id) + split_by > 24) {
			pr_err("fill_fragtree %llx.%llx invalid split %d/%u, "
			       "frag %x split by %d\n", ceph_vinop(inode),
			       i, nsplits, id, split_by);
			continue;
		}
		frag = NULL;
		while (rb_node) {
			frag = rb_entry(rb_node, struct ceph_inode_frag, node);
			if (ceph_frag_compare(frag->frag, id) >= 0) {
				if (frag->frag != id)
					frag = NULL;
				else
					rb_node = rb_next(rb_node);
				break;
			}
			rb_node = rb_next(rb_node);
			/* delete stale split/leaf node */
			if (frag->split_by > 0 ||
			    !is_frag_child(frag->frag, prev_frag)) {
				rb_erase(&frag->node, &ci->i_fragtree);
				if (frag->split_by > 0)
					ci->i_fragtree_nsplits--;
				kfree(frag);
			}
			frag = NULL;
		}
		if (!frag) {
			frag = __get_or_create_frag(ci, id);
			if (IS_ERR(frag))
				continue;
		}
		if (frag->split_by == 0)
			ci->i_fragtree_nsplits++;
		frag->split_by = split_by;
		dout(" frag %x split by %d\n", frag->frag, frag->split_by);
		prev_frag = frag;
	}
	while (rb_node) {
		frag = rb_entry(rb_node, struct ceph_inode_frag, node);
		rb_node = rb_next(rb_node);
		/* delete stale split/leaf node */
		if (frag->split_by > 0 ||
		    !is_frag_child(frag->frag, prev_frag)) {
			rb_erase(&frag->node, &ci->i_fragtree);
			if (frag->split_by > 0)
				ci->i_fragtree_nsplits--;
			kfree(frag);
		}
	}
out_unlock:
	mutex_unlock(&ci->i_fragtree_mutex);
	return 0;
}

/*
 * initialize a newly allocated inode.
 */
struct inode *ceph_alloc_inode(struct super_block *sb)
{
	struct ceph_inode_info *ci;
	int i;

	ci = kmem_cache_alloc(ceph_inode_cachep, GFP_NOFS);
	if (!ci)
		return NULL;

	dout("alloc_inode %p\n", &ci->vfs_inode);

	spin_lock_init(&ci->i_ceph_lock);

	ci->i_version = 0;
	ci->i_inline_version = 0;
	ci->i_time_warp_seq = 0;
	ci->i_ceph_flags = 0;
	atomic64_set(&ci->i_ordered_count, 1);
	atomic64_set(&ci->i_release_count, 1);
	atomic64_set(&ci->i_complete_seq[0], 0);
	atomic64_set(&ci->i_complete_seq[1], 0);
	ci->i_symlink = NULL;

	memset(&ci->i_dir_layout, 0, sizeof(ci->i_dir_layout));
	RCU_INIT_POINTER(ci->i_layout.pool_ns, NULL);

	ci->i_fragtree = RB_ROOT;
	mutex_init(&ci->i_fragtree_mutex);

	ci->i_xattrs.blob = NULL;
	ci->i_xattrs.prealloc_blob = NULL;
	ci->i_xattrs.dirty = false;
	ci->i_xattrs.index = RB_ROOT;
	ci->i_xattrs.count = 0;
	ci->i_xattrs.names_size = 0;
	ci->i_xattrs.vals_size = 0;
	ci->i_xattrs.version = 0;
	ci->i_xattrs.index_version = 0;

	ci->i_caps = RB_ROOT;
	ci->i_auth_cap = NULL;
	ci->i_dirty_caps = 0;
	ci->i_flushing_caps = 0;
	INIT_LIST_HEAD(&ci->i_dirty_item);
	INIT_LIST_HEAD(&ci->i_flushing_item);
	ci->i_prealloc_cap_flush = NULL;
	INIT_LIST_HEAD(&ci->i_cap_flush_list);
	init_waitqueue_head(&ci->i_cap_wq);
	ci->i_hold_caps_min = 0;
	ci->i_hold_caps_max = 0;
	INIT_LIST_HEAD(&ci->i_cap_delay_list);
	INIT_LIST_HEAD(&ci->i_cap_snaps);
	ci->i_head_snapc = NULL;
	ci->i_snap_caps = 0;

	for (i = 0; i < CEPH_FILE_MODE_BITS; i++)
		ci->i_nr_by_mode[i] = 0;

	mutex_init(&ci->i_truncate_mutex);
	ci->i_truncate_seq = 0;
	ci->i_truncate_size = 0;
	ci->i_truncate_pending = 0;

	ci->i_max_size = 0;
	ci->i_reported_size = 0;
	ci->i_wanted_max_size = 0;
	ci->i_requested_max_size = 0;

	ci->i_pin_ref = 0;
	ci->i_rd_ref = 0;
	ci->i_rdcache_ref = 0;
	ci->i_wr_ref = 0;
	ci->i_wb_ref = 0;
	ci->i_wrbuffer_ref = 0;
	ci->i_wrbuffer_ref_head = 0;
	ci->i_shared_gen = 0;
	ci->i_rdcache_gen = 0;
	ci->i_rdcache_revoking = 0;

	INIT_LIST_HEAD(&ci->i_unsafe_writes);
	INIT_LIST_HEAD(&ci->i_unsafe_dirops);
	INIT_LIST_HEAD(&ci->i_unsafe_iops);
	spin_lock_init(&ci->i_unsafe_lock);

	ci->i_snap_realm = NULL;
	INIT_LIST_HEAD(&ci->i_snap_realm_item);
	INIT_LIST_HEAD(&ci->i_snap_flush_item);

	INIT_WORK(&ci->i_wb_work, ceph_writeback_work);
	INIT_WORK(&ci->i_pg_inv_work, ceph_invalidate_work);

	INIT_WORK(&ci->i_vmtruncate_work, ceph_vmtruncate_work);

	ceph_fscache_inode_init(ci);

	return &ci->vfs_inode;
}

static void ceph_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	struct ceph_inode_info *ci = ceph_inode(inode);

	kmem_cache_free(ceph_inode_cachep, ci);
}

void ceph_destroy_inode(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_frag *frag;
	struct rb_node *n;

	dout("destroy_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode));

	ceph_fscache_unregister_inode_cookie(ci);

	ceph_queue_caps_release(inode);

	/*
	 * we may still have a snap_realm reference if there are stray
	 * caps in i_snap_caps.
	 */
	if (ci->i_snap_realm) {
		struct ceph_mds_client *mdsc =
			ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
		struct ceph_snap_realm *realm = ci->i_snap_realm;

		dout(" dropping residual ref to snap realm %p\n", realm);
		spin_lock(&realm->inodes_with_caps_lock);
		list_del_init(&ci->i_snap_realm_item);
		spin_unlock(&realm->inodes_with_caps_lock);
		ceph_put_snap_realm(mdsc, realm);
	}

	kfree(ci->i_symlink);
	while ((n = rb_first(&ci->i_fragtree)) != NULL) {
		frag = rb_entry(n, struct ceph_inode_frag, node);
		rb_erase(n, &ci->i_fragtree);
		kfree(frag);
	}
	ci->i_fragtree_nsplits = 0;

	__ceph_destroy_xattrs(ci);
	if (ci->i_xattrs.blob)
		ceph_buffer_put(ci->i_xattrs.blob);
	if (ci->i_xattrs.prealloc_blob)
		ceph_buffer_put(ci->i_xattrs.prealloc_blob);

	ceph_put_string(rcu_dereference_raw(ci->i_layout.pool_ns));

	call_rcu(&inode->i_rcu, ceph_i_callback);
}

int ceph_drop_inode(struct inode *inode)
{
	/*
	 * Positive dentry and corresponding inode are always accompanied
	 * in MDS reply.  So there is no need to keep the inode in the
	 * cache after dropping all its aliases.
	 */
	return 1;
}

void ceph_evict_inode(struct inode *inode)
{
	/* wait for unsafe sync writes to complete */
	ceph_sync_write_wait(inode);
	truncate_inode_pages_final(&inode->i_data);
	clear_inode(inode);
}

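/* i_blocks is in 512-byte units; round the byte size up, so e.g. a
 * 1-byte file occupies one block. */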
static inline blkcnt_t calc_inode_blocks(u64 size)
{
	return (size + (1<<9) - 1) >> 9;
}

/*
 * Helpers to fill in size, ctime, mtime, and atime.  We have to be
 * careful because either the client or MDS may have more up to date
 * info, depending on which capabilities are held, and whether
 * time_warp_seq or truncate_seq have increased.  (Ordinarily, mtime
 * and size are monotonically increasing, except when utimes() or
 * truncate() increments the corresponding _seq values.)
 */
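
/*
 * Worked example of the rule above (informal, derived from the checks
 * in ceph_fill_file_size() below): while this client holds Fw caps and
 * has written past the size the MDS last reported, the larger local
 * size wins (same truncate_seq, larger size).  When another client
 * truncates the file, the MDS bumps truncate_seq, and the smaller
 * MDS-provided size then takes precedence.
 */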
int ceph_fill_file_size(struct inode *inode, int issued,
			u32 truncate_seq, u64 truncate_size, u64 size)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int queue_trunc = 0;

	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 ||
	    (truncate_seq == ci->i_truncate_seq && size > inode->i_size)) {
		dout("size %lld -> %llu\n", inode->i_size, size);
		if (size > 0 && S_ISDIR(inode->i_mode)) {
			pr_err("fill_file_size non-zero size for directory\n");
			size = 0;
		}
		i_size_write(inode, size);
		inode->i_blocks = calc_inode_blocks(size);
		ci->i_reported_size = size;
		if (truncate_seq != ci->i_truncate_seq) {
			dout("truncate_seq %u -> %u\n",
			     ci->i_truncate_seq, truncate_seq);
			ci->i_truncate_seq = truncate_seq;

			/* the MDS should have revoked these caps */
			WARN_ON_ONCE(issued & (CEPH_CAP_FILE_EXCL |
					       CEPH_CAP_FILE_RD |
					       CEPH_CAP_FILE_WR |
					       CEPH_CAP_FILE_LAZYIO));
			/*
			 * If we hold relevant caps, or in the case where we're
			 * not the only client referencing this file and we
			 * don't hold those caps, then we need to check whether
			 * the file is either opened or mmaped
			 */
			if ((issued & (CEPH_CAP_FILE_CACHE|
				       CEPH_CAP_FILE_BUFFER)) ||
			    mapping_mapped(inode->i_mapping) ||
			    __ceph_caps_file_wanted(ci)) {
				ci->i_truncate_pending++;
				queue_trunc = 1;
			}
		}
	}
	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) >= 0 &&
	    ci->i_truncate_size != truncate_size) {
		dout("truncate_size %lld -> %llu\n", ci->i_truncate_size,
		     truncate_size);
		ci->i_truncate_size = truncate_size;
	}

	if (queue_trunc)
		ceph_fscache_invalidate(inode);

	return queue_trunc;
}

void ceph_fill_file_time(struct inode *inode, int issued,
			 u64 time_warp_seq, struct timespec *ctime,
			 struct timespec *mtime, struct timespec *atime)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int warn = 0;

	if (issued & (CEPH_CAP_FILE_EXCL|
		      CEPH_CAP_FILE_WR|
		      CEPH_CAP_FILE_BUFFER|
		      CEPH_CAP_AUTH_EXCL|
		      CEPH_CAP_XATTR_EXCL)) {
		if (timespec_compare(ctime, &inode->i_ctime) > 0) {
			dout("ctime %ld.%09ld -> %ld.%09ld inc w/ cap\n",
			     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
			     ctime->tv_sec, ctime->tv_nsec);
			inode->i_ctime = *ctime;
		}
		if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
			/* the MDS did a utimes() */
			dout("mtime %ld.%09ld -> %ld.%09ld "
			     "tw %d -> %d\n",
			     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
			     mtime->tv_sec, mtime->tv_nsec,
			     ci->i_time_warp_seq, (int)time_warp_seq);

			inode->i_mtime = *mtime;
			inode->i_atime = *atime;
			ci->i_time_warp_seq = time_warp_seq;
		} else if (time_warp_seq == ci->i_time_warp_seq) {
			/* nobody did utimes(); take the max */
			if (timespec_compare(mtime, &inode->i_mtime) > 0) {
				dout("mtime %ld.%09ld -> %ld.%09ld inc\n",
				     inode->i_mtime.tv_sec,
				     inode->i_mtime.tv_nsec,
				     mtime->tv_sec, mtime->tv_nsec);
				inode->i_mtime = *mtime;
			}
			if (timespec_compare(atime, &inode->i_atime) > 0) {
				dout("atime %ld.%09ld -> %ld.%09ld inc\n",
				     inode->i_atime.tv_sec,
				     inode->i_atime.tv_nsec,
				     atime->tv_sec, atime->tv_nsec);
				inode->i_atime = *atime;
			}
		} else if (issued & CEPH_CAP_FILE_EXCL) {
			/* we did a utimes(); ignore mds values */
		} else {
			warn = 1;
		}
	} else {
		/* we have no write|excl caps; whatever the MDS says is true */
		if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) {
			inode->i_ctime = *ctime;
			inode->i_mtime = *mtime;
			inode->i_atime = *atime;
			ci->i_time_warp_seq = time_warp_seq;
		} else {
			warn = 1;
		}
	}
	if (warn) /* time_warp_seq shouldn't go backwards */
		dout("%p mds time_warp_seq %llu < %u\n",
		     inode, time_warp_seq, ci->i_time_warp_seq);
}

/*
 * Populate an inode based on info from mds.  May be called on new or
 * existing inodes.
 */
static int fill_inode(struct inode *inode, struct page *locked_page,
		      struct ceph_mds_reply_info_in *iinfo,
		      struct ceph_mds_reply_dirfrag *dirinfo,
		      struct ceph_mds_session *session,
		      unsigned long ttl_from, int cap_fmode,
		      struct ceph_cap_reservation *caps_reservation)
{
	struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
	struct ceph_mds_reply_inode *info = iinfo->in;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int issued = 0, implemented, new_issued;
	struct timespec mtime, atime, ctime;
	struct ceph_buffer *xattr_blob = NULL;
	struct ceph_string *pool_ns = NULL;
	struct ceph_cap *new_cap = NULL;
	int err = 0;
	bool wake = false;
	bool queue_trunc = false;
	bool new_version = false;
	bool fill_inline = false;

	dout("fill_inode %p ino %llx.%llx v %llu had %llu\n",
	     inode, ceph_vinop(inode), le64_to_cpu(info->version),
	     ci->i_version);

	/* prealloc new cap struct */
	if (info->cap.caps && ceph_snap(inode) == CEPH_NOSNAP)
		new_cap = ceph_get_cap(mdsc, caps_reservation);

	/*
	 * prealloc xattr data, if it looks like we'll need it.  only
	 * if len > 4 (meaning there are actually xattrs; the first 4
	 * bytes are the xattr count).
	 */
	if (iinfo->xattr_len > 4) {
		xattr_blob = ceph_buffer_new(iinfo->xattr_len, GFP_NOFS);
		if (!xattr_blob)
			pr_err("fill_inode ENOMEM xattr blob %d bytes\n",
			       iinfo->xattr_len);
	}

	if (iinfo->pool_ns_len > 0)
		pool_ns = ceph_find_or_create_string(iinfo->pool_ns_data,
						     iinfo->pool_ns_len);

	spin_lock(&ci->i_ceph_lock);

	/*
	 * provided version will be odd if inode value is projected,
	 * even if stable.  skip the update if we have newer stable
	 * info (ours>=theirs, e.g. due to racing mds replies), unless
	 * we are getting projected (unstable) info (in which case the
	 * version is odd, and we want ours>theirs).
	 *   us   them
	 *   2    2     skip
	 *   3    2     skip
	 *   3    3     update
	 */
	if (ci->i_version == 0 ||
	    ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
	     le64_to_cpu(info->version) > (ci->i_version & ~1)))
		new_version = true;

	issued = __ceph_caps_issued(ci, &implemented);
	issued |= implemented | __ceph_caps_dirty(ci);
	new_issued = ~issued & le32_to_cpu(info->cap.caps);

	/* update inode */
	ci->i_version = le64_to_cpu(info->version);
	inode->i_version++;
	inode->i_rdev = le32_to_cpu(info->rdev);
	inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;

	if ((new_version || (new_issued & CEPH_CAP_AUTH_SHARED)) &&
	    (issued & CEPH_CAP_AUTH_EXCL) == 0) {
		inode->i_mode = le32_to_cpu(info->mode);
		inode->i_uid = make_kuid(&init_user_ns, le32_to_cpu(info->uid));
		inode->i_gid = make_kgid(&init_user_ns, le32_to_cpu(info->gid));
		dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
		     from_kuid(&init_user_ns, inode->i_uid),
		     from_kgid(&init_user_ns, inode->i_gid));
	}

	if ((new_version || (new_issued & CEPH_CAP_LINK_SHARED)) &&
	    (issued & CEPH_CAP_LINK_EXCL) == 0)
		set_nlink(inode, le32_to_cpu(info->nlink));

	if (new_version || (new_issued & CEPH_CAP_ANY_RD)) {
		/* be careful with mtime, atime, size */
		ceph_decode_timespec(&atime, &info->atime);
		ceph_decode_timespec(&mtime, &info->mtime);
		ceph_decode_timespec(&ctime, &info->ctime);
		ceph_fill_file_time(inode, issued,
				le32_to_cpu(info->time_warp_seq),
				&ctime, &mtime, &atime);
	}

	if (new_version ||
	    (new_issued & (CEPH_CAP_ANY_FILE_RD | CEPH_CAP_ANY_FILE_WR))) {
		s64 old_pool = ci->i_layout.pool_id;
		struct ceph_string *old_ns;

		ceph_file_layout_from_legacy(&ci->i_layout, &info->layout);
		old_ns = rcu_dereference_protected(ci->i_layout.pool_ns,
					lockdep_is_held(&ci->i_ceph_lock));
		rcu_assign_pointer(ci->i_layout.pool_ns, pool_ns);

		if (ci->i_layout.pool_id != old_pool || pool_ns != old_ns)
			ci->i_ceph_flags &= ~CEPH_I_POOL_PERM;

		pool_ns = old_ns;

		queue_trunc = ceph_fill_file_size(inode, issued,
					le32_to_cpu(info->truncate_seq),
					le64_to_cpu(info->truncate_size),
					le64_to_cpu(info->size));
		/* only update max_size on auth cap */
		if ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
		    ci->i_max_size != le64_to_cpu(info->max_size)) {
			dout("max_size %lld -> %llu\n", ci->i_max_size,
			     le64_to_cpu(info->max_size));
			ci->i_max_size = le64_to_cpu(info->max_size);
		}
	}

	/* xattrs */
	/* note that if i_xattrs.len <= 4, i_xattrs.data will still be NULL. */
	if ((ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL)) &&
	    le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
		if (ci->i_xattrs.blob)
			ceph_buffer_put(ci->i_xattrs.blob);
		ci->i_xattrs.blob = xattr_blob;
		if (xattr_blob)
			memcpy(ci->i_xattrs.blob->vec.iov_base,
			       iinfo->xattr_data, iinfo->xattr_len);
		ci->i_xattrs.version = le64_to_cpu(info->xattr_version);
		ceph_forget_all_cached_acls(inode);
		xattr_blob = NULL;
	}

	inode->i_mapping->a_ops = &ceph_aops;

	switch (inode->i_mode & S_IFMT) {
	case S_IFIFO:
	case S_IFBLK:
	case S_IFCHR:
	case S_IFSOCK:
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
		inode->i_op = &ceph_file_iops;
		break;
	case S_IFREG:
		inode->i_op = &ceph_file_iops;
		inode->i_fop = &ceph_file_fops;
		break;
	case S_IFLNK:
		inode->i_op = &ceph_symlink_iops;
		if (!ci->i_symlink) {
			u32 symlen = iinfo->symlink_len;
			char *sym;

			spin_unlock(&ci->i_ceph_lock);

			if (symlen != i_size_read(inode)) {
				pr_err("fill_inode %llx.%llx BAD symlink "
				       "size %lld\n", ceph_vinop(inode),
				       i_size_read(inode));
				i_size_write(inode, symlen);
				inode->i_blocks = calc_inode_blocks(symlen);
			}

			err = -ENOMEM;
			sym = kstrndup(iinfo->symlink, symlen, GFP_NOFS);
			if (!sym)
				goto out;

			spin_lock(&ci->i_ceph_lock);
			if (!ci->i_symlink)
				ci->i_symlink = sym;
			else
				kfree(sym); /* lost a race */
		}
		inode->i_link = ci->i_symlink;
		break;
	case S_IFDIR:
		inode->i_op = &ceph_dir_iops;
		inode->i_fop = &ceph_dir_fops;

		ci->i_dir_layout = iinfo->dir_layout;

		ci->i_files = le64_to_cpu(info->files);
		ci->i_subdirs = le64_to_cpu(info->subdirs);
		ci->i_rbytes = le64_to_cpu(info->rbytes);
		ci->i_rfiles = le64_to_cpu(info->rfiles);
		ci->i_rsubdirs = le64_to_cpu(info->rsubdirs);
		ceph_decode_timespec(&ci->i_rctime, &info->rctime);
		break;
	default:
		pr_err("fill_inode %llx.%llx BAD mode 0%o\n",
		       ceph_vinop(inode), inode->i_mode);
	}

	/* were we issued a capability? */
	if (info->cap.caps) {
		if (ceph_snap(inode) == CEPH_NOSNAP) {
			unsigned caps = le32_to_cpu(info->cap.caps);
			ceph_add_cap(inode, session,
				     le64_to_cpu(info->cap.cap_id),
				     cap_fmode, caps,
				     le32_to_cpu(info->cap.wanted),
				     le32_to_cpu(info->cap.seq),
				     le32_to_cpu(info->cap.mseq),
				     le64_to_cpu(info->cap.realm),
				     info->cap.flags, &new_cap);

			/* set dir completion flag? */
			if (S_ISDIR(inode->i_mode) &&
			    ci->i_files == 0 && ci->i_subdirs == 0 &&
			    (caps & CEPH_CAP_FILE_SHARED) &&
			    (issued & CEPH_CAP_FILE_EXCL) == 0 &&
			    !__ceph_dir_is_complete(ci)) {
				dout(" marking %p complete (empty)\n", inode);
				i_size_write(inode, 0);
				__ceph_dir_set_complete(ci,
					atomic64_read(&ci->i_release_count),
					atomic64_read(&ci->i_ordered_count));
			}

			wake = true;
		} else {
			dout(" %p got snap_caps %s\n", inode,
			     ceph_cap_string(le32_to_cpu(info->cap.caps)));
			ci->i_snap_caps |= le32_to_cpu(info->cap.caps);
			if (cap_fmode >= 0)
				__ceph_get_fmode(ci, cap_fmode);
		}
	} else if (cap_fmode >= 0) {
		pr_warn("mds issued no caps on %llx.%llx\n",
			ceph_vinop(inode));
		__ceph_get_fmode(ci, cap_fmode);
	}

	if (iinfo->inline_version > 0 &&
	    iinfo->inline_version >= ci->i_inline_version) {
		int cache_caps = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
		ci->i_inline_version = iinfo->inline_version;
		if (ci->i_inline_version != CEPH_INLINE_NONE &&
		    (locked_page ||
		     (le32_to_cpu(info->cap.caps) & cache_caps)))
			fill_inline = true;
	}

	spin_unlock(&ci->i_ceph_lock);

	if (fill_inline)
		ceph_fill_inline_data(inode, locked_page,
				      iinfo->inline_data, iinfo->inline_len);

	if (wake)
		wake_up_all(&ci->i_cap_wq);

	/* queue truncate if we saw i_size decrease */
	if (queue_trunc)
		ceph_queue_vmtruncate(inode);

	/* populate frag tree */
	if (S_ISDIR(inode->i_mode))
		ceph_fill_fragtree(inode, &info->fragtree, dirinfo);

	/* update delegation info? */
	if (dirinfo)
		ceph_fill_dirfrag(inode, dirinfo);

	err = 0;
out:
	if (new_cap)
		ceph_put_cap(mdsc, new_cap);
	if (xattr_blob)
		ceph_buffer_put(xattr_blob);
	ceph_put_string(pool_ns);
	return err;
}

/*
 * caller should hold session s_mutex.
 */
static void update_dentry_lease(struct dentry *dentry,
				struct ceph_mds_reply_lease *lease,
				struct ceph_mds_session *session,
				unsigned long from_time)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	long unsigned duration = le32_to_cpu(lease->duration_ms);
	long unsigned ttl = from_time + (duration * HZ) / 1000;
	long unsigned half_ttl = from_time + (duration * HZ / 2) / 1000;
	struct inode *dir;

	spin_lock(&dentry->d_lock);
	dout("update_dentry_lease %p duration %lu ms ttl %lu\n",
	     dentry, duration, ttl);

	/* make lease_rdcache_gen match directory */
	dir = d_inode(dentry->d_parent);

	/* only track leases on regular dentries */
	if (ceph_snap(dir) != CEPH_NOSNAP)
		goto out_unlock;

	di->lease_shared_gen = ceph_inode(dir)->i_shared_gen;

	if (duration == 0)
		goto out_unlock;

	if (di->lease_gen == session->s_cap_gen &&
	    time_before(ttl, di->time))
		goto out_unlock;  /* we already have a newer lease. */

	if (di->lease_session && di->lease_session != session)
		goto out_unlock;

	ceph_dentry_lru_touch(dentry);

	if (!di->lease_session)
		di->lease_session = ceph_get_mds_session(session);
	di->lease_gen = session->s_cap_gen;
	di->lease_seq = le32_to_cpu(lease->seq);
	di->lease_renew_after = half_ttl;
	di->lease_renew_from = 0;
	di->time = ttl;
out_unlock:
	spin_unlock(&dentry->d_lock);
	return;
}

/*
 * splice a dentry to an inode.
 * caller must hold directory i_mutex for this to be safe.
 */
static struct dentry *splice_dentry(struct dentry *dn, struct inode *in)
{
	struct dentry *realdn;

	BUG_ON(d_inode(dn));

	/* dn must be unhashed */
	if (!d_unhashed(dn))
		d_drop(dn);
	realdn = d_splice_alias(in, dn);
	if (IS_ERR(realdn)) {
		pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n",
		       PTR_ERR(realdn), dn, in, ceph_vinop(in));
		dn = realdn; /* note realdn contains the error */
		goto out;
	} else if (realdn) {
		dout("dn %p (%d) spliced with %p (%d) "
		     "inode %p ino %llx.%llx\n",
		     dn, d_count(dn),
		     realdn, d_count(realdn),
		     d_inode(realdn), ceph_vinop(d_inode(realdn)));
		dput(dn);
		dn = realdn;
	} else {
		BUG_ON(!ceph_dentry(dn));
		dout("dn %p attached to %p ino %llx.%llx\n",
		     dn, d_inode(dn), ceph_vinop(d_inode(dn)));
	}
out:
	return dn;
}

/*
 * Incorporate results into the local cache.  This is either just
 * one inode, or a directory, dentry, and possibly linked-to inode (e.g.,
 * after a lookup).
 *
 * A reply may contain
 *         a directory inode along with a dentry.
 *   and/or a target inode
 *
 * Called with snap_rwsem (read).
 */
int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
		    struct ceph_mds_session *session)
{
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	struct inode *in = NULL;
	struct ceph_vino vino;
	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
	int err = 0;

	dout("fill_trace %p is_dentry %d is_target %d\n", req,
	     rinfo->head->is_dentry, rinfo->head->is_target);

#if 0
	/*
	 * Debugging hook:
	 *
	 * If we resend completed ops to a recovering mds, we get no
	 * trace.  Since that is very rare, pretend this is the case
	 * to ensure the 'no trace' handlers in the callers behave.
	 *
	 * Fill in inodes unconditionally to avoid breaking cap
	 * invariants.
	 */
	if (rinfo->head->op & CEPH_MDS_OP_WRITE) {
		pr_info("fill_trace faking empty trace on %lld %s\n",
			req->r_tid, ceph_mds_op_name(rinfo->head->op));
		if (rinfo->head->is_dentry) {
			rinfo->head->is_dentry = 0;
			err = fill_inode(req->r_locked_dir,
					 &rinfo->diri, rinfo->dirfrag,
					 session, req->r_request_started, -1);
		}
		if (rinfo->head->is_target) {
			rinfo->head->is_target = 0;
			ininfo = rinfo->targeti.in;
			vino.ino = le64_to_cpu(ininfo->ino);
			vino.snap = le64_to_cpu(ininfo->snapid);
			in = ceph_get_inode(sb, vino);
			err = fill_inode(in, &rinfo->targeti, NULL,
					 session, req->r_request_started,
					 req->r_fmode);
			iput(in);
		}
	}
#endif

	if (!rinfo->head->is_target && !rinfo->head->is_dentry) {
		dout("fill_trace reply is empty!\n");
		if (rinfo->head->result == 0 && req->r_locked_dir)
			ceph_invalidate_dir_request(req);
		return 0;
	}

	if (rinfo->head->is_dentry) {
		struct inode *dir = req->r_locked_dir;

		if (dir) {
			err = fill_inode(dir, NULL,
					 &rinfo->diri, rinfo->dirfrag,
					 session, req->r_request_started, -1,
					 &req->r_caps_reservation);
			if (err < 0)
				goto done;
		} else {
			WARN_ON_ONCE(1);
		}

		if (dir && req->r_op == CEPH_MDS_OP_LOOKUPNAME) {
			struct qstr dname;
			struct dentry *dn, *parent;

			BUG_ON(!rinfo->head->is_target);
			BUG_ON(req->r_dentry);

			parent = d_find_any_alias(dir);
			BUG_ON(!parent);

			dname.name = rinfo->dname;
			dname.len = rinfo->dname_len;
			dname.hash = full_name_hash(parent, dname.name, dname.len);
			vino.ino = le64_to_cpu(rinfo->targeti.in->ino);
			vino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
retry_lookup:
			dn = d_lookup(parent, &dname);
			dout("d_lookup on parent=%p name=%.*s got %p\n",
			     parent, dname.len, dname.name, dn);

			if (!dn) {
				dn = d_alloc(parent, &dname);
				dout("d_alloc %p '%.*s' = %p\n", parent,
				     dname.len, dname.name, dn);
				if (dn == NULL) {
					dput(parent);
					err = -ENOMEM;
					goto done;
				}
				err = 0;
			} else if (d_really_is_positive(dn) &&
				   (ceph_ino(d_inode(dn)) != vino.ino ||
				    ceph_snap(d_inode(dn)) != vino.snap)) {
				dout(" dn %p points to wrong inode %p\n",
				     dn, d_inode(dn));
				d_delete(dn);
				dput(dn);
				goto retry_lookup;
			}

			req->r_dentry = dn;
			dput(parent);
		}
	}

	if (rinfo->head->is_target) {
		vino.ino = le64_to_cpu(rinfo->targeti.in->ino);
		vino.snap = le64_to_cpu(rinfo->targeti.in->snapid);

		in = ceph_get_inode(sb, vino);
		if (IS_ERR(in)) {
			err = PTR_ERR(in);
			goto done;
		}
		req->r_target_inode = in;

		err = fill_inode(in, req->r_locked_page, &rinfo->targeti, NULL,
				 session, req->r_request_started,
				 (!req->r_aborted && rinfo->head->result == 0) ?
				 req->r_fmode : -1,
				 &req->r_caps_reservation);
		if (err < 0) {
			pr_err("fill_inode badness %p %llx.%llx\n",
			       in, ceph_vinop(in));
			goto done;
		}
	}

	/*
	 * ignore null lease/binding on snapdir ENOENT, or else we
	 * will have trouble splicing in the virtual snapdir later
	 */
	if (rinfo->head->is_dentry && !req->r_aborted &&
	    req->r_locked_dir &&
	    (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name,
					       fsc->mount_options->snapdir_name,
					       req->r_dentry->d_name.len))) {
		/*
		 * lookup link rename   : null -> possibly existing inode
		 * mknod symlink mkdir  : null -> new inode
		 * unlink               : linked -> null
		 */
		struct inode *dir = req->r_locked_dir;
		struct dentry *dn = req->r_dentry;
		bool have_dir_cap, have_lease;

		BUG_ON(!dn);
		BUG_ON(!dir);
		BUG_ON(d_inode(dn->d_parent) != dir);
		BUG_ON(ceph_ino(dir) !=
		       le64_to_cpu(rinfo->diri.in->ino));
		BUG_ON(ceph_snap(dir) !=
		       le64_to_cpu(rinfo->diri.in->snapid));

		/* do we have a lease on the whole dir? */
		have_dir_cap =
			(le32_to_cpu(rinfo->diri.in->cap.caps) &
			 CEPH_CAP_FILE_SHARED);

		/* do we have a dn lease? */
		have_lease = have_dir_cap ||
			le32_to_cpu(rinfo->dlease->duration_ms);
		if (!have_lease)
			dout("fill_trace no dentry lease or dir cap\n");

		/* rename? */
		if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) {
			struct inode *olddir = req->r_old_dentry_dir;
			BUG_ON(!olddir);

			dout(" src %p '%pd' dst %p '%pd'\n",
			     req->r_old_dentry,
			     req->r_old_dentry,
			     dn, dn);
			dout("fill_trace doing d_move %p -> %p\n",
			     req->r_old_dentry, dn);

			/* d_move screws up sibling dentries' offsets */
			ceph_dir_clear_ordered(dir);
			ceph_dir_clear_ordered(olddir);

			d_move(req->r_old_dentry, dn);
			dout(" src %p '%pd' dst %p '%pd'\n",
			     req->r_old_dentry,
			     req->r_old_dentry,
			     dn, dn);

			/* ensure target dentry is invalidated, despite
			   rehashing bug in vfs_rename_dir */
			ceph_invalidate_dentry_lease(dn);

			dout("dn %p gets new offset %lld\n", req->r_old_dentry,
			     ceph_dentry(req->r_old_dentry)->offset);

			dn = req->r_old_dentry;  /* use old_dentry */
		}

		/* null dentry? */
		if (!rinfo->head->is_target) {
			dout("fill_trace null dentry\n");
			if (d_really_is_positive(dn)) {
				ceph_dir_clear_ordered(dir);
				dout("d_delete %p\n", dn);
				d_delete(dn);
			} else {
				if (have_lease && d_unhashed(dn))
					d_add(dn, NULL);
				update_dentry_lease(dn, rinfo->dlease,
						    session,
						    req->r_request_started);
			}
			goto done;
		}

		/* attach proper inode */
		if (d_really_is_negative(dn)) {
			ceph_dir_clear_ordered(dir);
			ihold(in);
			dn = splice_dentry(dn, in);
			if (IS_ERR(dn)) {
				err = PTR_ERR(dn);
				goto done;
			}
			req->r_dentry = dn;  /* may have spliced */
		} else if (d_really_is_positive(dn) && d_inode(dn) != in) {
			dout(" %p links to %p %llx.%llx, not %llx.%llx\n",
			     dn, d_inode(dn), ceph_vinop(d_inode(dn)),
			     ceph_vinop(in));
			d_invalidate(dn);
			have_lease = false;
		}

		if (have_lease)
			update_dentry_lease(dn, rinfo->dlease, session,
					    req->r_request_started);
		dout(" final dn %p\n", dn);
	} else if (!req->r_aborted &&
		   (req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
		    req->r_op == CEPH_MDS_OP_MKSNAP)) {
		struct dentry *dn = req->r_dentry;
		struct inode *dir = req->r_locked_dir;

		/* fill out a snapdir LOOKUPSNAP dentry */
		BUG_ON(!dn);
		BUG_ON(!dir);
		BUG_ON(ceph_snap(dir) != CEPH_SNAPDIR);
		dout(" linking snapped dir %p to dn %p\n", in, dn);
		ceph_dir_clear_ordered(dir);
		ihold(in);
		dn = splice_dentry(dn, in);
		if (IS_ERR(dn)) {
			err = PTR_ERR(dn);
			goto done;
		}
		req->r_dentry = dn;  /* may have spliced */
	}
done:
	dout("fill_trace done err=%d\n", err);
	return err;
}

/*
 * Prepopulate our cache with readdir results, leases, etc.
 */
static int readdir_prepopulate_inodes_only(struct ceph_mds_request *req,
					   struct ceph_mds_session *session)
{
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	int i, err = 0;

	for (i = 0; i < rinfo->dir_nr; i++) {
		struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i;
		struct ceph_vino vino;
		struct inode *in;
		int rc;

		vino.ino = le64_to_cpu(rde->inode.in->ino);
		vino.snap = le64_to_cpu(rde->inode.in->snapid);

		in = ceph_get_inode(req->r_dentry->d_sb, vino);
		if (IS_ERR(in)) {
			err = PTR_ERR(in);
			dout("new_inode badness got %d\n", err);
			continue;
		}
		rc = fill_inode(in, NULL, &rde->inode, NULL, session,
				req->r_request_started, -1,
				&req->r_caps_reservation);
		if (rc < 0) {
			pr_err("fill_inode badness on %p got %d\n", in, rc);
			err = rc;
		}
		iput(in);
	}

	return err;
}

void ceph_readdir_cache_release(struct ceph_readdir_cache_control *ctl)
{
	if (ctl->page) {
		kunmap(ctl->page);
		put_page(ctl->page);
		ctl->page = NULL;
	}
}

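/*
 * Cache the dentry for this readdir entry in the directory's page
 * cache (each page holds PAGE_SIZE / sizeof(struct dentry *) slots),
 * so a later readdir can be served from the dcache as long as no
 * dentries were dropped in the meantime.
 */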
static int fill_readdir_cache(struct inode *dir, struct dentry *dn,
			      struct ceph_readdir_cache_control *ctl,
			      struct ceph_mds_request *req)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	unsigned nsize = PAGE_SIZE / sizeof(struct dentry*);
	unsigned idx = ctl->index % nsize;
	pgoff_t pgoff = ctl->index / nsize;

	if (!ctl->page || pgoff != page_index(ctl->page)) {
		ceph_readdir_cache_release(ctl);
		if (idx == 0)
			ctl->page = grab_cache_page(&dir->i_data, pgoff);
		else
			ctl->page = find_lock_page(&dir->i_data, pgoff);
		if (!ctl->page) {
			ctl->index = -1;
			return idx == 0 ? -ENOMEM : 0;
		}
		/* reading/filling the cache are serialized by
		 * i_mutex, no need to use page lock */
		unlock_page(ctl->page);
		ctl->dentries = kmap(ctl->page);
		if (idx == 0)
			memset(ctl->dentries, 0, PAGE_SIZE);
	}

	if (req->r_dir_release_cnt == atomic64_read(&ci->i_release_count) &&
	    req->r_dir_ordered_cnt == atomic64_read(&ci->i_ordered_count)) {
		dout("readdir cache dn %p idx %d\n", dn, ctl->index);
		ctl->dentries[idx] = dn;
		ctl->index++;
	} else {
		dout("disable readdir cache\n");
		ctl->index = -1;
	}
	return 0;
}

int ceph_readdir_prepopulate(struct ceph_mds_request *req,
			     struct ceph_mds_session *session)
{
	struct dentry *parent = req->r_dentry;
	struct ceph_inode_info *ci = ceph_inode(d_inode(parent));
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	struct qstr dname;
	struct dentry *dn;
	struct inode *in;
	int err = 0, skipped = 0, ret, i;
	struct inode *snapdir = NULL;
	struct ceph_mds_request_head *rhead = req->r_request->front.iov_base;
	u32 frag = le32_to_cpu(rhead->args.readdir.frag);
	u32 last_hash = 0;
	u32 fpos_offset;
	struct ceph_readdir_cache_control cache_ctl = {};

	if (req->r_aborted)
		return readdir_prepopulate_inodes_only(req, session);

	if (rinfo->hash_order && req->r_path2) {
		last_hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash,
					  req->r_path2, strlen(req->r_path2));
		last_hash = ceph_frag_value(last_hash);
	}

	if (rinfo->dir_dir &&
	    le32_to_cpu(rinfo->dir_dir->frag) != frag) {
		dout("readdir_prepopulate got new frag %x -> %x\n",
		     frag, le32_to_cpu(rinfo->dir_dir->frag));
		frag = le32_to_cpu(rinfo->dir_dir->frag);
		if (!rinfo->hash_order)
			req->r_readdir_offset = 2;
	}

	if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) {
		snapdir = ceph_get_snapdir(d_inode(parent));
		parent = d_find_alias(snapdir);
		dout("readdir_prepopulate %d items under SNAPDIR dn %p\n",
		     rinfo->dir_nr, parent);
	} else {
		dout("readdir_prepopulate %d items under dn %p\n",
		     rinfo->dir_nr, parent);
		if (rinfo->dir_dir)
			ceph_fill_dirfrag(d_inode(parent), rinfo->dir_dir);
	}

	if (ceph_frag_is_leftmost(frag) && req->r_readdir_offset == 2 &&
	    !(rinfo->hash_order && req->r_path2)) {
		/* note dir version at start of readdir so we can tell
		 * if any dentries get dropped */
		req->r_dir_release_cnt = atomic64_read(&ci->i_release_count);
		req->r_dir_ordered_cnt = atomic64_read(&ci->i_ordered_count);
		req->r_readdir_cache_idx = 0;
	}

	cache_ctl.index = req->r_readdir_cache_idx;
	fpos_offset = req->r_readdir_offset;

	/* FIXME: release caps/leases if error occurs */
	for (i = 0; i < rinfo->dir_nr; i++) {
		struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i;
		struct ceph_vino vino;

		dname.name = rde->name;
		dname.len = rde->name_len;
		dname.hash = full_name_hash(parent, dname.name, dname.len);

		vino.ino = le64_to_cpu(rde->inode.in->ino);
		vino.snap = le64_to_cpu(rde->inode.in->snapid);

		if (rinfo->hash_order) {
			u32 hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash,
						 rde->name, rde->name_len);
			hash = ceph_frag_value(hash);
			if (hash != last_hash)
				fpos_offset = 2;
			last_hash = hash;
			rde->offset = ceph_make_fpos(hash, fpos_offset++, true);
		} else {
			rde->offset = ceph_make_fpos(frag, fpos_offset++, false);
		}

retry_lookup:
		dn = d_lookup(parent, &dname);
		dout("d_lookup on parent=%p name=%.*s got %p\n",
		     parent, dname.len, dname.name, dn);

		if (!dn) {
			dn = d_alloc(parent, &dname);
			dout("d_alloc %p '%.*s' = %p\n", parent,
			     dname.len, dname.name, dn);
			if (dn == NULL) {
				dout("d_alloc badness\n");
				err = -ENOMEM;
				goto out;
			}
		} else if (d_really_is_positive(dn) &&
			   (ceph_ino(d_inode(dn)) != vino.ino ||
			    ceph_snap(d_inode(dn)) != vino.snap)) {
			dout(" dn %p points to wrong inode %p\n",
			     dn, d_inode(dn));
			d_delete(dn);
			dput(dn);
			goto retry_lookup;
		}

		/* inode */
		if (d_really_is_positive(dn)) {
			in = d_inode(dn);
		} else {
			in = ceph_get_inode(parent->d_sb, vino);
			if (IS_ERR(in)) {
				dout("new_inode badness\n");
				d_drop(dn);
				dput(dn);
				err = PTR_ERR(in);
				goto out;
			}
		}

		ret = fill_inode(in, NULL, &rde->inode, NULL, session,
				 req->r_request_started, -1,
				 &req->r_caps_reservation);
		if (ret < 0) {
			pr_err("fill_inode badness on %p\n", in);
			if (d_really_is_negative(dn))
				iput(in);
			d_drop(dn);
			err = ret;
			goto next_item;
		}

		if (d_really_is_negative(dn)) {
			struct dentry *realdn;

			if (ceph_security_xattr_deadlock(in)) {
				dout(" skip splicing dn %p to inode %p"
				     " (security xattr deadlock)\n", dn, in);
				iput(in);
				skipped++;
				goto next_item;
			}

			realdn = splice_dentry(dn, in);
			if (IS_ERR(realdn)) {
				err = PTR_ERR(realdn);
				d_drop(dn);
				dn = NULL;
				goto next_item;
			}
			dn = realdn;
		}

		ceph_dentry(dn)->offset = rde->offset;

		update_dentry_lease(dn, rde->lease, req->r_session,
				    req->r_request_started);

		if (err == 0 && skipped == 0 && cache_ctl.index >= 0) {
			ret = fill_readdir_cache(d_inode(parent), dn,
						 &cache_ctl, req);
			if (ret < 0)
				err = ret;
		}
next_item:
		if (dn)
			dput(dn);
	}
out:
	if (err == 0 && skipped == 0) {
		req->r_did_prepopulate = true;
		req->r_readdir_cache_idx = cache_ctl.index;
	}
	ceph_readdir_cache_release(&cache_ctl);
	if (snapdir) {
		iput(snapdir);
		dput(parent);
	}
	dout("readdir_prepopulate done\n");
	return err;
}

int ceph_inode_set_size(struct inode *inode, loff_t size)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret = 0;

	spin_lock(&ci->i_ceph_lock);
	dout("set_size %p %llu -> %llu\n", inode, inode->i_size, size);
	i_size_write(inode, size);
	inode->i_blocks = calc_inode_blocks(size);

	/* tell the MDS if we are approaching max_size */
	if ((size << 1) >= ci->i_max_size &&
	    (ci->i_reported_size << 1) < ci->i_max_size)
		ret = 1;

	spin_unlock(&ci->i_ceph_lock);
	return ret;
}

/*
 * Write back inode data in a worker thread.  (This can't be done
 * in the message handler context.)
 */
void ceph_queue_writeback(struct inode *inode)
{
	ihold(inode);
	if (queue_work(ceph_inode_to_client(inode)->wb_wq,
		       &ceph_inode(inode)->i_wb_work)) {
		dout("ceph_queue_writeback %p\n", inode);
	} else {
		dout("ceph_queue_writeback %p failed\n", inode);
		iput(inode);
	}
}

static void ceph_writeback_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_wb_work);
	struct inode *inode = &ci->vfs_inode;

	dout("writeback %p\n", inode);
	filemap_fdatawrite(&inode->i_data);
	iput(inode);
}

/*
 * queue an async invalidation
 */
void ceph_queue_invalidate(struct inode *inode)
{
	ihold(inode);
	if (queue_work(ceph_inode_to_client(inode)->pg_inv_wq,
		       &ceph_inode(inode)->i_pg_inv_work)) {
		dout("ceph_queue_invalidate %p\n", inode);
	} else {
		dout("ceph_queue_invalidate %p failed\n", inode);
		iput(inode);
	}
}

/*
 * Invalidate inode pages in a worker thread.  (This can't be done
 * in the message handler context.)
 */
static void ceph_invalidate_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_pg_inv_work);
	struct inode *inode = &ci->vfs_inode;
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	u32 orig_gen;
	int check = 0;

	mutex_lock(&ci->i_truncate_mutex);

	if (ACCESS_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
		pr_warn_ratelimited("invalidate_pages %p %lld forced umount\n",
				    inode, ceph_ino(inode));
		mapping_set_error(inode->i_mapping, -EIO);
		truncate_pagecache(inode, 0);
		mutex_unlock(&ci->i_truncate_mutex);
		goto out;
	}

	spin_lock(&ci->i_ceph_lock);
	dout("invalidate_pages %p gen %d revoking %d\n", inode,
	     ci->i_rdcache_gen, ci->i_rdcache_revoking);
	if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
		if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE))
			check = 1;
		spin_unlock(&ci->i_ceph_lock);
		mutex_unlock(&ci->i_truncate_mutex);
		goto out;
	}
	orig_gen = ci->i_rdcache_gen;
	spin_unlock(&ci->i_ceph_lock);

	if (invalidate_inode_pages2(inode->i_mapping) < 0) {
		pr_err("invalidate_pages %p fails\n", inode);
	}

	spin_lock(&ci->i_ceph_lock);
	if (orig_gen == ci->i_rdcache_gen &&
	    orig_gen == ci->i_rdcache_revoking) {
		dout("invalidate_pages %p gen %d successful\n", inode,
		     ci->i_rdcache_gen);
		ci->i_rdcache_revoking--;
		check = 1;
	} else {
		dout("invalidate_pages %p gen %d raced, now %d revoking %d\n",
		     inode, orig_gen, ci->i_rdcache_gen,
		     ci->i_rdcache_revoking);
		if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE))
			check = 1;
	}
	spin_unlock(&ci->i_ceph_lock);
	mutex_unlock(&ci->i_truncate_mutex);
out:
	if (check)
		ceph_check_caps(ci, 0, NULL);
	iput(inode);
}


/*
 * called by trunc_wq; truncation, like writeback and invalidation
 * above, runs in a worker thread because it can't be done in the
 * message handler context.
 */
static void ceph_vmtruncate_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_vmtruncate_work);
	struct inode *inode = &ci->vfs_inode;

	dout("vmtruncate_work %p\n", inode);
	__ceph_do_pending_vmtruncate(inode);
	iput(inode);
}

/*
 * Queue an async vmtruncate.  If we fail to queue work, we will handle
 * the truncation the next time we call __ceph_do_pending_vmtruncate.
 */
void ceph_queue_vmtruncate(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	ihold(inode);

	if (queue_work(ceph_sb_to_client(inode->i_sb)->trunc_wq,
		       &ci->i_vmtruncate_work)) {
		dout("ceph_queue_vmtruncate %p\n", inode);
	} else {
		dout("ceph_queue_vmtruncate %p failed, pending=%d\n",
		     inode, ci->i_truncate_pending);
		iput(inode);
	}
}

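/*
 * i_truncate_pending counts truncations the MDS has told us about that
 * we have not yet applied to the page cache; i_truncate_size is the
 * size to truncate to.  Both are sampled under i_ceph_lock below, while
 * i_truncate_mutex serializes the actual truncation work.
 */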
/*
 * Make sure any pending truncation is applied before doing anything
 * that may depend on it.
 */
void __ceph_do_pending_vmtruncate(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 to;
	int wrbuffer_refs, finish = 0;

	mutex_lock(&ci->i_truncate_mutex);
retry:
	spin_lock(&ci->i_ceph_lock);
	if (ci->i_truncate_pending == 0) {
		dout("__do_pending_vmtruncate %p none pending\n", inode);
		spin_unlock(&ci->i_ceph_lock);
		mutex_unlock(&ci->i_truncate_mutex);
		return;
	}

	/*
	 * make sure any dirty snapped pages are flushed before we
	 * possibly truncate them... so write AND block!
	 */
	if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) {
		dout("__do_pending_vmtruncate %p flushing snaps first\n",
		     inode);
		spin_unlock(&ci->i_ceph_lock);
		filemap_write_and_wait_range(&inode->i_data, 0,
					     inode->i_sb->s_maxbytes);
		goto retry;
	}

	/* there should be no reader or writer */
	WARN_ON_ONCE(ci->i_rd_ref || ci->i_wr_ref);

	to = ci->i_truncate_size;
	wrbuffer_refs = ci->i_wrbuffer_ref;
	dout("__do_pending_vmtruncate %p (%d) to %lld\n", inode,
	     ci->i_truncate_pending, to);
	spin_unlock(&ci->i_ceph_lock);

	truncate_pagecache(inode, to);

	spin_lock(&ci->i_ceph_lock);
	if (to == ci->i_truncate_size) {
		ci->i_truncate_pending = 0;
		finish = 1;
	}
	spin_unlock(&ci->i_ceph_lock);
	if (!finish)
		goto retry;

	mutex_unlock(&ci->i_truncate_mutex);

	if (wrbuffer_refs == 0)
		ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);

	wake_up_all(&ci->i_cap_wq);
}

/*
 * symlinks
 */
static const struct inode_operations ceph_symlink_iops = {
	.readlink = generic_readlink,
	.get_link = simple_get_link,
	.setattr = ceph_setattr,
	.getattr = ceph_getattr,
	.listxattr = ceph_listxattr,
};

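/*
 * Setattr strategy: depending on which caps we hold, an attribute
 * change is applied locally (dirtying the cap for later writeback to
 * the MDS), sent to the auth MDS as a SETATTR request, or both.  See
 * the per-attribute cases in __ceph_setattr() below.
 */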
int __ceph_setattr(struct inode *inode, struct iattr *attr)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	const unsigned int ia_valid = attr->ia_valid;
	struct ceph_mds_request *req;
	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
	struct ceph_cap_flush *prealloc_cf;
	int issued;
	int release = 0, dirtied = 0;
	int mask = 0;
	int err = 0;
	int inode_dirty_flags = 0;
	bool lock_snap_rwsem = false;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETATTR,
				       USE_AUTH_MDS);
	if (IS_ERR(req)) {
		ceph_free_cap_flush(prealloc_cf);
		return PTR_ERR(req);
	}

	spin_lock(&ci->i_ceph_lock);
	issued = __ceph_caps_issued(ci, NULL);

	if (!ci->i_head_snapc &&
	    (issued & (CEPH_CAP_ANY_EXCL | CEPH_CAP_FILE_WR))) {
		lock_snap_rwsem = true;
		if (!down_read_trylock(&mdsc->snap_rwsem)) {
			spin_unlock(&ci->i_ceph_lock);
			down_read(&mdsc->snap_rwsem);
			spin_lock(&ci->i_ceph_lock);
			issued = __ceph_caps_issued(ci, NULL);
		}
	}

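	/*
	 * Note the trylock dance above: if we had to sleep for
	 * snap_rwsem we dropped i_ceph_lock, so the issued caps are
	 * re-sampled before being trusted below.
	 */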
	dout("setattr %p issued %s\n", inode, ceph_cap_string(issued));

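	/*
	 * Per-attribute pattern: with the relevant EXCL cap we apply
	 * the change locally and mark the cap dirty; otherwise the new
	 * value goes into the SETATTR request for the MDS to apply,
	 * and the matching SHARED caps are released so other clients
	 * notice the change.
	 */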
	if (ia_valid & ATTR_UID) {
		dout("setattr %p uid %d -> %d\n", inode,
		     from_kuid(&init_user_ns, inode->i_uid),
		     from_kuid(&init_user_ns, attr->ia_uid));
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_uid = attr->ia_uid;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   !uid_eq(attr->ia_uid, inode->i_uid)) {
			req->r_args.setattr.uid = cpu_to_le32(
				from_kuid(&init_user_ns, attr->ia_uid));
			mask |= CEPH_SETATTR_UID;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}
	if (ia_valid & ATTR_GID) {
		dout("setattr %p gid %d -> %d\n", inode,
		     from_kgid(&init_user_ns, inode->i_gid),
		     from_kgid(&init_user_ns, attr->ia_gid));
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_gid = attr->ia_gid;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   !gid_eq(attr->ia_gid, inode->i_gid)) {
			req->r_args.setattr.gid = cpu_to_le32(
				from_kgid(&init_user_ns, attr->ia_gid));
			mask |= CEPH_SETATTR_GID;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}
	if (ia_valid & ATTR_MODE) {
		dout("setattr %p mode 0%o -> 0%o\n", inode, inode->i_mode,
		     attr->ia_mode);
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_mode = attr->ia_mode;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   attr->ia_mode != inode->i_mode) {
			inode->i_mode = attr->ia_mode;
			req->r_args.setattr.mode = cpu_to_le32(attr->ia_mode);
			mask |= CEPH_SETATTR_MODE;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}

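	/*
	 * Timestamp updates are ordered by i_time_warp_seq: bumping it
	 * under FILE_EXCL marks our locally set times as authoritative
	 * over what the MDS later reports; with only FILE_WR we may
	 * move a timestamp forward, never backward.
	 */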
	if (ia_valid & ATTR_ATIME) {
		dout("setattr %p atime %ld.%ld -> %ld.%ld\n", inode,
		     inode->i_atime.tv_sec, inode->i_atime.tv_nsec,
		     attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec);
		if (issued & CEPH_CAP_FILE_EXCL) {
			ci->i_time_warp_seq++;
			inode->i_atime = attr->ia_atime;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_WR) &&
			   timespec_compare(&inode->i_atime,
					    &attr->ia_atime) < 0) {
			inode->i_atime = attr->ia_atime;
			dirtied |= CEPH_CAP_FILE_WR;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   !timespec_equal(&inode->i_atime, &attr->ia_atime)) {
			ceph_encode_timespec(&req->r_args.setattr.atime,
					     &attr->ia_atime);
			mask |= CEPH_SETATTR_ATIME;
			release |= CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}
	if (ia_valid & ATTR_MTIME) {
		dout("setattr %p mtime %ld.%ld -> %ld.%ld\n", inode,
		     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
		     attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec);
		if (issued & CEPH_CAP_FILE_EXCL) {
			ci->i_time_warp_seq++;
			inode->i_mtime = attr->ia_mtime;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_WR) &&
			   timespec_compare(&inode->i_mtime,
					    &attr->ia_mtime) < 0) {
			inode->i_mtime = attr->ia_mtime;
			dirtied |= CEPH_CAP_FILE_WR;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   !timespec_equal(&inode->i_mtime, &attr->ia_mtime)) {
			ceph_encode_timespec(&req->r_args.setattr.mtime,
					     &attr->ia_mtime);
			mask |= CEPH_SETATTR_MTIME;
			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}
	if (ia_valid & ATTR_SIZE) {
		dout("setattr %p size %lld -> %lld\n", inode,
		     inode->i_size, attr->ia_size);
		if ((issued & CEPH_CAP_FILE_EXCL) &&
		    attr->ia_size > inode->i_size) {
			i_size_write(inode, attr->ia_size);
			inode->i_blocks = calc_inode_blocks(attr->ia_size);
			inode->i_ctime = attr->ia_ctime;
			ci->i_reported_size = attr->ia_size;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   attr->ia_size != inode->i_size) {
			req->r_args.setattr.size = cpu_to_le64(attr->ia_size);
			req->r_args.setattr.old_size =
				cpu_to_le64(inode->i_size);
			mask |= CEPH_SETATTR_SIZE;
			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}

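	/*
	 * Only an extending size change under FILE_EXCL is applied
	 * purely locally; a shrink is sent to the MDS, and the matching
	 * page-cache truncation is applied later via
	 * __ceph_do_pending_vmtruncate() at the end of this function.
	 */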
	/* these do nothing */
	if (ia_valid & ATTR_CTIME) {
		bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME|
					 ATTR_MODE|ATTR_UID|ATTR_GID)) == 0;
		dout("setattr %p ctime %ld.%ld -> %ld.%ld (%s)\n", inode,
		     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
		     attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
		     only ? "ctime only" : "ignored");
		inode->i_ctime = attr->ia_ctime;
		if (only) {
			/*
			 * if the kernel wants to dirty ctime but nothing
			 * else, we need to choose a cap to dirty under, or
			 * do an almost-no-op setattr
			 */
			if (issued & CEPH_CAP_AUTH_EXCL)
				dirtied |= CEPH_CAP_AUTH_EXCL;
			else if (issued & CEPH_CAP_FILE_EXCL)
				dirtied |= CEPH_CAP_FILE_EXCL;
			else if (issued & CEPH_CAP_XATTR_EXCL)
				dirtied |= CEPH_CAP_XATTR_EXCL;
			else
				mask |= CEPH_SETATTR_CTIME;
		}
	}
	if (ia_valid & ATTR_FILE)
		dout("setattr %p ATTR_FILE ... hrm!\n", inode);

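	/*
	 * Locally applied changes are recorded with
	 * __ceph_mark_dirty_caps() and flushed back to the MDS later;
	 * anything accumulated in 'mask' is sent synchronously below.
	 */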
	if (dirtied) {
		inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied,
							   &prealloc_cf);
		inode->i_ctime = current_time(inode);
	}

	release &= issued;
	spin_unlock(&ci->i_ceph_lock);
	if (lock_snap_rwsem)
		up_read(&mdsc->snap_rwsem);

	if (inode_dirty_flags)
		__mark_inode_dirty(inode, inode_dirty_flags);

	if (ia_valid & ATTR_MODE) {
		err = posix_acl_chmod(inode, attr->ia_mode);
		if (err)
			goto out_put;
	}

	if (mask) {
		req->r_inode = inode;
		ihold(inode);
		req->r_inode_drop = release;
		req->r_args.setattr.mask = cpu_to_le32(mask);
		req->r_num_caps = 1;
		err = ceph_mdsc_do_request(mdsc, NULL, req);
	}
	dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err,
	     ceph_cap_string(dirtied), mask);

	ceph_mdsc_put_request(req);
	if (mask & CEPH_SETATTR_SIZE)
		__ceph_do_pending_vmtruncate(inode);
	ceph_free_cap_flush(prealloc_cf);
	return err;
out_put:
	ceph_mdsc_put_request(req);
	ceph_free_cap_flush(prealloc_cf);
	return err;
}

/*
 * setattr: run the VFS prepare checks, then hand off to
 * __ceph_setattr().  Snapshotted inodes are read-only, so they fail
 * early with -EROFS.
 */
int ceph_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	int err;

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

	err = setattr_prepare(dentry, attr);
	if (err != 0)
		return err;

	return __ceph_setattr(inode, attr);
}

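/*
 * When locked_page is non-NULL the caller also wants any inline file
 * data: on success the return value is then the inline data length
 * (presumably copied into locked_page by the reply handling path),
 * -ENODATA if the file holds no inline data, or -EINVAL if the reply
 * unexpectedly lacked it.
 */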
/*
 * Verify that we have a lease on the given mask.  If not,
 * do a getattr against the MDS.
 */
int __ceph_do_getattr(struct inode *inode, struct page *locked_page,
		      int mask, bool force)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(inode) == CEPH_SNAPDIR) {
		dout("do_getattr inode %p SNAPDIR\n", inode);
		return 0;
	}

	dout("do_getattr inode %p mask %s mode 0%o\n",
	     inode, ceph_cap_string(mask), inode->i_mode);
	if (!force && ceph_caps_issued_mask(ceph_inode(inode), mask, 1))
		return 0;

	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->r_inode = inode;
	ihold(inode);
	req->r_num_caps = 1;
	req->r_args.getattr.mask = cpu_to_le32(mask);
	req->r_locked_page = locked_page;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	if (locked_page && err == 0) {
		u64 inline_version = req->r_reply_info.targeti.inline_version;
		if (inline_version == 0) {
			/* the reply is supposed to contain inline data */
			err = -EINVAL;
		} else if (inline_version == CEPH_INLINE_NONE) {
			err = -ENODATA;
		} else {
			err = req->r_reply_info.targeti.inline_len;
		}
	}
	ceph_mdsc_put_request(req);
	dout("do_getattr result=%d\n", err);
	return err;
}

/*
 * Check inode permissions.  We verify we have a valid value for
 * the AUTH cap, then call the generic handler.
 */
int ceph_permission(struct inode *inode, int mask)
{
	int err;

	if (mask & MAY_NOT_BLOCK)
		return -ECHILD;

	err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED, false);

	if (!err)
		err = generic_permission(inode, mask);
	return err;
}

/*
 * Get all attributes.  Hopefully someday we'll have a statlite()
 * and can limit the fields we require to be accurate.
 */
int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry,
		 struct kstat *stat)
{
	struct inode *inode = d_inode(dentry);
	struct ceph_inode_info *ci = ceph_inode(inode);
	int err;

	err = ceph_do_getattr(inode, CEPH_STAT_CAP_INODE_ALL, false);
	if (!err) {
		generic_fillattr(inode, stat);
		stat->ino = ceph_translate_ino(inode->i_sb, inode->i_ino);
		if (ceph_snap(inode) != CEPH_NOSNAP)
			stat->dev = ceph_snap(inode);
		else
			stat->dev = 0;
		if (S_ISDIR(inode->i_mode)) {
			/* dir size: recursive bytes or entry count */
			if (ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb),
						RBYTES))
				stat->size = ci->i_rbytes;
			else
				stat->size = ci->i_files + ci->i_subdirs;
			stat->blocks = 0;
			stat->blksize = 65536;
		}
	}
	return err;
}