ceph: parse inline data in MClientReply and MClientCaps
fs/ceph/inode.c (linux-2.6-block.git)
#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/namei.h>
#include <linux/writeback.h>
#include <linux/vmalloc.h>
#include <linux/posix_acl.h>
#include <linux/random.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
#include <linux/ceph/decode.h>

/*
 * Ceph inode operations
 *
 * Implement basic inode helpers (get, alloc) and inode ops (getattr,
 * setattr, etc.), xattr helpers, and helpers for assimilating
 * metadata returned by the MDS into our cache.
 *
 * Also define helpers for doing asynchronous writeback, invalidation,
 * and truncation for the benefit of those who can't afford to block
 * (typically because they are in the message handler path).
 */

static const struct inode_operations ceph_symlink_iops;

static void ceph_invalidate_work(struct work_struct *work);
static void ceph_writeback_work(struct work_struct *work);
static void ceph_vmtruncate_work(struct work_struct *work);

/*
 * find or create an inode, given the ceph ino number
 */
static int ceph_set_ino_cb(struct inode *inode, void *data)
{
	ceph_inode(inode)->i_vino = *(struct ceph_vino *)data;
	inode->i_ino = ceph_vino_to_ino(*(struct ceph_vino *)data);
	return 0;
}

struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino)
{
	struct inode *inode;
	ino_t t = ceph_vino_to_ino(vino);

	inode = iget5_locked(sb, t, ceph_ino_compare, ceph_set_ino_cb, &vino);
	if (inode == NULL)
		return ERR_PTR(-ENOMEM);
	if (inode->i_state & I_NEW) {
		dout("get_inode created new inode %p %llx.%llx ino %llx\n",
		     inode, ceph_vinop(inode), (u64)inode->i_ino);
		unlock_new_inode(inode);
	}

	dout("get_inode on %lu=%llx.%llx got %p\n", inode->i_ino, vino.ino,
	     vino.snap, inode);
	return inode;
}

/*
 * get/construct snapdir inode for a given directory
 */
struct inode *ceph_get_snapdir(struct inode *parent)
{
	struct ceph_vino vino = {
		.ino = ceph_ino(parent),
		.snap = CEPH_SNAPDIR,
	};
	struct inode *inode = ceph_get_inode(parent->i_sb, vino);
	struct ceph_inode_info *ci = ceph_inode(inode);

	BUG_ON(!S_ISDIR(parent->i_mode));
	if (IS_ERR(inode))
		return inode;
	inode->i_mode = parent->i_mode;
	inode->i_uid = parent->i_uid;
	inode->i_gid = parent->i_gid;
	inode->i_op = &ceph_dir_iops;
	inode->i_fop = &ceph_dir_fops;
	ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
	ci->i_rbytes = 0;
	return inode;
}

const struct inode_operations ceph_file_iops = {
	.permission = ceph_permission,
	.setattr = ceph_setattr,
	.getattr = ceph_getattr,
	.setxattr = ceph_setxattr,
	.getxattr = ceph_getxattr,
	.listxattr = ceph_listxattr,
	.removexattr = ceph_removexattr,
	.get_acl = ceph_get_acl,
	.set_acl = ceph_set_acl,
};


/*
 * We use a 'frag tree' to keep track of the MDS's directory fragments
 * for a given inode (usually there is just a single fragment).  We
 * need to know when a child frag is delegated to a new MDS, or when
 * it is flagged as replicated, so we can direct our requests
 * accordingly.
 */

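/*
 * Illustrative note (added; see include/linux/ceph/ceph_frag.h for the
 * authoritative encoding): a frag identifies one fragment of the
 * 24-bit dirfrag hash space, roughly encoded as (split bits << 24 |
 * value).  ceph_frag_make(0, 0) is the root fragment covering the
 * whole space, and a 1-bit split of the root yields two children that
 * each cover half of the hash range.
 */
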
/*
 * find/create a frag in the tree
 */
static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci,
						    u32 f)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ceph_inode_frag *frag;
	int c;

	p = &ci->i_fragtree.rb_node;
	while (*p) {
		parent = *p;
		frag = rb_entry(parent, struct ceph_inode_frag, node);
		c = ceph_frag_compare(f, frag->frag);
		if (c < 0)
			p = &(*p)->rb_left;
		else if (c > 0)
			p = &(*p)->rb_right;
		else
			return frag;
	}

	frag = kmalloc(sizeof(*frag), GFP_NOFS);
	if (!frag) {
		pr_err("__get_or_create_frag ENOMEM on %p %llx.%llx "
		       "frag %x\n", &ci->vfs_inode,
		       ceph_vinop(&ci->vfs_inode), f);
		return ERR_PTR(-ENOMEM);
	}
	frag->frag = f;
	frag->split_by = 0;
	frag->mds = -1;
	frag->ndist = 0;

	rb_link_node(&frag->node, parent, p);
	rb_insert_color(&frag->node, &ci->i_fragtree);

	dout("get_or_create_frag added %llx.%llx frag %x\n",
	     ceph_vinop(&ci->vfs_inode), f);
	return frag;
}

/*
 * find a specific frag @f
 */
struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci, u32 f)
{
	struct rb_node *n = ci->i_fragtree.rb_node;

	while (n) {
		struct ceph_inode_frag *frag =
			rb_entry(n, struct ceph_inode_frag, node);
		int c = ceph_frag_compare(f, frag->frag);
		if (c < 0)
			n = n->rb_left;
		else if (c > 0)
			n = n->rb_right;
		else
			return frag;
	}
	return NULL;
}

/*
 * Choose frag containing the given value @v.  If @pfrag is
 * specified, copy the frag delegation info to the caller if
 * it is present.
 */
static u32 __ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
			      struct ceph_inode_frag *pfrag, int *found)
{
	u32 t = ceph_frag_make(0, 0);
	struct ceph_inode_frag *frag;
	unsigned nway, i;
	u32 n;

	if (found)
		*found = 0;

	while (1) {
		WARN_ON(!ceph_frag_contains_value(t, v));
		frag = __ceph_find_frag(ci, t);
		if (!frag)
			break; /* t is a leaf */
		if (frag->split_by == 0) {
			if (pfrag)
				memcpy(pfrag, frag, sizeof(*pfrag));
			if (found)
				*found = 1;
			break;
		}

		/* choose child */
		nway = 1 << frag->split_by;
		dout("choose_frag(%x) %x splits by %d (%d ways)\n", v, t,
		     frag->split_by, nway);
		for (i = 0; i < nway; i++) {
			n = ceph_frag_make_child(t, frag->split_by, i);
			if (ceph_frag_contains_value(n, v)) {
				t = n;
				break;
			}
		}
		BUG_ON(i == nway);
	}
	dout("choose_frag(%x) = %x\n", v, t);

	return t;
}
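
/*
 * Illustrative walk (added note): if the root frag is split by 1 and
 * its upper child is split by 1 again, looking up a value v in the
 * top quarter of the hash space descends root -> upper half -> upper
 * quarter and returns that leaf; the caller can then use any copied
 * delegation info to direct the request to the right MDS.
 */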

u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
		     struct ceph_inode_frag *pfrag, int *found)
{
	u32 ret;
	mutex_lock(&ci->i_fragtree_mutex);
	ret = __ceph_choose_frag(ci, v, pfrag, found);
	mutex_unlock(&ci->i_fragtree_mutex);
	return ret;
}

/*
 * Process dirfrag (delegation) info from the mds.  Include leaf
 * fragment in tree ONLY if ndist > 0.  (Otherwise, only
 * branches/splits are included in i_fragtree.)
 */
static int ceph_fill_dirfrag(struct inode *inode,
			     struct ceph_mds_reply_dirfrag *dirinfo)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_frag *frag;
	u32 id = le32_to_cpu(dirinfo->frag);
	int mds = le32_to_cpu(dirinfo->auth);
	int ndist = le32_to_cpu(dirinfo->ndist);
	int diri_auth = -1;
	int i;
	int err = 0;

	spin_lock(&ci->i_ceph_lock);
	if (ci->i_auth_cap)
		diri_auth = ci->i_auth_cap->mds;
	spin_unlock(&ci->i_ceph_lock);

	mutex_lock(&ci->i_fragtree_mutex);
	if (ndist == 0 && mds == diri_auth) {
		/* no delegation info needed. */
		frag = __ceph_find_frag(ci, id);
		if (!frag)
			goto out;
		if (frag->split_by == 0) {
			/* tree leaf, remove */
			dout("fill_dirfrag removed %llx.%llx frag %x"
			     " (no ref)\n", ceph_vinop(inode), id);
			rb_erase(&frag->node, &ci->i_fragtree);
			kfree(frag);
		} else {
			/* tree branch, keep and clear */
			dout("fill_dirfrag cleared %llx.%llx frag %x"
			     " referral\n", ceph_vinop(inode), id);
			frag->mds = -1;
			frag->ndist = 0;
		}
		goto out;
	}


	/* find/add this frag to store mds delegation info */
	frag = __get_or_create_frag(ci, id);
	if (IS_ERR(frag)) {
		/* this is not the end of the world; we can continue
		   with bad/inaccurate delegation info */
		pr_err("fill_dirfrag ENOMEM on mds ref %llx.%llx fg %x\n",
		       ceph_vinop(inode), le32_to_cpu(dirinfo->frag));
		err = -ENOMEM;
		goto out;
	}

	frag->mds = mds;
	frag->ndist = min_t(u32, ndist, CEPH_MAX_DIRFRAG_REP);
	for (i = 0; i < frag->ndist; i++)
		frag->dist[i] = le32_to_cpu(dirinfo->dist[i]);
	dout("fill_dirfrag %llx.%llx frag %x ndist=%d\n",
	     ceph_vinop(inode), frag->frag, frag->ndist);

out:
	mutex_unlock(&ci->i_fragtree_mutex);
	return err;
}

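/*
 * Added note: ceph_fill_fragtree() below tries to avoid rebuilding the
 * rbtree on every MDS reply.  It first runs a cheap staleness check --
 * sample one random split from the reply, or compare the cached tree
 * root / the readdir dirfrag -- and only rewrites the whole tree when
 * that check suggests the cached copy is out of date.
 */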
static int ceph_fill_fragtree(struct inode *inode,
			      struct ceph_frag_tree_head *fragtree,
			      struct ceph_mds_reply_dirfrag *dirinfo)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_frag *frag;
	struct rb_node *rb_node;
	int i;
	u32 id, nsplits;
	bool update = false;

	mutex_lock(&ci->i_fragtree_mutex);
	nsplits = le32_to_cpu(fragtree->nsplits);
	if (nsplits) {
		i = prandom_u32() % nsplits;
		id = le32_to_cpu(fragtree->splits[i].frag);
		if (!__ceph_find_frag(ci, id))
			update = true;
	} else if (!RB_EMPTY_ROOT(&ci->i_fragtree)) {
		rb_node = rb_first(&ci->i_fragtree);
		frag = rb_entry(rb_node, struct ceph_inode_frag, node);
		if (frag->frag != ceph_frag_make(0, 0) || rb_next(rb_node))
			update = true;
	}
	if (!update && dirinfo) {
		id = le32_to_cpu(dirinfo->frag);
		if (id != __ceph_choose_frag(ci, id, NULL, NULL))
			update = true;
	}
	if (!update)
		goto out_unlock;

	dout("fill_fragtree %llx.%llx\n", ceph_vinop(inode));
	rb_node = rb_first(&ci->i_fragtree);
	for (i = 0; i < nsplits; i++) {
		id = le32_to_cpu(fragtree->splits[i].frag);
		frag = NULL;
		while (rb_node) {
			frag = rb_entry(rb_node, struct ceph_inode_frag, node);
			if (ceph_frag_compare(frag->frag, id) >= 0) {
				if (frag->frag != id)
					frag = NULL;
				else
					rb_node = rb_next(rb_node);
				break;
			}
			rb_node = rb_next(rb_node);
			rb_erase(&frag->node, &ci->i_fragtree);
			kfree(frag);
			frag = NULL;
		}
		if (!frag) {
			frag = __get_or_create_frag(ci, id);
			if (IS_ERR(frag))
				continue;
		}
		frag->split_by = le32_to_cpu(fragtree->splits[i].by);
		dout(" frag %x split by %d\n", frag->frag, frag->split_by);
	}
	while (rb_node) {
		frag = rb_entry(rb_node, struct ceph_inode_frag, node);
		rb_node = rb_next(rb_node);
		rb_erase(&frag->node, &ci->i_fragtree);
		kfree(frag);
	}
out_unlock:
	mutex_unlock(&ci->i_fragtree_mutex);
	return 0;
}

/*
 * initialize a newly allocated inode.
 */
struct inode *ceph_alloc_inode(struct super_block *sb)
{
	struct ceph_inode_info *ci;
	int i;

	ci = kmem_cache_alloc(ceph_inode_cachep, GFP_NOFS);
	if (!ci)
		return NULL;

	dout("alloc_inode %p\n", &ci->vfs_inode);

	spin_lock_init(&ci->i_ceph_lock);

	ci->i_version = 0;
	ci->i_time_warp_seq = 0;
	ci->i_ceph_flags = 0;
	ci->i_ordered_count = 0;
	atomic_set(&ci->i_release_count, 1);
	atomic_set(&ci->i_complete_count, 0);
	ci->i_symlink = NULL;

	memset(&ci->i_dir_layout, 0, sizeof(ci->i_dir_layout));

	ci->i_fragtree = RB_ROOT;
	mutex_init(&ci->i_fragtree_mutex);

	ci->i_xattrs.blob = NULL;
	ci->i_xattrs.prealloc_blob = NULL;
	ci->i_xattrs.dirty = false;
	ci->i_xattrs.index = RB_ROOT;
	ci->i_xattrs.count = 0;
	ci->i_xattrs.names_size = 0;
	ci->i_xattrs.vals_size = 0;
	ci->i_xattrs.version = 0;
	ci->i_xattrs.index_version = 0;

	ci->i_caps = RB_ROOT;
	ci->i_auth_cap = NULL;
	ci->i_dirty_caps = 0;
	ci->i_flushing_caps = 0;
	INIT_LIST_HEAD(&ci->i_dirty_item);
	INIT_LIST_HEAD(&ci->i_flushing_item);
	ci->i_cap_flush_seq = 0;
	ci->i_cap_flush_last_tid = 0;
	memset(&ci->i_cap_flush_tid, 0, sizeof(ci->i_cap_flush_tid));
	init_waitqueue_head(&ci->i_cap_wq);
	ci->i_hold_caps_min = 0;
	ci->i_hold_caps_max = 0;
	INIT_LIST_HEAD(&ci->i_cap_delay_list);
	INIT_LIST_HEAD(&ci->i_cap_snaps);
	ci->i_head_snapc = NULL;
	ci->i_snap_caps = 0;

	for (i = 0; i < CEPH_FILE_MODE_NUM; i++)
		ci->i_nr_by_mode[i] = 0;

	mutex_init(&ci->i_truncate_mutex);
	ci->i_truncate_seq = 0;
	ci->i_truncate_size = 0;
	ci->i_truncate_pending = 0;

	ci->i_max_size = 0;
	ci->i_reported_size = 0;
	ci->i_wanted_max_size = 0;
	ci->i_requested_max_size = 0;

	ci->i_pin_ref = 0;
	ci->i_rd_ref = 0;
	ci->i_rdcache_ref = 0;
	ci->i_wr_ref = 0;
	ci->i_wb_ref = 0;
	ci->i_wrbuffer_ref = 0;
	ci->i_wrbuffer_ref_head = 0;
	ci->i_shared_gen = 0;
	ci->i_rdcache_gen = 0;
	ci->i_rdcache_revoking = 0;

	INIT_LIST_HEAD(&ci->i_unsafe_writes);
	INIT_LIST_HEAD(&ci->i_unsafe_dirops);
	spin_lock_init(&ci->i_unsafe_lock);

	ci->i_snap_realm = NULL;
	INIT_LIST_HEAD(&ci->i_snap_realm_item);
	INIT_LIST_HEAD(&ci->i_snap_flush_item);

	INIT_WORK(&ci->i_wb_work, ceph_writeback_work);
	INIT_WORK(&ci->i_pg_inv_work, ceph_invalidate_work);

	INIT_WORK(&ci->i_vmtruncate_work, ceph_vmtruncate_work);

	ceph_fscache_inode_init(ci);

	return &ci->vfs_inode;
}

static void ceph_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	struct ceph_inode_info *ci = ceph_inode(inode);

	kmem_cache_free(ceph_inode_cachep, ci);
}

void ceph_destroy_inode(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_frag *frag;
	struct rb_node *n;

	dout("destroy_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode));

	ceph_fscache_unregister_inode_cookie(ci);

	ceph_queue_caps_release(inode);

	/*
	 * we may still have a snap_realm reference if there are stray
	 * caps in i_snap_caps.
	 */
	if (ci->i_snap_realm) {
		struct ceph_mds_client *mdsc =
			ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
		struct ceph_snap_realm *realm = ci->i_snap_realm;

		dout(" dropping residual ref to snap realm %p\n", realm);
		spin_lock(&realm->inodes_with_caps_lock);
		list_del_init(&ci->i_snap_realm_item);
		spin_unlock(&realm->inodes_with_caps_lock);
		ceph_put_snap_realm(mdsc, realm);
	}

	kfree(ci->i_symlink);
	while ((n = rb_first(&ci->i_fragtree)) != NULL) {
		frag = rb_entry(n, struct ceph_inode_frag, node);
		rb_erase(n, &ci->i_fragtree);
		kfree(frag);
	}

	__ceph_destroy_xattrs(ci);
	if (ci->i_xattrs.blob)
		ceph_buffer_put(ci->i_xattrs.blob);
	if (ci->i_xattrs.prealloc_blob)
		ceph_buffer_put(ci->i_xattrs.prealloc_blob);

	call_rcu(&inode->i_rcu, ceph_i_callback);
}

int ceph_drop_inode(struct inode *inode)
{
	/*
	 * A positive dentry and its corresponding inode always arrive
	 * together in an MDS reply, so there is no need to keep the
	 * inode in the cache after dropping all its aliases.
	 */
	return 1;
}

/*
 * Helpers to fill in size, ctime, mtime, and atime.  We have to be
 * careful because either the client or MDS may have more up to date
 * info, depending on which capabilities are held, and whether
 * time_warp_seq or truncate_seq have increased.  (Ordinarily, mtime
 * and size are monotonically increasing, except when utimes() or
 * truncate() increments the corresponding _seq values.)
 */
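/*
 * Worked example (added): if we hold Fw and have locally extended the
 * file to 8192 bytes while the MDS still reports 4096 at the same
 * truncate_seq, the larger local size wins.  If instead the MDS
 * reports a newer truncate_seq (someone ran truncate()), its size is
 * taken as authoritative and a pending vmtruncate may be queued.
 */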
int ceph_fill_file_size(struct inode *inode, int issued,
			u32 truncate_seq, u64 truncate_size, u64 size)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int queue_trunc = 0;

	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 ||
	    (truncate_seq == ci->i_truncate_seq && size > inode->i_size)) {
		dout("size %lld -> %llu\n", inode->i_size, size);
		inode->i_size = size;
		inode->i_blocks = (size + (1<<9) - 1) >> 9;
		ci->i_reported_size = size;
		if (truncate_seq != ci->i_truncate_seq) {
			dout("truncate_seq %u -> %u\n",
			     ci->i_truncate_seq, truncate_seq);
			ci->i_truncate_seq = truncate_seq;

			/* the MDS should have revoked these caps */
			WARN_ON_ONCE(issued & (CEPH_CAP_FILE_EXCL |
					       CEPH_CAP_FILE_RD |
					       CEPH_CAP_FILE_WR |
					       CEPH_CAP_FILE_LAZYIO));
			/*
			 * If we hold relevant caps, or in the case where we're
			 * not the only client referencing this file and we
			 * don't hold those caps, then we need to check whether
			 * the file is either opened or mmaped
			 */
			if ((issued & (CEPH_CAP_FILE_CACHE|
				       CEPH_CAP_FILE_BUFFER)) ||
			    mapping_mapped(inode->i_mapping) ||
			    __ceph_caps_file_wanted(ci)) {
				ci->i_truncate_pending++;
				queue_trunc = 1;
			}
		}
	}
	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) >= 0 &&
	    ci->i_truncate_size != truncate_size) {
		dout("truncate_size %lld -> %llu\n", ci->i_truncate_size,
		     truncate_size);
		ci->i_truncate_size = truncate_size;
	}

	if (queue_trunc)
		ceph_fscache_invalidate(inode);

	return queue_trunc;
}

void ceph_fill_file_time(struct inode *inode, int issued,
			 u64 time_warp_seq, struct timespec *ctime,
			 struct timespec *mtime, struct timespec *atime)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int warn = 0;

	if (issued & (CEPH_CAP_FILE_EXCL|
		      CEPH_CAP_FILE_WR|
		      CEPH_CAP_FILE_BUFFER|
		      CEPH_CAP_AUTH_EXCL|
		      CEPH_CAP_XATTR_EXCL)) {
		if (timespec_compare(ctime, &inode->i_ctime) > 0) {
			dout("ctime %ld.%09ld -> %ld.%09ld inc w/ cap\n",
			     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
			     ctime->tv_sec, ctime->tv_nsec);
			inode->i_ctime = *ctime;
		}
		if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
			/* the MDS did a utimes() */
			dout("mtime %ld.%09ld -> %ld.%09ld "
			     "tw %d -> %d\n",
			     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
			     mtime->tv_sec, mtime->tv_nsec,
			     ci->i_time_warp_seq, (int)time_warp_seq);

			inode->i_mtime = *mtime;
			inode->i_atime = *atime;
			ci->i_time_warp_seq = time_warp_seq;
		} else if (time_warp_seq == ci->i_time_warp_seq) {
			/* nobody did utimes(); take the max */
			if (timespec_compare(mtime, &inode->i_mtime) > 0) {
				dout("mtime %ld.%09ld -> %ld.%09ld inc\n",
				     inode->i_mtime.tv_sec,
				     inode->i_mtime.tv_nsec,
				     mtime->tv_sec, mtime->tv_nsec);
				inode->i_mtime = *mtime;
			}
			if (timespec_compare(atime, &inode->i_atime) > 0) {
				dout("atime %ld.%09ld -> %ld.%09ld inc\n",
				     inode->i_atime.tv_sec,
				     inode->i_atime.tv_nsec,
				     atime->tv_sec, atime->tv_nsec);
				inode->i_atime = *atime;
			}
		} else if (issued & CEPH_CAP_FILE_EXCL) {
			/* we did a utimes(); ignore mds values */
		} else {
			warn = 1;
		}
	} else {
		/* we have no write|excl caps; whatever the MDS says is true */
		if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) {
			inode->i_ctime = *ctime;
			inode->i_mtime = *mtime;
			inode->i_atime = *atime;
			ci->i_time_warp_seq = time_warp_seq;
		} else {
			warn = 1;
		}
	}
	if (warn) /* time_warp_seq shouldn't go backwards */
		dout("%p mds time_warp_seq %llu < %u\n",
		     inode, time_warp_seq, ci->i_time_warp_seq);
}

/*
 * Populate an inode based on info from mds.  May be called on new or
 * existing inodes.
 */
static int fill_inode(struct inode *inode,
		      struct ceph_mds_reply_info_in *iinfo,
		      struct ceph_mds_reply_dirfrag *dirinfo,
		      struct ceph_mds_session *session,
		      unsigned long ttl_from, int cap_fmode,
		      struct ceph_cap_reservation *caps_reservation)
{
	struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
	struct ceph_mds_reply_inode *info = iinfo->in;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int issued = 0, implemented, new_issued;
	struct timespec mtime, atime, ctime;
	struct ceph_buffer *xattr_blob = NULL;
	struct ceph_cap *new_cap = NULL;
	int err = 0;
	bool wake = false;
	bool queue_trunc = false;
	bool new_version = false;

	dout("fill_inode %p ino %llx.%llx v %llu had %llu\n",
	     inode, ceph_vinop(inode), le64_to_cpu(info->version),
	     ci->i_version);

	/* prealloc new cap struct */
	if (info->cap.caps && ceph_snap(inode) == CEPH_NOSNAP)
		new_cap = ceph_get_cap(mdsc, caps_reservation);

	/*
	 * prealloc xattr data, if it looks like we'll need it.  only
	 * if len > 4 (meaning there are actually xattrs; the first 4
	 * bytes are the xattr count).
	 */
	if (iinfo->xattr_len > 4) {
		xattr_blob = ceph_buffer_new(iinfo->xattr_len, GFP_NOFS);
		if (!xattr_blob)
			pr_err("fill_inode ENOMEM xattr blob %d bytes\n",
			       iinfo->xattr_len);
	}

	spin_lock(&ci->i_ceph_lock);

	/*
	 * provided version will be odd if inode value is projected,
	 * even if stable.  skip the update if we have newer stable
	 * info (ours>=theirs, e.g. due to racing mds replies), unless
	 * we are getting projected (unstable) info (in which case the
	 * version is odd, and we want ours>theirs).
	 *   us   them
	 *   2    2     skip
	 *   3    2     skip
	 *   3    3     update
	 */
	if (ci->i_version == 0 ||
	    ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
	     le64_to_cpu(info->version) > (ci->i_version & ~1)))
		new_version = true;

	issued = __ceph_caps_issued(ci, &implemented);
	issued |= implemented | __ceph_caps_dirty(ci);
	new_issued = ~issued & le32_to_cpu(info->cap.caps);

	/* update inode */
	ci->i_version = le64_to_cpu(info->version);
	inode->i_version++;
	inode->i_rdev = le32_to_cpu(info->rdev);
	inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;

	if ((new_version || (new_issued & CEPH_CAP_AUTH_SHARED)) &&
	    (issued & CEPH_CAP_AUTH_EXCL) == 0) {
		inode->i_mode = le32_to_cpu(info->mode);
		inode->i_uid = make_kuid(&init_user_ns, le32_to_cpu(info->uid));
		inode->i_gid = make_kgid(&init_user_ns, le32_to_cpu(info->gid));
		dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
		     from_kuid(&init_user_ns, inode->i_uid),
		     from_kgid(&init_user_ns, inode->i_gid));
	}

	if ((new_version || (new_issued & CEPH_CAP_LINK_SHARED)) &&
	    (issued & CEPH_CAP_LINK_EXCL) == 0)
		set_nlink(inode, le32_to_cpu(info->nlink));

	if (new_version || (new_issued & CEPH_CAP_ANY_RD)) {
		/* be careful with mtime, atime, size */
		ceph_decode_timespec(&atime, &info->atime);
		ceph_decode_timespec(&mtime, &info->mtime);
		ceph_decode_timespec(&ctime, &info->ctime);
		ceph_fill_file_time(inode, issued,
				    le32_to_cpu(info->time_warp_seq),
				    &ctime, &mtime, &atime);
	}

	if (new_version ||
	    (new_issued & (CEPH_CAP_ANY_FILE_RD | CEPH_CAP_ANY_FILE_WR))) {
		ci->i_layout = info->layout;
		queue_trunc = ceph_fill_file_size(inode, issued,
					le32_to_cpu(info->truncate_seq),
					le64_to_cpu(info->truncate_size),
					le64_to_cpu(info->size));
		/* only update max_size on auth cap */
		if ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
		    ci->i_max_size != le64_to_cpu(info->max_size)) {
			dout("max_size %lld -> %llu\n", ci->i_max_size,
			     le64_to_cpu(info->max_size));
			ci->i_max_size = le64_to_cpu(info->max_size);
		}
	}

	/* xattrs */
	/* note that if i_xattrs.len <= 4, i_xattrs.data will still be NULL. */
	if ((ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL)) &&
	    le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
		if (ci->i_xattrs.blob)
			ceph_buffer_put(ci->i_xattrs.blob);
		ci->i_xattrs.blob = xattr_blob;
		if (xattr_blob)
			memcpy(ci->i_xattrs.blob->vec.iov_base,
			       iinfo->xattr_data, iinfo->xattr_len);
		ci->i_xattrs.version = le64_to_cpu(info->xattr_version);
		ceph_forget_all_cached_acls(inode);
		xattr_blob = NULL;
	}

	inode->i_mapping->a_ops = &ceph_aops;
	inode->i_mapping->backing_dev_info =
		&ceph_sb_to_client(inode->i_sb)->backing_dev_info;

	switch (inode->i_mode & S_IFMT) {
	case S_IFIFO:
	case S_IFBLK:
	case S_IFCHR:
	case S_IFSOCK:
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
		inode->i_op = &ceph_file_iops;
		break;
	case S_IFREG:
		inode->i_op = &ceph_file_iops;
		inode->i_fop = &ceph_file_fops;
		break;
	case S_IFLNK:
		inode->i_op = &ceph_symlink_iops;
		if (!ci->i_symlink) {
			u32 symlen = iinfo->symlink_len;
			char *sym;

			spin_unlock(&ci->i_ceph_lock);

			err = -EINVAL;
			if (WARN_ON(symlen != inode->i_size))
				goto out;

			err = -ENOMEM;
			sym = kstrndup(iinfo->symlink, symlen, GFP_NOFS);
			if (!sym)
				goto out;

			spin_lock(&ci->i_ceph_lock);
			if (!ci->i_symlink)
				ci->i_symlink = sym;
			else
				kfree(sym); /* lost a race */
		}
		break;
	case S_IFDIR:
		inode->i_op = &ceph_dir_iops;
		inode->i_fop = &ceph_dir_fops;

		ci->i_dir_layout = iinfo->dir_layout;

		ci->i_files = le64_to_cpu(info->files);
		ci->i_subdirs = le64_to_cpu(info->subdirs);
		ci->i_rbytes = le64_to_cpu(info->rbytes);
		ci->i_rfiles = le64_to_cpu(info->rfiles);
		ci->i_rsubdirs = le64_to_cpu(info->rsubdirs);
		ceph_decode_timespec(&ci->i_rctime, &info->rctime);
		break;
	default:
		pr_err("fill_inode %llx.%llx BAD mode 0%o\n",
		       ceph_vinop(inode), inode->i_mode);
	}

	/* set dir completion flag? */
	if (S_ISDIR(inode->i_mode) &&
	    ci->i_files == 0 && ci->i_subdirs == 0 &&
	    ceph_snap(inode) == CEPH_NOSNAP &&
	    (le32_to_cpu(info->cap.caps) & CEPH_CAP_FILE_SHARED) &&
	    (issued & CEPH_CAP_FILE_EXCL) == 0 &&
	    !__ceph_dir_is_complete(ci)) {
		dout(" marking %p complete (empty)\n", inode);
		__ceph_dir_set_complete(ci, atomic_read(&ci->i_release_count),
					ci->i_ordered_count);
	}

	/* were we issued a capability? */
	if (info->cap.caps) {
		if (ceph_snap(inode) == CEPH_NOSNAP) {
			ceph_add_cap(inode, session,
				     le64_to_cpu(info->cap.cap_id),
				     cap_fmode,
				     le32_to_cpu(info->cap.caps),
				     le32_to_cpu(info->cap.wanted),
				     le32_to_cpu(info->cap.seq),
				     le32_to_cpu(info->cap.mseq),
				     le64_to_cpu(info->cap.realm),
				     info->cap.flags, &new_cap);
			wake = true;
		} else {
			dout(" %p got snap_caps %s\n", inode,
			     ceph_cap_string(le32_to_cpu(info->cap.caps)));
			ci->i_snap_caps |= le32_to_cpu(info->cap.caps);
			if (cap_fmode >= 0)
				__ceph_get_fmode(ci, cap_fmode);
		}
	} else if (cap_fmode >= 0) {
		pr_warn("mds issued no caps on %llx.%llx\n",
			ceph_vinop(inode));
		__ceph_get_fmode(ci, cap_fmode);
	}
	spin_unlock(&ci->i_ceph_lock);

	if (wake)
		wake_up_all(&ci->i_cap_wq);

	/* queue truncate if we saw i_size decrease */
	if (queue_trunc)
		ceph_queue_vmtruncate(inode);

	/* populate frag tree */
	if (S_ISDIR(inode->i_mode))
		ceph_fill_fragtree(inode, &info->fragtree, dirinfo);

	/* update delegation info? */
	if (dirinfo)
		ceph_fill_dirfrag(inode, dirinfo);

	err = 0;
out:
	if (new_cap)
		ceph_put_cap(mdsc, new_cap);
	if (xattr_blob)
		ceph_buffer_put(xattr_blob);
	return err;
}

/*
 * caller should hold session s_mutex.
 */
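/*
 * Added note: lease->duration_ms from the MDS is converted to jiffies
 * in update_dentry_lease() below; e.g. duration_ms = 30000 with
 * HZ = 250 gives ttl = from_time + 7500 jiffies (30s), with the renew
 * point (half_ttl) 15 seconds after the request was started.
 */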
static void update_dentry_lease(struct dentry *dentry,
				struct ceph_mds_reply_lease *lease,
				struct ceph_mds_session *session,
				unsigned long from_time)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	long unsigned duration = le32_to_cpu(lease->duration_ms);
	long unsigned ttl = from_time + (duration * HZ) / 1000;
	long unsigned half_ttl = from_time + (duration * HZ / 2) / 1000;
	struct inode *dir;

	/* only track leases on regular dentries */
	if (dentry->d_op != &ceph_dentry_ops)
		return;

	spin_lock(&dentry->d_lock);
	dout("update_dentry_lease %p duration %lu ms ttl %lu\n",
	     dentry, duration, ttl);

	/* make lease_rdcache_gen match directory */
	dir = dentry->d_parent->d_inode;
	di->lease_shared_gen = ceph_inode(dir)->i_shared_gen;

	if (duration == 0)
		goto out_unlock;

	if (di->lease_gen == session->s_cap_gen &&
	    time_before(ttl, dentry->d_time))
		goto out_unlock; /* we already have a newer lease. */

	if (di->lease_session && di->lease_session != session)
		goto out_unlock;

	ceph_dentry_lru_touch(dentry);

	if (!di->lease_session)
		di->lease_session = ceph_get_mds_session(session);
	di->lease_gen = session->s_cap_gen;
	di->lease_seq = le32_to_cpu(lease->seq);
	di->lease_renew_after = half_ttl;
	di->lease_renew_from = 0;
	dentry->d_time = ttl;
out_unlock:
	spin_unlock(&dentry->d_lock);
	return;
}

/*
 * splice a dentry to an inode.
 * caller must hold directory i_mutex for this to be safe.
 *
 * we will only rehash the resulting dentry if @prehash is
 * true; @prehash will be set to false (for the benefit of
 * the caller) if we fail.
 */
static struct dentry *splice_dentry(struct dentry *dn, struct inode *in,
				    bool *prehash)
{
	struct dentry *realdn;

	BUG_ON(dn->d_inode);

	/* dn must be unhashed */
	if (!d_unhashed(dn))
		d_drop(dn);
	realdn = d_materialise_unique(dn, in);
	if (IS_ERR(realdn)) {
		pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n",
		       PTR_ERR(realdn), dn, in, ceph_vinop(in));
		if (prehash)
			*prehash = false; /* don't rehash on error */
		dn = realdn; /* note realdn contains the error */
		goto out;
	} else if (realdn) {
		dout("dn %p (%d) spliced with %p (%d) "
		     "inode %p ino %llx.%llx\n",
		     dn, d_count(dn),
		     realdn, d_count(realdn),
		     realdn->d_inode, ceph_vinop(realdn->d_inode));
		dput(dn);
		dn = realdn;
	} else {
		BUG_ON(!ceph_dentry(dn));
		dout("dn %p attached to %p ino %llx.%llx\n",
		     dn, dn->d_inode, ceph_vinop(dn->d_inode));
	}
	if ((!prehash || *prehash) && d_unhashed(dn))
		d_rehash(dn);
out:
	return dn;
}

/*
 * Incorporate results into the local cache.  This is either just
 * one inode, or a directory, dentry, and possibly linked-to inode (e.g.,
 * after a lookup).
 *
 * A reply may contain:
 *   a directory inode along with a dentry,
 *   and/or a target inode.
 *
 * Called with snap_rwsem (read).
 */
int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
		    struct ceph_mds_session *session)
{
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	struct inode *in = NULL;
	struct ceph_vino vino;
	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
	int err = 0;

	dout("fill_trace %p is_dentry %d is_target %d\n", req,
	     rinfo->head->is_dentry, rinfo->head->is_target);

#if 0
	/*
	 * Debugging hook:
	 *
	 * If we resend completed ops to a recovering mds, we get no
	 * trace.  Since that is very rare, pretend this is the case
	 * to ensure the 'no trace' handlers in the callers behave.
	 *
	 * Fill in inodes unconditionally to avoid breaking cap
	 * invariants.
	 */
	if (rinfo->head->op & CEPH_MDS_OP_WRITE) {
		pr_info("fill_trace faking empty trace on %lld %s\n",
			req->r_tid, ceph_mds_op_name(rinfo->head->op));
		if (rinfo->head->is_dentry) {
			rinfo->head->is_dentry = 0;
			err = fill_inode(req->r_locked_dir,
					 &rinfo->diri, rinfo->dirfrag,
					 session, req->r_request_started, -1);
		}
		if (rinfo->head->is_target) {
			rinfo->head->is_target = 0;
			ininfo = rinfo->targeti.in;
			vino.ino = le64_to_cpu(ininfo->ino);
			vino.snap = le64_to_cpu(ininfo->snapid);
			in = ceph_get_inode(sb, vino);
			err = fill_inode(in, &rinfo->targeti, NULL,
					 session, req->r_request_started,
					 req->r_fmode);
			iput(in);
		}
	}
#endif

	if (!rinfo->head->is_target && !rinfo->head->is_dentry) {
		dout("fill_trace reply is empty!\n");
		if (rinfo->head->result == 0 && req->r_locked_dir)
			ceph_invalidate_dir_request(req);
		return 0;
	}

	if (rinfo->head->is_dentry) {
		struct inode *dir = req->r_locked_dir;

		if (dir) {
			err = fill_inode(dir, &rinfo->diri, rinfo->dirfrag,
					 session, req->r_request_started, -1,
					 &req->r_caps_reservation);
			if (err < 0)
				goto done;
		} else {
			WARN_ON_ONCE(1);
		}

		if (dir && req->r_op == CEPH_MDS_OP_LOOKUPNAME) {
			struct qstr dname;
			struct dentry *dn, *parent;

			BUG_ON(!rinfo->head->is_target);
			BUG_ON(req->r_dentry);

			parent = d_find_any_alias(dir);
			BUG_ON(!parent);

			dname.name = rinfo->dname;
			dname.len = rinfo->dname_len;
			dname.hash = full_name_hash(dname.name, dname.len);
			vino.ino = le64_to_cpu(rinfo->targeti.in->ino);
			vino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
retry_lookup:
			dn = d_lookup(parent, &dname);
			dout("d_lookup on parent=%p name=%.*s got %p\n",
			     parent, dname.len, dname.name, dn);

			if (!dn) {
				dn = d_alloc(parent, &dname);
				dout("d_alloc %p '%.*s' = %p\n", parent,
				     dname.len, dname.name, dn);
				if (dn == NULL) {
					dput(parent);
					err = -ENOMEM;
					goto done;
				}
				err = ceph_init_dentry(dn);
				if (err < 0) {
					dput(dn);
					dput(parent);
					goto done;
				}
			} else if (dn->d_inode &&
				   (ceph_ino(dn->d_inode) != vino.ino ||
				    ceph_snap(dn->d_inode) != vino.snap)) {
				dout(" dn %p points to wrong inode %p\n",
				     dn, dn->d_inode);
				d_delete(dn);
				dput(dn);
				goto retry_lookup;
			}

			req->r_dentry = dn;
			dput(parent);
		}
	}

	if (rinfo->head->is_target) {
		vino.ino = le64_to_cpu(rinfo->targeti.in->ino);
		vino.snap = le64_to_cpu(rinfo->targeti.in->snapid);

		in = ceph_get_inode(sb, vino);
		if (IS_ERR(in)) {
			err = PTR_ERR(in);
			goto done;
		}
		req->r_target_inode = in;

		err = fill_inode(in, &rinfo->targeti, NULL,
				 session, req->r_request_started,
				 (!req->r_aborted && rinfo->head->result == 0) ?
				 req->r_fmode : -1,
				 &req->r_caps_reservation);
		if (err < 0) {
			pr_err("fill_inode badness %p %llx.%llx\n",
			       in, ceph_vinop(in));
			goto done;
		}
	}

	/*
	 * ignore null lease/binding on snapdir ENOENT, or else we
	 * will have trouble splicing in the virtual snapdir later
	 */
	if (rinfo->head->is_dentry && !req->r_aborted &&
	    req->r_locked_dir &&
	    (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name,
					       fsc->mount_options->snapdir_name,
					       req->r_dentry->d_name.len))) {
		/*
		 * lookup link rename   : null -> possibly existing inode
		 * mknod symlink mkdir  : null -> new inode
		 * unlink               : linked -> null
		 */
		struct inode *dir = req->r_locked_dir;
		struct dentry *dn = req->r_dentry;
		bool have_dir_cap, have_lease;

		BUG_ON(!dn);
		BUG_ON(!dir);
		BUG_ON(dn->d_parent->d_inode != dir);
		BUG_ON(ceph_ino(dir) !=
		       le64_to_cpu(rinfo->diri.in->ino));
		BUG_ON(ceph_snap(dir) !=
		       le64_to_cpu(rinfo->diri.in->snapid));

		/* do we have a lease on the whole dir? */
		have_dir_cap =
			(le32_to_cpu(rinfo->diri.in->cap.caps) &
			 CEPH_CAP_FILE_SHARED);

		/* do we have a dn lease? */
		have_lease = have_dir_cap ||
			le32_to_cpu(rinfo->dlease->duration_ms);
		if (!have_lease)
			dout("fill_trace no dentry lease or dir cap\n");

		/* rename? */
		if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) {
			struct inode *olddir = req->r_old_dentry_dir;
			BUG_ON(!olddir);

			dout(" src %p '%.*s' dst %p '%.*s'\n",
			     req->r_old_dentry,
			     req->r_old_dentry->d_name.len,
			     req->r_old_dentry->d_name.name,
			     dn, dn->d_name.len, dn->d_name.name);
			dout("fill_trace doing d_move %p -> %p\n",
			     req->r_old_dentry, dn);

			d_move(req->r_old_dentry, dn);
			dout(" src %p '%.*s' dst %p '%.*s'\n",
			     req->r_old_dentry,
			     req->r_old_dentry->d_name.len,
			     req->r_old_dentry->d_name.name,
			     dn, dn->d_name.len, dn->d_name.name);

			/* ensure target dentry is invalidated, despite
			   rehashing bug in vfs_rename_dir */
			ceph_invalidate_dentry_lease(dn);

			/* d_move screws up sibling dentries' offsets */
			ceph_dir_clear_ordered(dir);
			ceph_dir_clear_ordered(olddir);

			dout("dn %p gets new offset %lld\n", req->r_old_dentry,
			     ceph_dentry(req->r_old_dentry)->offset);

			dn = req->r_old_dentry; /* use old_dentry */
		}

		/* null dentry? */
		if (!rinfo->head->is_target) {
			dout("fill_trace null dentry\n");
			if (dn->d_inode) {
				ceph_dir_clear_ordered(dir);
				dout("d_delete %p\n", dn);
				d_delete(dn);
			} else {
				dout("d_instantiate %p NULL\n", dn);
				d_instantiate(dn, NULL);
				if (have_lease && d_unhashed(dn))
					d_rehash(dn);
				update_dentry_lease(dn, rinfo->dlease,
						    session,
						    req->r_request_started);
			}
			goto done;
		}

		/* attach proper inode */
		if (!dn->d_inode) {
			ceph_dir_clear_ordered(dir);
			ihold(in);
			dn = splice_dentry(dn, in, &have_lease);
			if (IS_ERR(dn)) {
				err = PTR_ERR(dn);
				goto done;
			}
			req->r_dentry = dn; /* may have spliced */
		} else if (dn->d_inode && dn->d_inode != in) {
			dout(" %p links to %p %llx.%llx, not %llx.%llx\n",
			     dn, dn->d_inode, ceph_vinop(dn->d_inode),
			     ceph_vinop(in));
			have_lease = false;
		}

		if (have_lease)
			update_dentry_lease(dn, rinfo->dlease, session,
					    req->r_request_started);
		dout(" final dn %p\n", dn);
	} else if (!req->r_aborted &&
		   (req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
		    req->r_op == CEPH_MDS_OP_MKSNAP)) {
		struct dentry *dn = req->r_dentry;
		struct inode *dir = req->r_locked_dir;

		/* fill out a snapdir LOOKUPSNAP dentry */
		BUG_ON(!dn);
		BUG_ON(!dir);
		BUG_ON(ceph_snap(dir) != CEPH_SNAPDIR);
		dout(" linking snapped dir %p to dn %p\n", in, dn);
		ceph_dir_clear_ordered(dir);
		ihold(in);
		dn = splice_dentry(dn, in, NULL);
		if (IS_ERR(dn)) {
			err = PTR_ERR(dn);
			goto done;
		}
		req->r_dentry = dn; /* may have spliced */
	}
done:
	dout("fill_trace done err=%d\n", err);
	return err;
}

/*
 * Prepopulate our cache with readdir results, leases, etc.
 */
static int readdir_prepopulate_inodes_only(struct ceph_mds_request *req,
					   struct ceph_mds_session *session)
{
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	int i, err = 0;

	for (i = 0; i < rinfo->dir_nr; i++) {
		struct ceph_vino vino;
		struct inode *in;
		int rc;

		vino.ino = le64_to_cpu(rinfo->dir_in[i].in->ino);
		vino.snap = le64_to_cpu(rinfo->dir_in[i].in->snapid);

		in = ceph_get_inode(req->r_dentry->d_sb, vino);
		if (IS_ERR(in)) {
			err = PTR_ERR(in);
			dout("new_inode badness got %d\n", err);
			continue;
		}
		rc = fill_inode(in, &rinfo->dir_in[i], NULL, session,
				req->r_request_started, -1,
				&req->r_caps_reservation);
		if (rc < 0) {
			pr_err("fill_inode badness on %p got %d\n", in, rc);
			err = rc;
			continue;
		}
	}

	return err;
}

int ceph_readdir_prepopulate(struct ceph_mds_request *req,
			     struct ceph_mds_session *session)
{
	struct dentry *parent = req->r_dentry;
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	struct qstr dname;
	struct dentry *dn;
	struct inode *in;
	int err = 0, ret, i;
	struct inode *snapdir = NULL;
	struct ceph_mds_request_head *rhead = req->r_request->front.iov_base;
	struct ceph_dentry_info *di;
	u64 r_readdir_offset = req->r_readdir_offset;
	u32 frag = le32_to_cpu(rhead->args.readdir.frag);

	if (rinfo->dir_dir &&
	    le32_to_cpu(rinfo->dir_dir->frag) != frag) {
		dout("readdir_prepopulate got new frag %x -> %x\n",
		     frag, le32_to_cpu(rinfo->dir_dir->frag));
		frag = le32_to_cpu(rinfo->dir_dir->frag);
		if (ceph_frag_is_leftmost(frag))
			r_readdir_offset = 2;
		else
			r_readdir_offset = 0;
	}
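	/*
	 * Added note: the base offset of 2 for the leftmost frag leaves
	 * room for the "." and ".." entries, which occupy the first two
	 * positions in a ceph readdir result.
	 */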

	if (req->r_aborted)
		return readdir_prepopulate_inodes_only(req, session);

	if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) {
		snapdir = ceph_get_snapdir(parent->d_inode);
		parent = d_find_alias(snapdir);
		dout("readdir_prepopulate %d items under SNAPDIR dn %p\n",
		     rinfo->dir_nr, parent);
	} else {
		dout("readdir_prepopulate %d items under dn %p\n",
		     rinfo->dir_nr, parent);
		if (rinfo->dir_dir)
			ceph_fill_dirfrag(parent->d_inode, rinfo->dir_dir);
	}

	/* FIXME: release caps/leases if error occurs */
	for (i = 0; i < rinfo->dir_nr; i++) {
		struct ceph_vino vino;

		dname.name = rinfo->dir_dname[i];
		dname.len = rinfo->dir_dname_len[i];
		dname.hash = full_name_hash(dname.name, dname.len);

		vino.ino = le64_to_cpu(rinfo->dir_in[i].in->ino);
		vino.snap = le64_to_cpu(rinfo->dir_in[i].in->snapid);

retry_lookup:
		dn = d_lookup(parent, &dname);
		dout("d_lookup on parent=%p name=%.*s got %p\n",
		     parent, dname.len, dname.name, dn);

		if (!dn) {
			dn = d_alloc(parent, &dname);
			dout("d_alloc %p '%.*s' = %p\n", parent,
			     dname.len, dname.name, dn);
			if (dn == NULL) {
				dout("d_alloc badness\n");
				err = -ENOMEM;
				goto out;
			}
			ret = ceph_init_dentry(dn);
			if (ret < 0) {
				dput(dn);
				err = ret;
				goto out;
			}
		} else if (dn->d_inode &&
			   (ceph_ino(dn->d_inode) != vino.ino ||
			    ceph_snap(dn->d_inode) != vino.snap)) {
			dout(" dn %p points to wrong inode %p\n",
			     dn, dn->d_inode);
			d_delete(dn);
			dput(dn);
			goto retry_lookup;
		} else {
			/* reorder parent's d_subdirs */
			spin_lock(&parent->d_lock);
			spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
			list_move(&dn->d_u.d_child, &parent->d_subdirs);
			spin_unlock(&dn->d_lock);
			spin_unlock(&parent->d_lock);
		}

		/* inode */
		if (dn->d_inode) {
			in = dn->d_inode;
		} else {
			in = ceph_get_inode(parent->d_sb, vino);
			if (IS_ERR(in)) {
				dout("new_inode badness\n");
				d_drop(dn);
				dput(dn);
				err = PTR_ERR(in);
				goto out;
			}
		}

		if (fill_inode(in, &rinfo->dir_in[i], NULL, session,
			       req->r_request_started, -1,
			       &req->r_caps_reservation) < 0) {
			pr_err("fill_inode badness on %p\n", in);
			if (!dn->d_inode)
				iput(in);
			d_drop(dn);
			goto next_item;
		}

		if (!dn->d_inode) {
			dn = splice_dentry(dn, in, NULL);
			if (IS_ERR(dn)) {
				err = PTR_ERR(dn);
				dn = NULL;
				goto next_item;
			}
		}

		di = dn->d_fsdata;
		di->offset = ceph_make_fpos(frag, i + r_readdir_offset);

		update_dentry_lease(dn, rinfo->dir_dlease[i],
				    req->r_session,
				    req->r_request_started);
next_item:
		if (dn)
			dput(dn);
	}
	if (err == 0)
		req->r_did_prepopulate = true;

out:
	if (snapdir) {
		iput(snapdir);
		dput(parent);
	}
	dout("readdir_prepopulate done\n");
	return err;
}

int ceph_inode_set_size(struct inode *inode, loff_t size)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret = 0;

	spin_lock(&ci->i_ceph_lock);
	dout("set_size %p %llu -> %llu\n", inode, inode->i_size, size);
	inode->i_size = size;
	inode->i_blocks = (size + (1 << 9) - 1) >> 9;

	/* tell the MDS if we are approaching max_size */
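	/*
	 * Added note: the shifts below mean "size has reached half of
	 * max_size".  E.g. with max_size = 4 MB the caller is told to
	 * report to the MDS once the file passes 2 MB, but only if the
	 * last reported size was still below that halfway mark, so
	 * repeated writes don't re-trigger the report.
	 */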
	if ((size << 1) >= ci->i_max_size &&
	    (ci->i_reported_size << 1) < ci->i_max_size)
		ret = 1;

	spin_unlock(&ci->i_ceph_lock);
	return ret;
}

/*
 * Write back inode data in a worker thread.  (This can't be done
 * in the message handler context.)
 */
void ceph_queue_writeback(struct inode *inode)
{
	ihold(inode);
	if (queue_work(ceph_inode_to_client(inode)->wb_wq,
		       &ceph_inode(inode)->i_wb_work)) {
		dout("ceph_queue_writeback %p\n", inode);
	} else {
		dout("ceph_queue_writeback %p failed\n", inode);
		iput(inode);
	}
}

static void ceph_writeback_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_wb_work);
	struct inode *inode = &ci->vfs_inode;

	dout("writeback %p\n", inode);
	filemap_fdatawrite(&inode->i_data);
	iput(inode);
}

/*
 * queue an async invalidation
 */
void ceph_queue_invalidate(struct inode *inode)
{
	ihold(inode);
	if (queue_work(ceph_inode_to_client(inode)->pg_inv_wq,
		       &ceph_inode(inode)->i_pg_inv_work)) {
		dout("ceph_queue_invalidate %p\n", inode);
	} else {
		dout("ceph_queue_invalidate %p failed\n", inode);
		iput(inode);
	}
}

/*
 * Invalidate inode pages in a worker thread.  (This can't be done
 * in the message handler context.)
 */
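/*
 * Added note (roughly): i_rdcache_gen counts read-cache generations
 * and i_rdcache_revoking records the generation in which a
 * CEPH_CAP_FILE_CACHE revoke was queued.  The generation checks below
 * detect when this work item raced with a newer revoke or with new
 * cached reads, in which case the invalidation is treated as stale
 * and only a cap check is done.
 */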
static void ceph_invalidate_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_pg_inv_work);
	struct inode *inode = &ci->vfs_inode;
	u32 orig_gen;
	int check = 0;

	mutex_lock(&ci->i_truncate_mutex);
	spin_lock(&ci->i_ceph_lock);
	dout("invalidate_pages %p gen %d revoking %d\n", inode,
	     ci->i_rdcache_gen, ci->i_rdcache_revoking);
	if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
		if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE))
			check = 1;
		spin_unlock(&ci->i_ceph_lock);
		mutex_unlock(&ci->i_truncate_mutex);
		goto out;
	}
	orig_gen = ci->i_rdcache_gen;
	spin_unlock(&ci->i_ceph_lock);

	truncate_pagecache(inode, 0);

	spin_lock(&ci->i_ceph_lock);
	if (orig_gen == ci->i_rdcache_gen &&
	    orig_gen == ci->i_rdcache_revoking) {
		dout("invalidate_pages %p gen %d successful\n", inode,
		     ci->i_rdcache_gen);
		ci->i_rdcache_revoking--;
		check = 1;
	} else {
		dout("invalidate_pages %p gen %d raced, now %d revoking %d\n",
		     inode, orig_gen, ci->i_rdcache_gen,
		     ci->i_rdcache_revoking);
		if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE))
			check = 1;
	}
	spin_unlock(&ci->i_ceph_lock);
	mutex_unlock(&ci->i_truncate_mutex);
out:
	if (check)
		ceph_check_caps(ci, 0, NULL);
	iput(inode);
}


/*
 * called by trunc_wq; we also truncate in a separate thread.
 */
static void ceph_vmtruncate_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_vmtruncate_work);
	struct inode *inode = &ci->vfs_inode;

	dout("vmtruncate_work %p\n", inode);
	__ceph_do_pending_vmtruncate(inode);
	iput(inode);
}

/*
 * Queue an async vmtruncate.  If we fail to queue work, we will handle
 * the truncation the next time we call __ceph_do_pending_vmtruncate.
 */
void ceph_queue_vmtruncate(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	ihold(inode);

	if (queue_work(ceph_sb_to_client(inode->i_sb)->trunc_wq,
		       &ci->i_vmtruncate_work)) {
		dout("ceph_queue_vmtruncate %p\n", inode);
	} else {
		dout("ceph_queue_vmtruncate %p failed, pending=%d\n",
		     inode, ci->i_truncate_pending);
		iput(inode);
	}
}

/*
 * Make sure any pending truncation is applied before doing anything
 * that may depend on it.
 */
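/*
 * Added note on the retry loop below: dirty pages belonging to older
 * snap contexts are flushed (with the lock dropped) before anything is
 * truncated, and i_truncate_size is re-checked after
 * truncate_pagecache() because a newer truncate from the MDS may have
 * arrived while the lock was not held; in that case the loop runs
 * again with the newer size.
 */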
void __ceph_do_pending_vmtruncate(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 to;
	int wrbuffer_refs, finish = 0;

	mutex_lock(&ci->i_truncate_mutex);
retry:
	spin_lock(&ci->i_ceph_lock);
	if (ci->i_truncate_pending == 0) {
		dout("__do_pending_vmtruncate %p none pending\n", inode);
		spin_unlock(&ci->i_ceph_lock);
		mutex_unlock(&ci->i_truncate_mutex);
		return;
	}

	/*
	 * make sure any dirty snapped pages are flushed before we
	 * possibly truncate them.. so write AND block!
	 */
	if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) {
		dout("__do_pending_vmtruncate %p flushing snaps first\n",
		     inode);
		spin_unlock(&ci->i_ceph_lock);
		filemap_write_and_wait_range(&inode->i_data, 0,
					     inode->i_sb->s_maxbytes);
		goto retry;
	}

	/* there should be no reader or writer */
	WARN_ON_ONCE(ci->i_rd_ref || ci->i_wr_ref);

	to = ci->i_truncate_size;
	wrbuffer_refs = ci->i_wrbuffer_ref;
	dout("__do_pending_vmtruncate %p (%d) to %lld\n", inode,
	     ci->i_truncate_pending, to);
	spin_unlock(&ci->i_ceph_lock);

	truncate_pagecache(inode, to);

	spin_lock(&ci->i_ceph_lock);
	if (to == ci->i_truncate_size) {
		ci->i_truncate_pending = 0;
		finish = 1;
	}
	spin_unlock(&ci->i_ceph_lock);
	if (!finish)
		goto retry;

	mutex_unlock(&ci->i_truncate_mutex);

	if (wrbuffer_refs == 0)
		ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);

	wake_up_all(&ci->i_cap_wq);
}

/*
 * symlinks
 */
static void *ceph_sym_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	struct ceph_inode_info *ci = ceph_inode(dentry->d_inode);
	nd_set_link(nd, ci->i_symlink);
	return NULL;
}

static const struct inode_operations ceph_symlink_iops = {
	.readlink = generic_readlink,
	.follow_link = ceph_sym_follow_link,
	.setattr = ceph_setattr,
	.getattr = ceph_getattr,
	.setxattr = ceph_setxattr,
	.getxattr = ceph_getxattr,
	.listxattr = ceph_listxattr,
	.removexattr = ceph_removexattr,
};

/*
 * setattr
 */
int ceph_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	const unsigned int ia_valid = attr->ia_valid;
	struct ceph_mds_request *req;
	struct ceph_mds_client *mdsc = ceph_sb_to_client(dentry->d_sb)->mdsc;
	int issued;
	int release = 0, dirtied = 0;
	int mask = 0;
	int err = 0;
	int inode_dirty_flags = 0;

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

	err = inode_change_ok(inode, attr);
	if (err != 0)
		return err;

	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETATTR,
				       USE_AUTH_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);

	spin_lock(&ci->i_ceph_lock);
	issued = __ceph_caps_issued(ci, NULL);
	dout("setattr %p issued %s\n", inode, ceph_cap_string(issued));

	if (ia_valid & ATTR_UID) {
		dout("setattr %p uid %d -> %d\n", inode,
		     from_kuid(&init_user_ns, inode->i_uid),
		     from_kuid(&init_user_ns, attr->ia_uid));
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_uid = attr->ia_uid;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   !uid_eq(attr->ia_uid, inode->i_uid)) {
			req->r_args.setattr.uid = cpu_to_le32(
				from_kuid(&init_user_ns, attr->ia_uid));
			mask |= CEPH_SETATTR_UID;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}
	if (ia_valid & ATTR_GID) {
		dout("setattr %p gid %d -> %d\n", inode,
		     from_kgid(&init_user_ns, inode->i_gid),
		     from_kgid(&init_user_ns, attr->ia_gid));
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_gid = attr->ia_gid;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   !gid_eq(attr->ia_gid, inode->i_gid)) {
			req->r_args.setattr.gid = cpu_to_le32(
				from_kgid(&init_user_ns, attr->ia_gid));
			mask |= CEPH_SETATTR_GID;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}
	if (ia_valid & ATTR_MODE) {
		dout("setattr %p mode 0%o -> 0%o\n", inode, inode->i_mode,
		     attr->ia_mode);
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_mode = attr->ia_mode;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   attr->ia_mode != inode->i_mode) {
			inode->i_mode = attr->ia_mode;
			req->r_args.setattr.mode = cpu_to_le32(attr->ia_mode);
			mask |= CEPH_SETATTR_MODE;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}

	if (ia_valid & ATTR_ATIME) {
		dout("setattr %p atime %ld.%ld -> %ld.%ld\n", inode,
		     inode->i_atime.tv_sec, inode->i_atime.tv_nsec,
		     attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec);
		if (issued & CEPH_CAP_FILE_EXCL) {
			ci->i_time_warp_seq++;
			inode->i_atime = attr->ia_atime;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_WR) &&
			   timespec_compare(&inode->i_atime,
					    &attr->ia_atime) < 0) {
			inode->i_atime = attr->ia_atime;
			dirtied |= CEPH_CAP_FILE_WR;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   !timespec_equal(&inode->i_atime, &attr->ia_atime)) {
			ceph_encode_timespec(&req->r_args.setattr.atime,
					     &attr->ia_atime);
			mask |= CEPH_SETATTR_ATIME;
			release |= CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}
	if (ia_valid & ATTR_MTIME) {
		dout("setattr %p mtime %ld.%ld -> %ld.%ld\n", inode,
		     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
		     attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec);
		if (issued & CEPH_CAP_FILE_EXCL) {
			ci->i_time_warp_seq++;
			inode->i_mtime = attr->ia_mtime;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_WR) &&
			   timespec_compare(&inode->i_mtime,
					    &attr->ia_mtime) < 0) {
			inode->i_mtime = attr->ia_mtime;
			dirtied |= CEPH_CAP_FILE_WR;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   !timespec_equal(&inode->i_mtime, &attr->ia_mtime)) {
			ceph_encode_timespec(&req->r_args.setattr.mtime,
					     &attr->ia_mtime);
			mask |= CEPH_SETATTR_MTIME;
			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}
	if (ia_valid & ATTR_SIZE) {
		dout("setattr %p size %lld -> %lld\n", inode,
		     inode->i_size, attr->ia_size);
		if ((issued & CEPH_CAP_FILE_EXCL) &&
		    attr->ia_size > inode->i_size) {
			inode->i_size = attr->ia_size;
			inode->i_blocks =
				(attr->ia_size + (1 << 9) - 1) >> 9;
			inode->i_ctime = attr->ia_ctime;
			ci->i_reported_size = attr->ia_size;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   attr->ia_size != inode->i_size) {
			req->r_args.setattr.size = cpu_to_le64(attr->ia_size);
			req->r_args.setattr.old_size =
				cpu_to_le64(inode->i_size);
			mask |= CEPH_SETATTR_SIZE;
			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}

	/* these do nothing */
	if (ia_valid & ATTR_CTIME) {
		bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME|
					 ATTR_MODE|ATTR_UID|ATTR_GID)) == 0;
		dout("setattr %p ctime %ld.%ld -> %ld.%ld (%s)\n", inode,
		     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
		     attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
		     only ? "ctime only" : "ignored");
		inode->i_ctime = attr->ia_ctime;
		if (only) {
			/*
			 * if kernel wants to dirty ctime but nothing else,
			 * we need to choose a cap to dirty under, or do
			 * an almost-no-op setattr
			 */
			if (issued & CEPH_CAP_AUTH_EXCL)
				dirtied |= CEPH_CAP_AUTH_EXCL;
			else if (issued & CEPH_CAP_FILE_EXCL)
				dirtied |= CEPH_CAP_FILE_EXCL;
			else if (issued & CEPH_CAP_XATTR_EXCL)
				dirtied |= CEPH_CAP_XATTR_EXCL;
			else
				mask |= CEPH_SETATTR_CTIME;
		}
	}
	if (ia_valid & ATTR_FILE)
		dout("setattr %p ATTR_FILE ... hrm!\n", inode);

	if (dirtied) {
		inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied);
		inode->i_ctime = CURRENT_TIME;
	}

	release &= issued;
	spin_unlock(&ci->i_ceph_lock);

	if (inode_dirty_flags)
		__mark_inode_dirty(inode, inode_dirty_flags);

	if (ia_valid & ATTR_MODE) {
		err = posix_acl_chmod(inode, attr->ia_mode);
		if (err)
			goto out_put;
	}

	if (mask) {
		req->r_inode = inode;
		ihold(inode);
		req->r_inode_drop = release;
		req->r_args.setattr.mask = cpu_to_le32(mask);
		req->r_num_caps = 1;
		err = ceph_mdsc_do_request(mdsc, NULL, req);
	}
	dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err,
	     ceph_cap_string(dirtied), mask);

	ceph_mdsc_put_request(req);
	if (mask & CEPH_SETATTR_SIZE)
		__ceph_do_pending_vmtruncate(inode);
	return err;
out_put:
	ceph_mdsc_put_request(req);
	return err;
}

/*
 * Verify that we have a lease on the given mask.  If not,
 * do a getattr against an mds.
 */
int ceph_do_getattr(struct inode *inode, int mask, bool force)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(inode) == CEPH_SNAPDIR) {
		dout("do_getattr inode %p SNAPDIR\n", inode);
		return 0;
	}

	dout("do_getattr inode %p mask %s mode 0%o\n",
	     inode, ceph_cap_string(mask), inode->i_mode);
	if (!force && ceph_caps_issued_mask(ceph_inode(inode), mask, 1))
		return 0;

	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->r_inode = inode;
	ihold(inode);
	req->r_num_caps = 1;
	req->r_args.getattr.mask = cpu_to_le32(mask);
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	ceph_mdsc_put_request(req);
	dout("do_getattr result=%d\n", err);
	return err;
}


/*
 * Check inode permissions.  We verify we have a valid value for
 * the AUTH cap, then call the generic handler.
 */
int ceph_permission(struct inode *inode, int mask)
{
	int err;

	if (mask & MAY_NOT_BLOCK)
		return -ECHILD;

	err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED, false);

	if (!err)
		err = generic_permission(inode, mask);
	return err;
}

/*
 * Get all attributes.  Hopefully someday we'll have a statlite()
 * and can limit the fields we require to be accurate.
 */
int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry,
		 struct kstat *stat)
{
	struct inode *inode = dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int err;

	err = ceph_do_getattr(inode, CEPH_STAT_CAP_INODE_ALL, false);
	if (!err) {
		generic_fillattr(inode, stat);
		stat->ino = ceph_translate_ino(inode->i_sb, inode->i_ino);
		if (ceph_snap(inode) != CEPH_NOSNAP)
			stat->dev = ceph_snap(inode);
		else
			stat->dev = 0;
		if (S_ISDIR(inode->i_mode)) {
			if (ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb),
						RBYTES))
				stat->size = ci->i_rbytes;
			else
				stat->size = ci->i_files + ci->i_subdirs;
			stat->blocks = 0;
			stat->blksize = 65536;
		}
	}
	return err;
}