ceph: avoid updating directory inode's i_size accidentally
[linux-2.6-block.git] fs/ceph/inode.c
#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/vmalloc.h>
#include <linux/posix_acl.h>
#include <linux/random.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
#include <linux/ceph/decode.h>

/*
 * Ceph inode operations
 *
 * Implement basic inode helpers (get, alloc) and inode ops (getattr,
 * setattr, etc.), xattr helpers, and helpers for assimilating
 * metadata returned by the MDS into our cache.
 *
 * Also define helpers for doing asynchronous writeback, invalidation,
 * and truncation for the benefit of those who can't afford to block
 * (typically because they are in the message handler path).
 */

static const struct inode_operations ceph_symlink_iops;

static void ceph_invalidate_work(struct work_struct *work);
static void ceph_writeback_work(struct work_struct *work);
static void ceph_vmtruncate_work(struct work_struct *work);

/*
 * find or create an inode, given the ceph ino number
 */
static int ceph_set_ino_cb(struct inode *inode, void *data)
{
	ceph_inode(inode)->i_vino = *(struct ceph_vino *)data;
	inode->i_ino = ceph_vino_to_ino(*(struct ceph_vino *)data);
	return 0;
}

struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino)
{
	struct inode *inode;
	ino_t t = ceph_vino_to_ino(vino);

	inode = iget5_locked(sb, t, ceph_ino_compare, ceph_set_ino_cb, &vino);
	if (inode == NULL)
		return ERR_PTR(-ENOMEM);
	if (inode->i_state & I_NEW) {
		dout("get_inode created new inode %p %llx.%llx ino %llx\n",
		     inode, ceph_vinop(inode), (u64)inode->i_ino);
		unlock_new_inode(inode);
	}

	dout("get_inode on %lu=%llx.%llx got %p\n", inode->i_ino, vino.ino,
	     vino.snap, inode);
	return inode;
}
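/*
 * Illustrative sketch (added for exposition, not part of the original
 * file): a caller typically builds the ceph_vino from an MDS reply and
 * then looks the inode up; the names "sb" and "info" are assumed here.
 *
 *	struct ceph_vino vino = {
 *		.ino = le64_to_cpu(info->ino),
 *		.snap = le64_to_cpu(info->snapid),
 *	};
 *	struct inode *inode = ceph_get_inode(sb, vino);
 *	if (IS_ERR(inode))
 *		return PTR_ERR(inode);
 *	...
 *	iput(inode);
 */
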
/*
 * get/construct snapdir inode for a given directory
 */
struct inode *ceph_get_snapdir(struct inode *parent)
{
	struct ceph_vino vino = {
		.ino = ceph_ino(parent),
		.snap = CEPH_SNAPDIR,
	};
	struct inode *inode = ceph_get_inode(parent->i_sb, vino);
	struct ceph_inode_info *ci = ceph_inode(inode);

	BUG_ON(!S_ISDIR(parent->i_mode));
	if (IS_ERR(inode))
		return inode;
	inode->i_mode = parent->i_mode;
	inode->i_uid = parent->i_uid;
	inode->i_gid = parent->i_gid;
	inode->i_op = &ceph_snapdir_iops;
	inode->i_fop = &ceph_snapdir_fops;
	ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
	ci->i_rbytes = 0;
	return inode;
}

const struct inode_operations ceph_file_iops = {
	.permission = ceph_permission,
	.setattr = ceph_setattr,
	.getattr = ceph_getattr,
	.setxattr = ceph_setxattr,
	.getxattr = ceph_getxattr,
	.listxattr = ceph_listxattr,
	.removexattr = ceph_removexattr,
	.get_acl = ceph_get_acl,
	.set_acl = ceph_set_acl,
};


/*
 * We use a 'frag tree' to keep track of the MDS's directory fragments
 * for a given inode (usually there is just a single fragment).  We
 * need to know when a child frag is delegated to a new MDS, or when
 * it is flagged as replicated, so we can direct our requests
 * accordingly.
 */

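/*
 * Illustrative note (added for exposition): a frag id is a single u32
 * encoding a split depth plus a hash-prefix value; ceph_frag_make(0, 0)
 * denotes the whole hash space, and splitting the root by 1 yields two
 * children covering the lower and upper halves (see ceph_frag.h for the
 * exact bit layout).  A hedged usage sketch, with "dname_hash" an
 * assumed input:
 *
 *	u32 leaf = ceph_choose_frag(ci, dname_hash, NULL, NULL);
 */
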
/*
 * find/create a frag in the tree
 */
static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci,
						    u32 f)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ceph_inode_frag *frag;
	int c;

	p = &ci->i_fragtree.rb_node;
	while (*p) {
		parent = *p;
		frag = rb_entry(parent, struct ceph_inode_frag, node);
		c = ceph_frag_compare(f, frag->frag);
		if (c < 0)
			p = &(*p)->rb_left;
		else if (c > 0)
			p = &(*p)->rb_right;
		else
			return frag;
	}

	frag = kmalloc(sizeof(*frag), GFP_NOFS);
	if (!frag) {
		pr_err("__get_or_create_frag ENOMEM on %p %llx.%llx "
		       "frag %x\n", &ci->vfs_inode,
		       ceph_vinop(&ci->vfs_inode), f);
		return ERR_PTR(-ENOMEM);
	}
	frag->frag = f;
	frag->split_by = 0;
	frag->mds = -1;
	frag->ndist = 0;

	rb_link_node(&frag->node, parent, p);
	rb_insert_color(&frag->node, &ci->i_fragtree);

	dout("get_or_create_frag added %llx.%llx frag %x\n",
	     ceph_vinop(&ci->vfs_inode), f);
	return frag;
}

/*
 * find a specific frag @f
 */
struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci, u32 f)
{
	struct rb_node *n = ci->i_fragtree.rb_node;

	while (n) {
		struct ceph_inode_frag *frag =
			rb_entry(n, struct ceph_inode_frag, node);
		int c = ceph_frag_compare(f, frag->frag);
		if (c < 0)
			n = n->rb_left;
		else if (c > 0)
			n = n->rb_right;
		else
			return frag;
	}
	return NULL;
}

/*
 * Choose frag containing the given value @v.  If @pfrag is
 * specified, copy the frag delegation info to the caller if
 * it is present.
 */
static u32 __ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
			      struct ceph_inode_frag *pfrag, int *found)
{
	u32 t = ceph_frag_make(0, 0);
	struct ceph_inode_frag *frag;
	unsigned nway, i;
	u32 n;

	if (found)
		*found = 0;

	while (1) {
		WARN_ON(!ceph_frag_contains_value(t, v));
		frag = __ceph_find_frag(ci, t);
		if (!frag)
			break; /* t is a leaf */
		if (frag->split_by == 0) {
			if (pfrag)
				memcpy(pfrag, frag, sizeof(*pfrag));
			if (found)
				*found = 1;
			break;
		}

		/* choose child */
		nway = 1 << frag->split_by;
		dout("choose_frag(%x) %x splits by %d (%d ways)\n", v, t,
		     frag->split_by, nway);
		for (i = 0; i < nway; i++) {
			n = ceph_frag_make_child(t, frag->split_by, i);
			if (ceph_frag_contains_value(n, v)) {
				t = n;
				break;
			}
		}
		BUG_ON(i == nway);
	}
	dout("choose_frag(%x) = %x\n", v, t);

	return t;
}

u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
		     struct ceph_inode_frag *pfrag, int *found)
{
	u32 ret;
	mutex_lock(&ci->i_fragtree_mutex);
	ret = __ceph_choose_frag(ci, v, pfrag, found);
	mutex_unlock(&ci->i_fragtree_mutex);
	return ret;
}

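/*
 * Worked example (added for exposition, values assumed): if the only
 * entry in i_fragtree is the root split by 2, __ceph_choose_frag()
 * starts at t = ceph_frag_make(0, 0), enumerates the 1 << 2 = 4
 * children, descends into the one containing v, finds no further
 * split, and returns that child as the leaf.  A caller that also
 * wants the delegation info might do:
 *
 *	int found;
 *	struct ceph_inode_frag pfrag;
 *	u32 fg = ceph_choose_frag(ci, v, &pfrag, &found);
 *	// found != 0 => pfrag.mds / pfrag.dist[] describe the referral
 */
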
/*
 * Process dirfrag (delegation) info from the mds.  Include leaf
 * fragment in tree ONLY if ndist > 0.  Otherwise, only
 * branches/splits are included in i_fragtree.
 */
static int ceph_fill_dirfrag(struct inode *inode,
			     struct ceph_mds_reply_dirfrag *dirinfo)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_frag *frag;
	u32 id = le32_to_cpu(dirinfo->frag);
	int mds = le32_to_cpu(dirinfo->auth);
	int ndist = le32_to_cpu(dirinfo->ndist);
	int diri_auth = -1;
	int i;
	int err = 0;

	spin_lock(&ci->i_ceph_lock);
	if (ci->i_auth_cap)
		diri_auth = ci->i_auth_cap->mds;
	spin_unlock(&ci->i_ceph_lock);

	mutex_lock(&ci->i_fragtree_mutex);
	if (ndist == 0 && mds == diri_auth) {
		/* no delegation info needed. */
		frag = __ceph_find_frag(ci, id);
		if (!frag)
			goto out;
		if (frag->split_by == 0) {
			/* tree leaf, remove */
			dout("fill_dirfrag removed %llx.%llx frag %x"
			     " (no ref)\n", ceph_vinop(inode), id);
			rb_erase(&frag->node, &ci->i_fragtree);
			kfree(frag);
		} else {
			/* tree branch, keep and clear */
			dout("fill_dirfrag cleared %llx.%llx frag %x"
			     " referral\n", ceph_vinop(inode), id);
			frag->mds = -1;
			frag->ndist = 0;
		}
		goto out;
	}


	/* find/add this frag to store mds delegation info */
	frag = __get_or_create_frag(ci, id);
	if (IS_ERR(frag)) {
		/* this is not the end of the world; we can continue
		   with bad/inaccurate delegation info */
		pr_err("fill_dirfrag ENOMEM on mds ref %llx.%llx fg %x\n",
		       ceph_vinop(inode), le32_to_cpu(dirinfo->frag));
		err = -ENOMEM;
		goto out;
	}

	frag->mds = mds;
	frag->ndist = min_t(u32, ndist, CEPH_MAX_DIRFRAG_REP);
	for (i = 0; i < frag->ndist; i++)
		frag->dist[i] = le32_to_cpu(dirinfo->dist[i]);
	dout("fill_dirfrag %llx.%llx frag %x ndist=%d\n",
	     ceph_vinop(inode), frag->frag, frag->ndist);

out:
	mutex_unlock(&ci->i_fragtree_mutex);
	return err;
}

static int ceph_fill_fragtree(struct inode *inode,
			      struct ceph_frag_tree_head *fragtree,
			      struct ceph_mds_reply_dirfrag *dirinfo)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_frag *frag;
	struct rb_node *rb_node;
	int i;
	u32 id, nsplits;
	bool update = false;

	mutex_lock(&ci->i_fragtree_mutex);
	nsplits = le32_to_cpu(fragtree->nsplits);
	if (nsplits) {
		i = prandom_u32() % nsplits;
		id = le32_to_cpu(fragtree->splits[i].frag);
		if (!__ceph_find_frag(ci, id))
			update = true;
	} else if (!RB_EMPTY_ROOT(&ci->i_fragtree)) {
		rb_node = rb_first(&ci->i_fragtree);
		frag = rb_entry(rb_node, struct ceph_inode_frag, node);
		if (frag->frag != ceph_frag_make(0, 0) || rb_next(rb_node))
			update = true;
	}
	if (!update && dirinfo) {
		id = le32_to_cpu(dirinfo->frag);
		if (id != __ceph_choose_frag(ci, id, NULL, NULL))
			update = true;
	}
	if (!update)
		goto out_unlock;

	dout("fill_fragtree %llx.%llx\n", ceph_vinop(inode));
	rb_node = rb_first(&ci->i_fragtree);
	for (i = 0; i < nsplits; i++) {
		id = le32_to_cpu(fragtree->splits[i].frag);
		frag = NULL;
		while (rb_node) {
			frag = rb_entry(rb_node, struct ceph_inode_frag, node);
			if (ceph_frag_compare(frag->frag, id) >= 0) {
				if (frag->frag != id)
					frag = NULL;
				else
					rb_node = rb_next(rb_node);
				break;
			}
			rb_node = rb_next(rb_node);
			rb_erase(&frag->node, &ci->i_fragtree);
			kfree(frag);
			frag = NULL;
		}
		if (!frag) {
			frag = __get_or_create_frag(ci, id);
			if (IS_ERR(frag))
				continue;
		}
		frag->split_by = le32_to_cpu(fragtree->splits[i].by);
		dout(" frag %x split by %d\n", frag->frag, frag->split_by);
	}
	while (rb_node) {
		frag = rb_entry(rb_node, struct ceph_inode_frag, node);
		rb_node = rb_next(rb_node);
		rb_erase(&frag->node, &ci->i_fragtree);
		kfree(frag);
	}
out_unlock:
	mutex_unlock(&ci->i_fragtree_mutex);
	return 0;
}

/*
 * initialize a newly allocated inode.
 */
struct inode *ceph_alloc_inode(struct super_block *sb)
{
	struct ceph_inode_info *ci;
	int i;

	ci = kmem_cache_alloc(ceph_inode_cachep, GFP_NOFS);
	if (!ci)
		return NULL;

	dout("alloc_inode %p\n", &ci->vfs_inode);

	spin_lock_init(&ci->i_ceph_lock);

	ci->i_version = 0;
	ci->i_inline_version = 0;
	ci->i_time_warp_seq = 0;
	ci->i_ceph_flags = 0;
	atomic64_set(&ci->i_ordered_count, 1);
	atomic64_set(&ci->i_release_count, 1);
	atomic64_set(&ci->i_complete_seq[0], 0);
	atomic64_set(&ci->i_complete_seq[1], 0);
	ci->i_symlink = NULL;

	memset(&ci->i_dir_layout, 0, sizeof(ci->i_dir_layout));
	ci->i_pool_ns_len = 0;

	ci->i_fragtree = RB_ROOT;
	mutex_init(&ci->i_fragtree_mutex);

	ci->i_xattrs.blob = NULL;
	ci->i_xattrs.prealloc_blob = NULL;
	ci->i_xattrs.dirty = false;
	ci->i_xattrs.index = RB_ROOT;
	ci->i_xattrs.count = 0;
	ci->i_xattrs.names_size = 0;
	ci->i_xattrs.vals_size = 0;
	ci->i_xattrs.version = 0;
	ci->i_xattrs.index_version = 0;

	ci->i_caps = RB_ROOT;
	ci->i_auth_cap = NULL;
	ci->i_dirty_caps = 0;
	ci->i_flushing_caps = 0;
	INIT_LIST_HEAD(&ci->i_dirty_item);
	INIT_LIST_HEAD(&ci->i_flushing_item);
	ci->i_prealloc_cap_flush = NULL;
	ci->i_cap_flush_tree = RB_ROOT;
	init_waitqueue_head(&ci->i_cap_wq);
	ci->i_hold_caps_min = 0;
	ci->i_hold_caps_max = 0;
	INIT_LIST_HEAD(&ci->i_cap_delay_list);
	INIT_LIST_HEAD(&ci->i_cap_snaps);
	ci->i_head_snapc = NULL;
	ci->i_snap_caps = 0;

	for (i = 0; i < CEPH_FILE_MODE_NUM; i++)
		ci->i_nr_by_mode[i] = 0;

	mutex_init(&ci->i_truncate_mutex);
	ci->i_truncate_seq = 0;
	ci->i_truncate_size = 0;
	ci->i_truncate_pending = 0;

	ci->i_max_size = 0;
	ci->i_reported_size = 0;
	ci->i_wanted_max_size = 0;
	ci->i_requested_max_size = 0;

	ci->i_pin_ref = 0;
	ci->i_rd_ref = 0;
	ci->i_rdcache_ref = 0;
	ci->i_wr_ref = 0;
	ci->i_wb_ref = 0;
	ci->i_wrbuffer_ref = 0;
	ci->i_wrbuffer_ref_head = 0;
	ci->i_shared_gen = 0;
	ci->i_rdcache_gen = 0;
	ci->i_rdcache_revoking = 0;

	INIT_LIST_HEAD(&ci->i_unsafe_writes);
	INIT_LIST_HEAD(&ci->i_unsafe_dirops);
	INIT_LIST_HEAD(&ci->i_unsafe_iops);
	spin_lock_init(&ci->i_unsafe_lock);

	ci->i_snap_realm = NULL;
	INIT_LIST_HEAD(&ci->i_snap_realm_item);
	INIT_LIST_HEAD(&ci->i_snap_flush_item);

	INIT_WORK(&ci->i_wb_work, ceph_writeback_work);
	INIT_WORK(&ci->i_pg_inv_work, ceph_invalidate_work);

	INIT_WORK(&ci->i_vmtruncate_work, ceph_vmtruncate_work);

	ceph_fscache_inode_init(ci);

	return &ci->vfs_inode;
}

static void ceph_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	struct ceph_inode_info *ci = ceph_inode(inode);

	kmem_cache_free(ceph_inode_cachep, ci);
}

void ceph_destroy_inode(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_frag *frag;
	struct rb_node *n;

	dout("destroy_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode));

	ceph_fscache_unregister_inode_cookie(ci);

	ceph_queue_caps_release(inode);

	/*
	 * we may still have a snap_realm reference if there are stray
	 * caps in i_snap_caps.
	 */
	if (ci->i_snap_realm) {
		struct ceph_mds_client *mdsc =
			ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
		struct ceph_snap_realm *realm = ci->i_snap_realm;

		dout(" dropping residual ref to snap realm %p\n", realm);
		spin_lock(&realm->inodes_with_caps_lock);
		list_del_init(&ci->i_snap_realm_item);
		spin_unlock(&realm->inodes_with_caps_lock);
		ceph_put_snap_realm(mdsc, realm);
	}

	kfree(ci->i_symlink);
	while ((n = rb_first(&ci->i_fragtree)) != NULL) {
		frag = rb_entry(n, struct ceph_inode_frag, node);
		rb_erase(n, &ci->i_fragtree);
		kfree(frag);
	}

	__ceph_destroy_xattrs(ci);
	if (ci->i_xattrs.blob)
		ceph_buffer_put(ci->i_xattrs.blob);
	if (ci->i_xattrs.prealloc_blob)
		ceph_buffer_put(ci->i_xattrs.prealloc_blob);

	call_rcu(&inode->i_rcu, ceph_i_callback);
}

int ceph_drop_inode(struct inode *inode)
{
	/*
	 * Positive dentry and corresponding inode are always accompanied
	 * in MDS reply.  So no need to keep inode in the cache after
	 * dropping all its aliases.
	 */
	return 1;
}

/*
 * Helpers to fill in size, ctime, mtime, and atime.  We have to be
 * careful because either the client or MDS may have more up to date
 * info, depending on which capabilities are held, and whether
 * time_warp_seq or truncate_seq have increased.  (Ordinarily, mtime
 * and size are monotonically increasing, except when utimes() or
 * truncate() increments the corresponding _seq values.)
 */
int ceph_fill_file_size(struct inode *inode, int issued,
			u32 truncate_seq, u64 truncate_size, u64 size)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int queue_trunc = 0;

	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 ||
	    (truncate_seq == ci->i_truncate_seq && size > inode->i_size)) {
		dout("size %lld -> %llu\n", inode->i_size, size);
		if (size > 0 && S_ISDIR(inode->i_mode)) {
			pr_err("fill_file_size non-zero size for directory\n");
			size = 0;
		}
		i_size_write(inode, size);
		inode->i_blocks = (size + (1<<9) - 1) >> 9;
		ci->i_reported_size = size;
		if (truncate_seq != ci->i_truncate_seq) {
			dout("truncate_seq %u -> %u\n",
			     ci->i_truncate_seq, truncate_seq);
			ci->i_truncate_seq = truncate_seq;

			/* the MDS should have revoked these caps */
			WARN_ON_ONCE(issued & (CEPH_CAP_FILE_EXCL |
					       CEPH_CAP_FILE_RD |
					       CEPH_CAP_FILE_WR |
					       CEPH_CAP_FILE_LAZYIO));
			/*
			 * If we hold relevant caps, or in the case where we're
			 * not the only client referencing this file and we
			 * don't hold those caps, then we need to check whether
			 * the file is either opened or mmaped
			 */
			if ((issued & (CEPH_CAP_FILE_CACHE|
				       CEPH_CAP_FILE_BUFFER)) ||
			    mapping_mapped(inode->i_mapping) ||
			    __ceph_caps_file_wanted(ci)) {
				ci->i_truncate_pending++;
				queue_trunc = 1;
			}
		}
	}
	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) >= 0 &&
	    ci->i_truncate_size != truncate_size) {
		dout("truncate_size %lld -> %llu\n", ci->i_truncate_size,
		     truncate_size);
		ci->i_truncate_size = truncate_size;
	}

	if (queue_trunc)
		ceph_fscache_invalidate(inode);

	return queue_trunc;
}

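/*
 * Worked example (added for exposition, numbers assumed): with
 * i_truncate_seq == 2 and i_size == 4096, an MDS report of
 * (truncate_seq=2, size=8192) grows the file in place, while
 * (truncate_seq=3, size=0) bumps i_truncate_seq and, if the file is
 * open/mmaped or we hold FILE_CACHE|FILE_BUFFER, queues an async
 * vmtruncate; this is why inode.c never shrinks the pagecache directly
 * from the message handler path.
 */
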
void ceph_fill_file_time(struct inode *inode, int issued,
			 u64 time_warp_seq, struct timespec *ctime,
			 struct timespec *mtime, struct timespec *atime)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int warn = 0;

	if (issued & (CEPH_CAP_FILE_EXCL|
		      CEPH_CAP_FILE_WR|
		      CEPH_CAP_FILE_BUFFER|
		      CEPH_CAP_AUTH_EXCL|
		      CEPH_CAP_XATTR_EXCL)) {
		if (timespec_compare(ctime, &inode->i_ctime) > 0) {
			dout("ctime %ld.%09ld -> %ld.%09ld inc w/ cap\n",
			     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
			     ctime->tv_sec, ctime->tv_nsec);
			inode->i_ctime = *ctime;
		}
		if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
			/* the MDS did a utimes() */
			dout("mtime %ld.%09ld -> %ld.%09ld "
			     "tw %d -> %d\n",
			     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
			     mtime->tv_sec, mtime->tv_nsec,
			     ci->i_time_warp_seq, (int)time_warp_seq);

			inode->i_mtime = *mtime;
			inode->i_atime = *atime;
			ci->i_time_warp_seq = time_warp_seq;
		} else if (time_warp_seq == ci->i_time_warp_seq) {
			/* nobody did utimes(); take the max */
			if (timespec_compare(mtime, &inode->i_mtime) > 0) {
				dout("mtime %ld.%09ld -> %ld.%09ld inc\n",
				     inode->i_mtime.tv_sec,
				     inode->i_mtime.tv_nsec,
				     mtime->tv_sec, mtime->tv_nsec);
				inode->i_mtime = *mtime;
			}
			if (timespec_compare(atime, &inode->i_atime) > 0) {
				dout("atime %ld.%09ld -> %ld.%09ld inc\n",
				     inode->i_atime.tv_sec,
				     inode->i_atime.tv_nsec,
				     atime->tv_sec, atime->tv_nsec);
				inode->i_atime = *atime;
			}
		} else if (issued & CEPH_CAP_FILE_EXCL) {
			/* we did a utimes(); ignore mds values */
		} else {
			warn = 1;
		}
	} else {
		/* we have no write|excl caps; whatever the MDS says is true */
		if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) {
			inode->i_ctime = *ctime;
			inode->i_mtime = *mtime;
			inode->i_atime = *atime;
			ci->i_time_warp_seq = time_warp_seq;
		} else {
			warn = 1;
		}
	}
	if (warn) /* time_warp_seq shouldn't go backwards */
		dout("%p mds time_warp_seq %llu < %u\n",
		     inode, time_warp_seq, ci->i_time_warp_seq);
}

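/*
 * Illustrative summary (added for exposition): with write/excl caps
 * held, mtime/atime normally only move forward ("take the max"); a
 * larger time_warp_seq from the MDS signals an explicit utimes() and
 * forces the MDS values even if they go backwards.  E.g. (values
 * assumed) local mtime 100s @ tw=5 plus an MDS report of mtime 50s @
 * tw=6 rewinds mtime to 50s, while the same report at tw=5 would be
 * ignored.
 */
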
/*
 * Populate an inode based on info from mds.  May be called on new or
 * existing inodes.
 */
static int fill_inode(struct inode *inode, struct page *locked_page,
		      struct ceph_mds_reply_info_in *iinfo,
		      struct ceph_mds_reply_dirfrag *dirinfo,
		      struct ceph_mds_session *session,
		      unsigned long ttl_from, int cap_fmode,
		      struct ceph_cap_reservation *caps_reservation)
{
	struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
	struct ceph_mds_reply_inode *info = iinfo->in;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int issued = 0, implemented, new_issued;
	struct timespec mtime, atime, ctime;
	struct ceph_buffer *xattr_blob = NULL;
	struct ceph_cap *new_cap = NULL;
	int err = 0;
	bool wake = false;
	bool queue_trunc = false;
	bool new_version = false;
	bool fill_inline = false;

	dout("fill_inode %p ino %llx.%llx v %llu had %llu\n",
	     inode, ceph_vinop(inode), le64_to_cpu(info->version),
	     ci->i_version);

	/* prealloc new cap struct */
	if (info->cap.caps && ceph_snap(inode) == CEPH_NOSNAP)
		new_cap = ceph_get_cap(mdsc, caps_reservation);

	/*
	 * prealloc xattr data, if it looks like we'll need it.  only
	 * if len > 4 (meaning there are actually xattrs; the first 4
	 * bytes are the xattr count).
	 */
	if (iinfo->xattr_len > 4) {
		xattr_blob = ceph_buffer_new(iinfo->xattr_len, GFP_NOFS);
		if (!xattr_blob)
			pr_err("fill_inode ENOMEM xattr blob %d bytes\n",
			       iinfo->xattr_len);
	}

	spin_lock(&ci->i_ceph_lock);

	/*
	 * provided version will be odd if inode value is projected,
	 * even if stable.  skip the update if we have newer stable
	 * info (ours>=theirs, e.g. due to racing mds replies), unless
	 * we are getting projected (unstable) info (in which case the
	 * version is odd, and we want ours>theirs).
	 *   us   them
	 *   2    2     skip
	 *   3    2     skip
	 *   3    3     update
	 */
	if (ci->i_version == 0 ||
	    ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
	     le64_to_cpu(info->version) > (ci->i_version & ~1)))
		new_version = true;

	issued = __ceph_caps_issued(ci, &implemented);
	issued |= implemented | __ceph_caps_dirty(ci);
	new_issued = ~issued & le32_to_cpu(info->cap.caps);

	/* update inode */
	ci->i_version = le64_to_cpu(info->version);
	inode->i_version++;
	inode->i_rdev = le32_to_cpu(info->rdev);
	inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;

	if ((new_version || (new_issued & CEPH_CAP_AUTH_SHARED)) &&
	    (issued & CEPH_CAP_AUTH_EXCL) == 0) {
		inode->i_mode = le32_to_cpu(info->mode);
		inode->i_uid = make_kuid(&init_user_ns, le32_to_cpu(info->uid));
		inode->i_gid = make_kgid(&init_user_ns, le32_to_cpu(info->gid));
		dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
		     from_kuid(&init_user_ns, inode->i_uid),
		     from_kgid(&init_user_ns, inode->i_gid));
	}

	if ((new_version || (new_issued & CEPH_CAP_LINK_SHARED)) &&
	    (issued & CEPH_CAP_LINK_EXCL) == 0)
		set_nlink(inode, le32_to_cpu(info->nlink));

	if (new_version || (new_issued & CEPH_CAP_ANY_RD)) {
		/* be careful with mtime, atime, size */
		ceph_decode_timespec(&atime, &info->atime);
		ceph_decode_timespec(&mtime, &info->mtime);
		ceph_decode_timespec(&ctime, &info->ctime);
		ceph_fill_file_time(inode, issued,
				    le32_to_cpu(info->time_warp_seq),
				    &ctime, &mtime, &atime);
	}

	if (new_version ||
	    (new_issued & (CEPH_CAP_ANY_FILE_RD | CEPH_CAP_ANY_FILE_WR))) {
		if (ci->i_layout.fl_pg_pool != info->layout.fl_pg_pool)
			ci->i_ceph_flags &= ~CEPH_I_POOL_PERM;
		ci->i_layout = info->layout;
		ci->i_pool_ns_len = iinfo->pool_ns_len;

		queue_trunc = ceph_fill_file_size(inode, issued,
					le32_to_cpu(info->truncate_seq),
					le64_to_cpu(info->truncate_size),
					le64_to_cpu(info->size));
		/* only update max_size on auth cap */
		if ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
		    ci->i_max_size != le64_to_cpu(info->max_size)) {
			dout("max_size %lld -> %llu\n", ci->i_max_size,
			     le64_to_cpu(info->max_size));
			ci->i_max_size = le64_to_cpu(info->max_size);
		}
	}

	/* xattrs */
	/* note that if i_xattrs.len <= 4, i_xattrs.data will still be NULL. */
	if ((ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL)) &&
	    le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
		if (ci->i_xattrs.blob)
			ceph_buffer_put(ci->i_xattrs.blob);
		ci->i_xattrs.blob = xattr_blob;
		if (xattr_blob)
			memcpy(ci->i_xattrs.blob->vec.iov_base,
			       iinfo->xattr_data, iinfo->xattr_len);
		ci->i_xattrs.version = le64_to_cpu(info->xattr_version);
		ceph_forget_all_cached_acls(inode);
		xattr_blob = NULL;
	}

	inode->i_mapping->a_ops = &ceph_aops;

	switch (inode->i_mode & S_IFMT) {
	case S_IFIFO:
	case S_IFBLK:
	case S_IFCHR:
	case S_IFSOCK:
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
		inode->i_op = &ceph_file_iops;
		break;
	case S_IFREG:
		inode->i_op = &ceph_file_iops;
		inode->i_fop = &ceph_file_fops;
		break;
	case S_IFLNK:
		inode->i_op = &ceph_symlink_iops;
		if (!ci->i_symlink) {
			u32 symlen = iinfo->symlink_len;
			char *sym;

			spin_unlock(&ci->i_ceph_lock);

			err = -EINVAL;
			if (WARN_ON(symlen != i_size_read(inode)))
				goto out;

			err = -ENOMEM;
			sym = kstrndup(iinfo->symlink, symlen, GFP_NOFS);
			if (!sym)
				goto out;

			spin_lock(&ci->i_ceph_lock);
			if (!ci->i_symlink)
				ci->i_symlink = sym;
			else
				kfree(sym); /* lost a race */
		}
		inode->i_link = ci->i_symlink;
		break;
	case S_IFDIR:
		inode->i_op = &ceph_dir_iops;
		inode->i_fop = &ceph_dir_fops;

		ci->i_dir_layout = iinfo->dir_layout;

		ci->i_files = le64_to_cpu(info->files);
		ci->i_subdirs = le64_to_cpu(info->subdirs);
		ci->i_rbytes = le64_to_cpu(info->rbytes);
		ci->i_rfiles = le64_to_cpu(info->rfiles);
		ci->i_rsubdirs = le64_to_cpu(info->rsubdirs);
		ceph_decode_timespec(&ci->i_rctime, &info->rctime);
		break;
	default:
		pr_err("fill_inode %llx.%llx BAD mode 0%o\n",
		       ceph_vinop(inode), inode->i_mode);
	}

	/* were we issued a capability? */
	if (info->cap.caps) {
		if (ceph_snap(inode) == CEPH_NOSNAP) {
			unsigned caps = le32_to_cpu(info->cap.caps);
			ceph_add_cap(inode, session,
				     le64_to_cpu(info->cap.cap_id),
				     cap_fmode, caps,
				     le32_to_cpu(info->cap.wanted),
				     le32_to_cpu(info->cap.seq),
				     le32_to_cpu(info->cap.mseq),
				     le64_to_cpu(info->cap.realm),
				     info->cap.flags, &new_cap);

			/* set dir completion flag? */
			if (S_ISDIR(inode->i_mode) &&
			    ci->i_files == 0 && ci->i_subdirs == 0 &&
			    (caps & CEPH_CAP_FILE_SHARED) &&
			    (issued & CEPH_CAP_FILE_EXCL) == 0 &&
			    !__ceph_dir_is_complete(ci)) {
				dout(" marking %p complete (empty)\n", inode);
				i_size_write(inode, 0);
				__ceph_dir_set_complete(ci,
					atomic64_read(&ci->i_release_count),
					atomic64_read(&ci->i_ordered_count));
			}

			wake = true;
		} else {
			dout(" %p got snap_caps %s\n", inode,
			     ceph_cap_string(le32_to_cpu(info->cap.caps)));
			ci->i_snap_caps |= le32_to_cpu(info->cap.caps);
			if (cap_fmode >= 0)
				__ceph_get_fmode(ci, cap_fmode);
		}
	} else if (cap_fmode >= 0) {
		pr_warn("mds issued no caps on %llx.%llx\n",
			ceph_vinop(inode));
		__ceph_get_fmode(ci, cap_fmode);
	}

	if (iinfo->inline_version > 0 &&
	    iinfo->inline_version >= ci->i_inline_version) {
		int cache_caps = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
		ci->i_inline_version = iinfo->inline_version;
		if (ci->i_inline_version != CEPH_INLINE_NONE &&
		    (locked_page ||
		     (le32_to_cpu(info->cap.caps) & cache_caps)))
			fill_inline = true;
	}

	spin_unlock(&ci->i_ceph_lock);

	if (fill_inline)
		ceph_fill_inline_data(inode, locked_page,
				      iinfo->inline_data, iinfo->inline_len);

	if (wake)
		wake_up_all(&ci->i_cap_wq);

	/* queue truncate if we saw i_size decrease */
	if (queue_trunc)
		ceph_queue_vmtruncate(inode);

	/* populate frag tree */
	if (S_ISDIR(inode->i_mode))
		ceph_fill_fragtree(inode, &info->fragtree, dirinfo);

	/* update delegation info? */
	if (dirinfo)
		ceph_fill_dirfrag(inode, dirinfo);

	err = 0;
out:
	if (new_cap)
		ceph_put_cap(mdsc, new_cap);
	if (xattr_blob)
		ceph_buffer_put(xattr_blob);
	return err;
}

/*
 * caller should hold session s_mutex.
 */
static void update_dentry_lease(struct dentry *dentry,
				struct ceph_mds_reply_lease *lease,
				struct ceph_mds_session *session,
				unsigned long from_time)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	long unsigned duration = le32_to_cpu(lease->duration_ms);
	long unsigned ttl = from_time + (duration * HZ) / 1000;
	long unsigned half_ttl = from_time + (duration * HZ / 2) / 1000;
	struct inode *dir;

	/* only track leases on regular dentries */
	if (dentry->d_op != &ceph_dentry_ops)
		return;

	spin_lock(&dentry->d_lock);
	dout("update_dentry_lease %p duration %lu ms ttl %lu\n",
	     dentry, duration, ttl);

	/* make lease_rdcache_gen match directory */
	dir = d_inode(dentry->d_parent);
	di->lease_shared_gen = ceph_inode(dir)->i_shared_gen;

	if (duration == 0)
		goto out_unlock;

	if (di->lease_gen == session->s_cap_gen &&
	    time_before(ttl, dentry->d_time))
		goto out_unlock;  /* we already have a newer lease. */

	if (di->lease_session && di->lease_session != session)
		goto out_unlock;

	ceph_dentry_lru_touch(dentry);

	if (!di->lease_session)
		di->lease_session = ceph_get_mds_session(session);
	di->lease_gen = session->s_cap_gen;
	di->lease_seq = le32_to_cpu(lease->seq);
	di->lease_renew_after = half_ttl;
	di->lease_renew_from = 0;
	dentry->d_time = ttl;
out_unlock:
	spin_unlock(&dentry->d_lock);
	return;
}

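/*
 * Worked example (added for exposition, HZ value assumed): with HZ=250
 * and an MDS lease of duration_ms=30000, the code above yields
 * ttl = from_time + 7500 jiffies and half_ttl = from_time + 3750
 * jiffies, so renewal is considered once half the lease has elapsed
 * and the dentry lease expires at ttl unless renewed.
 */
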
/*
 * splice a dentry to an inode.
 * caller must hold directory i_mutex for this to be safe.
 */
static struct dentry *splice_dentry(struct dentry *dn, struct inode *in)
{
	struct dentry *realdn;

	BUG_ON(d_inode(dn));

	/* dn must be unhashed */
	if (!d_unhashed(dn))
		d_drop(dn);
	realdn = d_splice_alias(in, dn);
	if (IS_ERR(realdn)) {
		pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n",
		       PTR_ERR(realdn), dn, in, ceph_vinop(in));
		dn = realdn; /* note realdn contains the error */
		goto out;
	} else if (realdn) {
		dout("dn %p (%d) spliced with %p (%d) "
		     "inode %p ino %llx.%llx\n",
		     dn, d_count(dn),
		     realdn, d_count(realdn),
		     d_inode(realdn), ceph_vinop(d_inode(realdn)));
		dput(dn);
		dn = realdn;
	} else {
		BUG_ON(!ceph_dentry(dn));
		dout("dn %p attached to %p ino %llx.%llx\n",
		     dn, d_inode(dn), ceph_vinop(d_inode(dn)));
	}
out:
	return dn;
}

/*
 * Incorporate results into the local cache.  This is either just
 * one inode, or a directory, dentry, and possibly linked-to inode (e.g.,
 * after a lookup).
 *
 * A reply may contain
 *         a directory inode along with a dentry.
 *   and/or a target inode
 *
 * Called with snap_rwsem (read).
 */
int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
		    struct ceph_mds_session *session)
{
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	struct inode *in = NULL;
	struct ceph_vino vino;
	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
	int err = 0;

	dout("fill_trace %p is_dentry %d is_target %d\n", req,
	     rinfo->head->is_dentry, rinfo->head->is_target);

#if 0
	/*
	 * Debugging hook:
	 *
	 * If we resend completed ops to a recovering mds, we get no
	 * trace.  Since that is very rare, pretend this is the case
	 * to ensure the 'no trace' handlers in the callers behave.
	 *
	 * Fill in inodes unconditionally to avoid breaking cap
	 * invariants.
	 */
	if (rinfo->head->op & CEPH_MDS_OP_WRITE) {
		pr_info("fill_trace faking empty trace on %lld %s\n",
			req->r_tid, ceph_mds_op_name(rinfo->head->op));
		if (rinfo->head->is_dentry) {
			rinfo->head->is_dentry = 0;
			err = fill_inode(req->r_locked_dir,
					 &rinfo->diri, rinfo->dirfrag,
					 session, req->r_request_started, -1);
		}
		if (rinfo->head->is_target) {
			rinfo->head->is_target = 0;
			ininfo = rinfo->targeti.in;
			vino.ino = le64_to_cpu(ininfo->ino);
			vino.snap = le64_to_cpu(ininfo->snapid);
			in = ceph_get_inode(sb, vino);
			err = fill_inode(in, &rinfo->targeti, NULL,
					 session, req->r_request_started,
					 req->r_fmode);
			iput(in);
		}
	}
#endif

	if (!rinfo->head->is_target && !rinfo->head->is_dentry) {
		dout("fill_trace reply is empty!\n");
		if (rinfo->head->result == 0 && req->r_locked_dir)
			ceph_invalidate_dir_request(req);
		return 0;
	}

	if (rinfo->head->is_dentry) {
		struct inode *dir = req->r_locked_dir;

		if (dir) {
			err = fill_inode(dir, NULL,
					 &rinfo->diri, rinfo->dirfrag,
					 session, req->r_request_started, -1,
					 &req->r_caps_reservation);
			if (err < 0)
				goto done;
		} else {
			WARN_ON_ONCE(1);
		}

		if (dir && req->r_op == CEPH_MDS_OP_LOOKUPNAME) {
			struct qstr dname;
			struct dentry *dn, *parent;

			BUG_ON(!rinfo->head->is_target);
			BUG_ON(req->r_dentry);

			parent = d_find_any_alias(dir);
			BUG_ON(!parent);

			dname.name = rinfo->dname;
			dname.len = rinfo->dname_len;
			dname.hash = full_name_hash(dname.name, dname.len);
			vino.ino = le64_to_cpu(rinfo->targeti.in->ino);
			vino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
retry_lookup:
			dn = d_lookup(parent, &dname);
			dout("d_lookup on parent=%p name=%.*s got %p\n",
			     parent, dname.len, dname.name, dn);

			if (!dn) {
				dn = d_alloc(parent, &dname);
				dout("d_alloc %p '%.*s' = %p\n", parent,
				     dname.len, dname.name, dn);
				if (dn == NULL) {
					dput(parent);
					err = -ENOMEM;
					goto done;
				}
				err = ceph_init_dentry(dn);
				if (err < 0) {
					dput(dn);
					dput(parent);
					goto done;
				}
			} else if (d_really_is_positive(dn) &&
				   (ceph_ino(d_inode(dn)) != vino.ino ||
				    ceph_snap(d_inode(dn)) != vino.snap)) {
				dout(" dn %p points to wrong inode %p\n",
				     dn, d_inode(dn));
				d_delete(dn);
				dput(dn);
				goto retry_lookup;
			}

			req->r_dentry = dn;
			dput(parent);
		}
	}

	if (rinfo->head->is_target) {
		vino.ino = le64_to_cpu(rinfo->targeti.in->ino);
		vino.snap = le64_to_cpu(rinfo->targeti.in->snapid);

		in = ceph_get_inode(sb, vino);
		if (IS_ERR(in)) {
			err = PTR_ERR(in);
			goto done;
		}
		req->r_target_inode = in;

		err = fill_inode(in, req->r_locked_page, &rinfo->targeti, NULL,
				 session, req->r_request_started,
				 (!req->r_aborted && rinfo->head->result == 0) ?
				 req->r_fmode : -1,
				 &req->r_caps_reservation);
		if (err < 0) {
			pr_err("fill_inode badness %p %llx.%llx\n",
			       in, ceph_vinop(in));
			goto done;
		}
	}

	/*
	 * ignore null lease/binding on snapdir ENOENT, or else we
	 * will have trouble splicing in the virtual snapdir later
	 */
	if (rinfo->head->is_dentry && !req->r_aborted &&
	    req->r_locked_dir &&
	    (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name,
					       fsc->mount_options->snapdir_name,
					       req->r_dentry->d_name.len))) {
		/*
		 * lookup link rename   : null -> possibly existing inode
		 * mknod symlink mkdir  : null -> new inode
		 * unlink               : linked -> null
		 */
		struct inode *dir = req->r_locked_dir;
		struct dentry *dn = req->r_dentry;
		bool have_dir_cap, have_lease;

		BUG_ON(!dn);
		BUG_ON(!dir);
		BUG_ON(d_inode(dn->d_parent) != dir);
		BUG_ON(ceph_ino(dir) !=
		       le64_to_cpu(rinfo->diri.in->ino));
		BUG_ON(ceph_snap(dir) !=
		       le64_to_cpu(rinfo->diri.in->snapid));

		/* do we have a lease on the whole dir? */
		have_dir_cap =
			(le32_to_cpu(rinfo->diri.in->cap.caps) &
			 CEPH_CAP_FILE_SHARED);

		/* do we have a dn lease? */
		have_lease = have_dir_cap ||
			le32_to_cpu(rinfo->dlease->duration_ms);
		if (!have_lease)
			dout("fill_trace no dentry lease or dir cap\n");

		/* rename? */
		if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) {
			struct inode *olddir = req->r_old_dentry_dir;
			BUG_ON(!olddir);

			dout(" src %p '%pd' dst %p '%pd'\n",
			     req->r_old_dentry,
			     req->r_old_dentry,
			     dn, dn);
			dout("fill_trace doing d_move %p -> %p\n",
			     req->r_old_dentry, dn);

			/* d_move screws up sibling dentries' offsets */
			ceph_dir_clear_ordered(dir);
			ceph_dir_clear_ordered(olddir);

			d_move(req->r_old_dentry, dn);
			dout(" src %p '%pd' dst %p '%pd'\n",
			     req->r_old_dentry,
			     req->r_old_dentry,
			     dn, dn);

			/* ensure target dentry is invalidated, despite
			   rehashing bug in vfs_rename_dir */
			ceph_invalidate_dentry_lease(dn);

			dout("dn %p gets new offset %lld\n", req->r_old_dentry,
			     ceph_dentry(req->r_old_dentry)->offset);

			dn = req->r_old_dentry;  /* use old_dentry */
		}

		/* null dentry? */
		if (!rinfo->head->is_target) {
			dout("fill_trace null dentry\n");
			if (d_really_is_positive(dn)) {
				ceph_dir_clear_ordered(dir);
				dout("d_delete %p\n", dn);
				d_delete(dn);
			} else {
				if (have_lease && d_unhashed(dn))
					d_add(dn, NULL);
				update_dentry_lease(dn, rinfo->dlease,
						    session,
						    req->r_request_started);
			}
			goto done;
		}

		/* attach proper inode */
		if (d_really_is_negative(dn)) {
			ceph_dir_clear_ordered(dir);
			ihold(in);
			dn = splice_dentry(dn, in);
			if (IS_ERR(dn)) {
				err = PTR_ERR(dn);
				goto done;
			}
			req->r_dentry = dn;  /* may have spliced */
		} else if (d_really_is_positive(dn) && d_inode(dn) != in) {
			dout(" %p links to %p %llx.%llx, not %llx.%llx\n",
			     dn, d_inode(dn), ceph_vinop(d_inode(dn)),
			     ceph_vinop(in));
			have_lease = false;
		}

		if (have_lease)
			update_dentry_lease(dn, rinfo->dlease, session,
					    req->r_request_started);
		dout(" final dn %p\n", dn);
	} else if (!req->r_aborted &&
		   (req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
		    req->r_op == CEPH_MDS_OP_MKSNAP)) {
		struct dentry *dn = req->r_dentry;
		struct inode *dir = req->r_locked_dir;

		/* fill out a snapdir LOOKUPSNAP dentry */
		BUG_ON(!dn);
		BUG_ON(!dir);
		BUG_ON(ceph_snap(dir) != CEPH_SNAPDIR);
		dout(" linking snapped dir %p to dn %p\n", in, dn);
		ceph_dir_clear_ordered(dir);
		ihold(in);
		dn = splice_dentry(dn, in);
		if (IS_ERR(dn)) {
			err = PTR_ERR(dn);
			goto done;
		}
		req->r_dentry = dn;  /* may have spliced */
	}
done:
	dout("fill_trace done err=%d\n", err);
	return err;
}

/*
 * Prepopulate our cache with readdir results, leases, etc.
 */
static int readdir_prepopulate_inodes_only(struct ceph_mds_request *req,
					   struct ceph_mds_session *session)
{
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	int i, err = 0;

	for (i = 0; i < rinfo->dir_nr; i++) {
		struct ceph_vino vino;
		struct inode *in;
		int rc;

		vino.ino = le64_to_cpu(rinfo->dir_in[i].in->ino);
		vino.snap = le64_to_cpu(rinfo->dir_in[i].in->snapid);

		in = ceph_get_inode(req->r_dentry->d_sb, vino);
		if (IS_ERR(in)) {
			err = PTR_ERR(in);
			dout("new_inode badness got %d\n", err);
			continue;
		}
		rc = fill_inode(in, NULL, &rinfo->dir_in[i], NULL, session,
				req->r_request_started, -1,
				&req->r_caps_reservation);
		if (rc < 0) {
			pr_err("fill_inode badness on %p got %d\n", in, rc);
			err = rc;
			continue;
		}
	}

	return err;
}

void ceph_readdir_cache_release(struct ceph_readdir_cache_control *ctl)
{
	if (ctl->page) {
		kunmap(ctl->page);
		page_cache_release(ctl->page);
		ctl->page = NULL;
	}
}

static int fill_readdir_cache(struct inode *dir, struct dentry *dn,
			      struct ceph_readdir_cache_control *ctl,
			      struct ceph_mds_request *req)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	unsigned nsize = PAGE_CACHE_SIZE / sizeof(struct dentry*);
	unsigned idx = ctl->index % nsize;
	pgoff_t pgoff = ctl->index / nsize;

	if (!ctl->page || pgoff != page_index(ctl->page)) {
		ceph_readdir_cache_release(ctl);
		if (idx == 0)
			ctl->page = grab_cache_page(&dir->i_data, pgoff);
		else
			ctl->page = find_lock_page(&dir->i_data, pgoff);
		if (!ctl->page) {
			ctl->index = -1;
			return idx == 0 ? -ENOMEM : 0;
		}
		/* reading/filling the cache are serialized by
		 * i_mutex, no need to use page lock */
		unlock_page(ctl->page);
		ctl->dentries = kmap(ctl->page);
		if (idx == 0)
			memset(ctl->dentries, 0, PAGE_CACHE_SIZE);
	}

	if (req->r_dir_release_cnt == atomic64_read(&ci->i_release_count) &&
	    req->r_dir_ordered_cnt == atomic64_read(&ci->i_ordered_count)) {
		dout("readdir cache dn %p idx %d\n", dn, ctl->index);
		ctl->dentries[idx] = dn;
		ctl->index++;
	} else {
		dout("disable readdir cache\n");
		ctl->index = -1;
	}
	return 0;
}

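/*
 * Worked example (added for exposition, sizes assumed): with 4K pages
 * and 8-byte pointers, nsize above is 4096/8 = 512 dentry slots per
 * page, so cache index 1000 lands in page 1 (pgoff = 1000/512) at
 * slot 488 (idx = 1000 % 512).  Only a write at idx == 0 may create a
 * page; later slots must find one already in the page cache.
 */
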
int ceph_readdir_prepopulate(struct ceph_mds_request *req,
			     struct ceph_mds_session *session)
{
	struct dentry *parent = req->r_dentry;
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	struct qstr dname;
	struct dentry *dn;
	struct inode *in;
	int err = 0, ret, i;
	struct inode *snapdir = NULL;
	struct ceph_mds_request_head *rhead = req->r_request->front.iov_base;
	struct ceph_dentry_info *di;
	u32 frag = le32_to_cpu(rhead->args.readdir.frag);
	struct ceph_readdir_cache_control cache_ctl = {};

	if (req->r_aborted)
		return readdir_prepopulate_inodes_only(req, session);

	if (rinfo->dir_dir &&
	    le32_to_cpu(rinfo->dir_dir->frag) != frag) {
		dout("readdir_prepopulate got new frag %x -> %x\n",
		     frag, le32_to_cpu(rinfo->dir_dir->frag));
		frag = le32_to_cpu(rinfo->dir_dir->frag);
		if (ceph_frag_is_leftmost(frag))
			req->r_readdir_offset = 2;
		else
			req->r_readdir_offset = 0;
	}

	if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) {
		snapdir = ceph_get_snapdir(d_inode(parent));
		parent = d_find_alias(snapdir);
		dout("readdir_prepopulate %d items under SNAPDIR dn %p\n",
		     rinfo->dir_nr, parent);
	} else {
		dout("readdir_prepopulate %d items under dn %p\n",
		     rinfo->dir_nr, parent);
		if (rinfo->dir_dir)
			ceph_fill_dirfrag(d_inode(parent), rinfo->dir_dir);
	}

	if (ceph_frag_is_leftmost(frag) && req->r_readdir_offset == 2) {
		/* note dir version at start of readdir so we can tell
		 * if any dentries get dropped */
		struct ceph_inode_info *ci = ceph_inode(d_inode(parent));
		req->r_dir_release_cnt = atomic64_read(&ci->i_release_count);
		req->r_dir_ordered_cnt = atomic64_read(&ci->i_ordered_count);
		req->r_readdir_cache_idx = 0;
	}

	cache_ctl.index = req->r_readdir_cache_idx;

	/* FIXME: release caps/leases if error occurs */
	for (i = 0; i < rinfo->dir_nr; i++) {
		struct ceph_vino vino;

		dname.name = rinfo->dir_dname[i];
		dname.len = rinfo->dir_dname_len[i];
		dname.hash = full_name_hash(dname.name, dname.len);

		vino.ino = le64_to_cpu(rinfo->dir_in[i].in->ino);
		vino.snap = le64_to_cpu(rinfo->dir_in[i].in->snapid);

retry_lookup:
		dn = d_lookup(parent, &dname);
		dout("d_lookup on parent=%p name=%.*s got %p\n",
		     parent, dname.len, dname.name, dn);

		if (!dn) {
			dn = d_alloc(parent, &dname);
			dout("d_alloc %p '%.*s' = %p\n", parent,
			     dname.len, dname.name, dn);
			if (dn == NULL) {
				dout("d_alloc badness\n");
				err = -ENOMEM;
				goto out;
			}
			ret = ceph_init_dentry(dn);
			if (ret < 0) {
				dput(dn);
				err = ret;
				goto out;
			}
		} else if (d_really_is_positive(dn) &&
			   (ceph_ino(d_inode(dn)) != vino.ino ||
			    ceph_snap(d_inode(dn)) != vino.snap)) {
			dout(" dn %p points to wrong inode %p\n",
			     dn, d_inode(dn));
			d_delete(dn);
			dput(dn);
			goto retry_lookup;
		}

		/* inode */
		if (d_really_is_positive(dn)) {
			in = d_inode(dn);
		} else {
			in = ceph_get_inode(parent->d_sb, vino);
			if (IS_ERR(in)) {
				dout("new_inode badness\n");
				d_drop(dn);
				dput(dn);
				err = PTR_ERR(in);
				goto out;
			}
		}

		ret = fill_inode(in, NULL, &rinfo->dir_in[i], NULL, session,
				 req->r_request_started, -1,
				 &req->r_caps_reservation);
		if (ret < 0) {
			pr_err("fill_inode badness on %p\n", in);
			if (d_really_is_negative(dn))
				iput(in);
			d_drop(dn);
			err = ret;
			goto next_item;
		}

		if (d_really_is_negative(dn)) {
			struct dentry *realdn = splice_dentry(dn, in);
			if (IS_ERR(realdn)) {
				err = PTR_ERR(realdn);
				d_drop(dn);
				dn = NULL;
				goto next_item;
			}
			dn = realdn;
		}

		di = dn->d_fsdata;
		di->offset = ceph_make_fpos(frag, i + req->r_readdir_offset);

		update_dentry_lease(dn, rinfo->dir_dlease[i],
				    req->r_session,
				    req->r_request_started);

		if (err == 0 && cache_ctl.index >= 0) {
			ret = fill_readdir_cache(d_inode(parent), dn,
						 &cache_ctl, req);
			if (ret < 0)
				err = ret;
		}
next_item:
		if (dn)
			dput(dn);
	}
out:
	if (err == 0) {
		req->r_did_prepopulate = true;
		req->r_readdir_cache_idx = cache_ctl.index;
	}
	ceph_readdir_cache_release(&cache_ctl);
	if (snapdir) {
		iput(snapdir);
		dput(parent);
	}
	dout("readdir_prepopulate done\n");
	return err;
}

int ceph_inode_set_size(struct inode *inode, loff_t size)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret = 0;

	spin_lock(&ci->i_ceph_lock);
	dout("set_size %p %llu -> %llu\n", inode, inode->i_size, size);
	i_size_write(inode, size);
	inode->i_blocks = (size + (1 << 9) - 1) >> 9;

	/* tell the MDS if we are approaching max_size */
	if ((size << 1) >= ci->i_max_size &&
	    (ci->i_reported_size << 1) < ci->i_max_size)
		ret = 1;

	spin_unlock(&ci->i_ceph_lock);
	return ret;
}

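/*
 * Worked example (added for exposition, numbers assumed): with
 * max_size = 4MB and i_reported_size = 1MB, growing to size = 2MB
 * makes (size << 1) == max_size while (reported << 1) < max_size, so
 * the caller is told to report to the MDS; once the MDS grants a
 * larger max_size, the same test stays false until the file again
 * crosses the halfway point.
 */
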
1564/*
1565 * Write back inode data in a worker thread. (This can't be done
1566 * in the message handler context.)
1567 */
3c6f6b79
SW
1568void ceph_queue_writeback(struct inode *inode)
1569{
15a2015f 1570 ihold(inode);
3c6f6b79
SW
1571 if (queue_work(ceph_inode_to_client(inode)->wb_wq,
1572 &ceph_inode(inode)->i_wb_work)) {
2c27c9a5 1573 dout("ceph_queue_writeback %p\n", inode);
3c6f6b79 1574 } else {
2c27c9a5 1575 dout("ceph_queue_writeback %p failed\n", inode);
15a2015f 1576 iput(inode);
3c6f6b79
SW
1577 }
1578}
1579
1580static void ceph_writeback_work(struct work_struct *work)
355da1eb
SW
1581{
1582 struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
1583 i_wb_work);
1584 struct inode *inode = &ci->vfs_inode;
1585
1586 dout("writeback %p\n", inode);
1587 filemap_fdatawrite(&inode->i_data);
1588 iput(inode);
1589}
1590
3c6f6b79
SW
1591/*
1592 * queue an async invalidation
1593 */
1594void ceph_queue_invalidate(struct inode *inode)
1595{
15a2015f 1596 ihold(inode);
3c6f6b79
SW
1597 if (queue_work(ceph_inode_to_client(inode)->pg_inv_wq,
1598 &ceph_inode(inode)->i_pg_inv_work)) {
1599 dout("ceph_queue_invalidate %p\n", inode);
3c6f6b79
SW
1600 } else {
1601 dout("ceph_queue_invalidate %p failed\n", inode);
15a2015f 1602 iput(inode);
3c6f6b79
SW
1603 }
1604}
1605
355da1eb
SW
1606/*
1607 * Invalidate inode pages in a worker thread. (This can't be done
1608 * in the message handler context.)
1609 */
3c6f6b79 1610static void ceph_invalidate_work(struct work_struct *work)
355da1eb
SW
1611{
1612 struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
1613 i_pg_inv_work);
1614 struct inode *inode = &ci->vfs_inode;
1615 u32 orig_gen;
1616 int check = 0;
1617
b0d7c223 1618 mutex_lock(&ci->i_truncate_mutex);
be655596 1619 spin_lock(&ci->i_ceph_lock);
355da1eb
SW
1620 dout("invalidate_pages %p gen %d revoking %d\n", inode,
1621 ci->i_rdcache_gen, ci->i_rdcache_revoking);
cd045cb4 1622 if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
9563f88c
YZ
1623 if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE))
1624 check = 1;
be655596 1625 spin_unlock(&ci->i_ceph_lock);
b0d7c223 1626 mutex_unlock(&ci->i_truncate_mutex);
355da1eb
SW
1627 goto out;
1628 }
1629 orig_gen = ci->i_rdcache_gen;
be655596 1630 spin_unlock(&ci->i_ceph_lock);
355da1eb 1631
4e217b5d 1632 truncate_pagecache(inode, 0);
355da1eb 1633
be655596 1634 spin_lock(&ci->i_ceph_lock);
cd045cb4
SW
1635 if (orig_gen == ci->i_rdcache_gen &&
1636 orig_gen == ci->i_rdcache_revoking) {
355da1eb
SW
1637 dout("invalidate_pages %p gen %d successful\n", inode,
1638 ci->i_rdcache_gen);
cd045cb4 1639 ci->i_rdcache_revoking--;
355da1eb
SW
1640 check = 1;
1641 } else {
cd045cb4
SW
1642 dout("invalidate_pages %p gen %d raced, now %d revoking %d\n",
1643 inode, orig_gen, ci->i_rdcache_gen,
1644 ci->i_rdcache_revoking);
9563f88c
YZ
1645 if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE))
1646 check = 1;
355da1eb 1647 }
be655596 1648 spin_unlock(&ci->i_ceph_lock);
b0d7c223 1649 mutex_unlock(&ci->i_truncate_mutex);
9563f88c 1650out:
355da1eb
SW
1651 if (check)
1652 ceph_check_caps(ci, 0, NULL);
355da1eb
SW
1653 iput(inode);
1654}
1655
1656
1657/*
3f99969f 1658 * called by trunc_wq;
355da1eb
SW
1659 *
1660 * We also truncate in a separate thread as well.
1661 */
3c6f6b79 1662static void ceph_vmtruncate_work(struct work_struct *work)
355da1eb
SW
1663{
1664 struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
1665 i_vmtruncate_work);
1666 struct inode *inode = &ci->vfs_inode;
1667
1668 dout("vmtruncate_work %p\n", inode);
b415bf4f 1669 __ceph_do_pending_vmtruncate(inode);
355da1eb
SW
1670 iput(inode);
1671}
1672
3c6f6b79
SW
1673/*
1674 * Queue an async vmtruncate. If we fail to queue work, we will handle
1675 * the truncation the next time we call __ceph_do_pending_vmtruncate.
1676 */
1677void ceph_queue_vmtruncate(struct inode *inode)
1678{
1679 struct ceph_inode_info *ci = ceph_inode(inode);
1680
15a2015f 1681 ihold(inode);
99ccbd22 1682
640ef79d 1683 if (queue_work(ceph_sb_to_client(inode->i_sb)->trunc_wq,
3c6f6b79
SW
1684 &ci->i_vmtruncate_work)) {
1685 dout("ceph_queue_vmtruncate %p\n", inode);
3c6f6b79
SW
1686 } else {
1687 dout("ceph_queue_vmtruncate %p failed, pending=%d\n",
1688 inode, ci->i_truncate_pending);
15a2015f 1689 iput(inode);
3c6f6b79
SW
1690 }
1691}
1692
355da1eb 1693/*
355da1eb
SW
1694 * Make sure any pending truncation is applied before doing anything
1695 * that may depend on it.
1696 */
b415bf4f 1697void __ceph_do_pending_vmtruncate(struct inode *inode)
355da1eb
SW
1698{
1699 struct ceph_inode_info *ci = ceph_inode(inode);
1700 u64 to;
a85f50b6 1701 int wrbuffer_refs, finish = 0;
355da1eb 1702
b0d7c223 1703 mutex_lock(&ci->i_truncate_mutex);
355da1eb 1704retry:
be655596 1705 spin_lock(&ci->i_ceph_lock);
355da1eb
SW
1706 if (ci->i_truncate_pending == 0) {
1707 dout("__do_pending_vmtruncate %p none pending\n", inode);
be655596 1708 spin_unlock(&ci->i_ceph_lock);
b0d7c223 1709 mutex_unlock(&ci->i_truncate_mutex);
355da1eb
SW
1710 return;
1711 }
1712
1713 /*
1714 * make sure any dirty snapped pages are flushed before we
1715 * possibly truncate them.. so write AND block!
1716 */
1717 if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) {
1718 dout("__do_pending_vmtruncate %p flushing snaps first\n",
1719 inode);
be655596 1720 spin_unlock(&ci->i_ceph_lock);
355da1eb
SW
1721 filemap_write_and_wait_range(&inode->i_data, 0,
1722 inode->i_sb->s_maxbytes);
1723 goto retry;
1724 }
1725
b0d7c223
YZ
1726 /* there should be no reader or writer */
1727 WARN_ON_ONCE(ci->i_rd_ref || ci->i_wr_ref);
1728
355da1eb
SW
1729 to = ci->i_truncate_size;
1730 wrbuffer_refs = ci->i_wrbuffer_ref;
1731 dout("__do_pending_vmtruncate %p (%d) to %lld\n", inode,
1732 ci->i_truncate_pending, to);
be655596 1733 spin_unlock(&ci->i_ceph_lock);
355da1eb 1734
4e217b5d 1735 truncate_pagecache(inode, to);
355da1eb 1736
be655596 1737 spin_lock(&ci->i_ceph_lock);
a85f50b6
YZ
1738 if (to == ci->i_truncate_size) {
1739 ci->i_truncate_pending = 0;
1740 finish = 1;
1741 }
be655596 1742 spin_unlock(&ci->i_ceph_lock);
a85f50b6
YZ
1743 if (!finish)
1744 goto retry;
355da1eb 1745
b0d7c223
YZ
1746 mutex_unlock(&ci->i_truncate_mutex);
1747
355da1eb
SW
1748 if (wrbuffer_refs == 0)
1749 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
a85f50b6
YZ
1750
1751 wake_up_all(&ci->i_cap_wq);
355da1eb
SW
1752}
1753
/*
 * symlinks
 */
static const struct inode_operations ceph_symlink_iops = {
	.readlink = generic_readlink,
	.get_link = simple_get_link,
	.setattr = ceph_setattr,
	.getattr = ceph_getattr,
	.setxattr = ceph_setxattr,
	.getxattr = ceph_getxattr,
	.listxattr = ceph_listxattr,
	.removexattr = ceph_removexattr,
};

/*
 * setattr
 */
int ceph_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct ceph_inode_info *ci = ceph_inode(inode);
	const unsigned int ia_valid = attr->ia_valid;
	struct ceph_mds_request *req;
	struct ceph_mds_client *mdsc = ceph_sb_to_client(dentry->d_sb)->mdsc;
	struct ceph_cap_flush *prealloc_cf;
	int issued;
	int release = 0, dirtied = 0;
	int mask = 0;
	int err = 0;
	int inode_dirty_flags = 0;
	bool lock_snap_rwsem = false;

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

	err = inode_change_ok(inode, attr);
	if (err != 0)
		return err;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETATTR,
				       USE_AUTH_MDS);
	if (IS_ERR(req)) {
		ceph_free_cap_flush(prealloc_cf);
		return PTR_ERR(req);
	}

	spin_lock(&ci->i_ceph_lock);
	issued = __ceph_caps_issued(ci, NULL);

	if (!ci->i_head_snapc &&
	    (issued & (CEPH_CAP_ANY_EXCL | CEPH_CAP_FILE_WR))) {
		lock_snap_rwsem = true;
		if (!down_read_trylock(&mdsc->snap_rwsem)) {
			spin_unlock(&ci->i_ceph_lock);
			down_read(&mdsc->snap_rwsem);
			spin_lock(&ci->i_ceph_lock);
			/* caps may have changed while the lock was dropped */
			issued = __ceph_caps_issued(ci, NULL);
		}
	}

	dout("setattr %p issued %s\n", inode, ceph_cap_string(issued));

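	/*
	 * The per-field pattern below, distilled (explanatory summary,
	 * not from the original source):
	 *
	 *   exclusive cap issued          -> apply locally, mark cap dirty
	 *   shared cap missing, or the
	 *   value actually changes        -> add the field to the MDS
	 *                                    setattr request (mask/release)
	 *   otherwise                     -> nothing to do for this field
	 */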
	if (ia_valid & ATTR_UID) {
		dout("setattr %p uid %d -> %d\n", inode,
		     from_kuid(&init_user_ns, inode->i_uid),
		     from_kuid(&init_user_ns, attr->ia_uid));
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_uid = attr->ia_uid;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   !uid_eq(attr->ia_uid, inode->i_uid)) {
			req->r_args.setattr.uid = cpu_to_le32(
				from_kuid(&init_user_ns, attr->ia_uid));
			mask |= CEPH_SETATTR_UID;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}
	if (ia_valid & ATTR_GID) {
		dout("setattr %p gid %d -> %d\n", inode,
		     from_kgid(&init_user_ns, inode->i_gid),
		     from_kgid(&init_user_ns, attr->ia_gid));
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_gid = attr->ia_gid;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   !gid_eq(attr->ia_gid, inode->i_gid)) {
			req->r_args.setattr.gid = cpu_to_le32(
				from_kgid(&init_user_ns, attr->ia_gid));
			mask |= CEPH_SETATTR_GID;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}
	if (ia_valid & ATTR_MODE) {
		dout("setattr %p mode 0%o -> 0%o\n", inode, inode->i_mode,
		     attr->ia_mode);
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_mode = attr->ia_mode;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   attr->ia_mode != inode->i_mode) {
			inode->i_mode = attr->ia_mode;
			req->r_args.setattr.mode = cpu_to_le32(attr->ia_mode);
			mask |= CEPH_SETATTR_MODE;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}

	if (ia_valid & ATTR_ATIME) {
		dout("setattr %p atime %ld.%ld -> %ld.%ld\n", inode,
		     inode->i_atime.tv_sec, inode->i_atime.tv_nsec,
		     attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec);
		if (issued & CEPH_CAP_FILE_EXCL) {
			ci->i_time_warp_seq++;
			inode->i_atime = attr->ia_atime;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_WR) &&
			   timespec_compare(&inode->i_atime,
					    &attr->ia_atime) < 0) {
			inode->i_atime = attr->ia_atime;
			dirtied |= CEPH_CAP_FILE_WR;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   !timespec_equal(&inode->i_atime, &attr->ia_atime)) {
			ceph_encode_timespec(&req->r_args.setattr.atime,
					     &attr->ia_atime);
			mask |= CEPH_SETATTR_ATIME;
			release |= CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}
	if (ia_valid & ATTR_MTIME) {
		dout("setattr %p mtime %ld.%ld -> %ld.%ld\n", inode,
		     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
		     attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec);
		if (issued & CEPH_CAP_FILE_EXCL) {
			ci->i_time_warp_seq++;
			inode->i_mtime = attr->ia_mtime;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_WR) &&
			   timespec_compare(&inode->i_mtime,
					    &attr->ia_mtime) < 0) {
			inode->i_mtime = attr->ia_mtime;
			dirtied |= CEPH_CAP_FILE_WR;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   !timespec_equal(&inode->i_mtime, &attr->ia_mtime)) {
			ceph_encode_timespec(&req->r_args.setattr.mtime,
					     &attr->ia_mtime);
			mask |= CEPH_SETATTR_MTIME;
			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}
	if (ia_valid & ATTR_SIZE) {
		dout("setattr %p size %lld -> %lld\n", inode,
		     inode->i_size, attr->ia_size);
		if ((issued & CEPH_CAP_FILE_EXCL) &&
		    attr->ia_size > inode->i_size) {
			i_size_write(inode, attr->ia_size);
			inode->i_blocks =
				(attr->ia_size + (1 << 9) - 1) >> 9;
			inode->i_ctime = attr->ia_ctime;
			ci->i_reported_size = attr->ia_size;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   attr->ia_size != inode->i_size) {
			req->r_args.setattr.size = cpu_to_le64(attr->ia_size);
			req->r_args.setattr.old_size =
				cpu_to_le64(inode->i_size);
			mask |= CEPH_SETATTR_SIZE;
			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}

	/* these do nothing */
	if (ia_valid & ATTR_CTIME) {
		bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME|
					 ATTR_MODE|ATTR_UID|ATTR_GID)) == 0;
		dout("setattr %p ctime %ld.%ld -> %ld.%ld (%s)\n", inode,
		     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
		     attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
		     only ? "ctime only" : "ignored");
		inode->i_ctime = attr->ia_ctime;
		if (only) {
			/*
			 * if the kernel wants to dirty ctime but nothing
			 * else, we need to choose a cap to dirty under, or
			 * do an almost-no-op setattr
			 */
			if (issued & CEPH_CAP_AUTH_EXCL)
				dirtied |= CEPH_CAP_AUTH_EXCL;
			else if (issued & CEPH_CAP_FILE_EXCL)
				dirtied |= CEPH_CAP_FILE_EXCL;
			else if (issued & CEPH_CAP_XATTR_EXCL)
				dirtied |= CEPH_CAP_XATTR_EXCL;
			else
				mask |= CEPH_SETATTR_CTIME;
		}
	}
	if (ia_valid & ATTR_FILE)
		dout("setattr %p ATTR_FILE ... hrm!\n", inode);

	if (dirtied) {
		inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied,
							   &prealloc_cf);
		inode->i_ctime = current_fs_time(inode->i_sb);
	}

	release &= issued;
	spin_unlock(&ci->i_ceph_lock);
	if (lock_snap_rwsem)
		up_read(&mdsc->snap_rwsem);

	if (inode_dirty_flags)
		__mark_inode_dirty(inode, inode_dirty_flags);

	if (ia_valid & ATTR_MODE) {
		err = posix_acl_chmod(inode, attr->ia_mode);
		if (err)
			goto out_put;
	}

	if (mask) {
		req->r_inode = inode;
		ihold(inode);
		req->r_inode_drop = release;
		req->r_args.setattr.mask = cpu_to_le32(mask);
		req->r_num_caps = 1;
		err = ceph_mdsc_do_request(mdsc, NULL, req);
	}
	dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err,
	     ceph_cap_string(dirtied), mask);

	ceph_mdsc_put_request(req);
	if (mask & CEPH_SETATTR_SIZE)
		__ceph_do_pending_vmtruncate(inode);
	ceph_free_cap_flush(prealloc_cf);
	return err;
out_put:
	ceph_mdsc_put_request(req);
	ceph_free_cap_flush(prealloc_cf);
	return err;
}

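/*
 * Illustration (user-space, hypothetical path; not kernel code, hence
 * compiled out): a plain truncate(2) is enough to drive the ATTR_SIZE
 * branch above; whether it is satisfied locally or via an MDS SETATTR
 * depends on the caps this client currently holds.
 */
#if 0
#include <unistd.h>
#include <stdio.h>

int main(void)
{
	/* "/mnt/ceph/foo" is an assumed CephFS mount point and file */
	if (truncate("/mnt/ceph/foo", 4096) != 0)
		perror("truncate");
	return 0;
}
#endif
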
/*
 * Verify that we have a lease on the given mask.  If not,
 * do a getattr against an MDS.
 */
int __ceph_do_getattr(struct inode *inode, struct page *locked_page,
		      int mask, bool force)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(inode) == CEPH_SNAPDIR) {
		dout("do_getattr inode %p SNAPDIR\n", inode);
		return 0;
	}

	dout("do_getattr inode %p mask %s mode 0%o\n",
	     inode, ceph_cap_string(mask), inode->i_mode);
	if (!force && ceph_caps_issued_mask(ceph_inode(inode), mask, 1))
		return 0;

	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->r_inode = inode;
	ihold(inode);
	req->r_num_caps = 1;
	req->r_args.getattr.mask = cpu_to_le32(mask);
	req->r_locked_page = locked_page;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	if (locked_page && err == 0) {
		u64 inline_version = req->r_reply_info.targeti.inline_version;
		if (inline_version == 0) {
			/* the reply is supposed to contain inline data */
			err = -EINVAL;
		} else if (inline_version == CEPH_INLINE_NONE) {
			err = -ENODATA;
		} else {
			err = req->r_reply_info.targeti.inline_len;
		}
	}
	ceph_mdsc_put_request(req);
	dout("do_getattr result=%d\n", err);
	return err;
}
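
/*
 * For reference, callers that do not need inline data are assumed to
 * go through the thin ceph_do_getattr() wrapper declared in super.h,
 * roughly equivalent to this sketch:
 */
static inline int example_do_getattr(struct inode *inode, int mask, bool force)
{
	/* no locked page: we want attributes only, not inline file data */
	return __ceph_do_getattr(inode, NULL, mask, force);
}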

/*
 * Check inode permissions.  We verify we have a valid value for
 * the AUTH cap, then call the generic handler.
 */
int ceph_permission(struct inode *inode, int mask)
{
	int err;

	if (mask & MAY_NOT_BLOCK)
		return -ECHILD;

	err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED, false);

	if (!err)
		err = generic_permission(inode, mask);
	return err;
}

/*
 * Get all attributes.  Hopefully someday we'll have a statlite()
 * and can limit the fields we require to be accurate.
 */
int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry,
		 struct kstat *stat)
{
	struct inode *inode = d_inode(dentry);
	struct ceph_inode_info *ci = ceph_inode(inode);
	int err;

	err = ceph_do_getattr(inode, CEPH_STAT_CAP_INODE_ALL, false);
	if (!err) {
		generic_fillattr(inode, stat);
		stat->ino = ceph_translate_ino(inode->i_sb, inode->i_ino);
		if (ceph_snap(inode) != CEPH_NOSNAP)
			stat->dev = ceph_snap(inode);
		else
			stat->dev = 0;
		if (S_ISDIR(inode->i_mode)) {
			if (ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb),
						RBYTES))
				stat->size = ci->i_rbytes;
			else
				stat->size = ci->i_files + ci->i_subdirs;
			stat->blocks = 0;
			stat->blksize = 65536;
		}
	}
	return err;
}
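
/*
 * Illustration (user-space, hypothetical path; not kernel code, hence
 * compiled out): how the directory-size logic above is observable.
 * With the "rbytes" mount option, a directory's st_size reports the
 * recursive byte count; without it, the entry count (files + subdirs).
 */
#if 0
#include <sys/stat.h>
#include <stdio.h>

int main(void)
{
	struct stat st;

	if (stat("/mnt/ceph/somedir", &st) == 0)	/* assumed mount */
		printf("dir st_size: %lld\n", (long long)st.st_size);
	return 0;
}
#endif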