#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/namei.h>
#include <linux/writeback.h>
#include <linux/vmalloc.h>

#include "super.h"
#include "mds_client.h"
#include <linux/ceph/decode.h>

/*
 * Ceph inode operations
 *
 * Implement basic inode helpers (get, alloc) and inode ops (getattr,
 * setattr, etc.), xattr helpers, and helpers for assimilating
 * metadata returned by the MDS into our cache.
 *
 * Also define helpers for doing asynchronous writeback, invalidation,
 * and truncation for the benefit of those who can't afford to block
 * (typically because they are in the message handler path).
 */

static const struct inode_operations ceph_symlink_iops;

static void ceph_invalidate_work(struct work_struct *work);
static void ceph_writeback_work(struct work_struct *work);
static void ceph_vmtruncate_work(struct work_struct *work);

/*
 * find or create an inode, given the ceph ino number
 */
static int ceph_set_ino_cb(struct inode *inode, void *data)
{
	ceph_inode(inode)->i_vino = *(struct ceph_vino *)data;
	inode->i_ino = ceph_vino_to_ino(*(struct ceph_vino *)data);
	return 0;
}

struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino)
{
	struct inode *inode;
	ino_t t = ceph_vino_to_ino(vino);

	inode = iget5_locked(sb, t, ceph_ino_compare, ceph_set_ino_cb, &vino);
	if (inode == NULL)
		return ERR_PTR(-ENOMEM);
	if (inode->i_state & I_NEW) {
		dout("get_inode created new inode %p %llx.%llx ino %llx\n",
		     inode, ceph_vinop(inode), (u64)inode->i_ino);
		unlock_new_inode(inode);
	}

	dout("get_inode on %lu=%llx.%llx got %p\n", inode->i_ino, vino.ino,
	     vino.snap, inode);
	return inode;
}

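/*
 * Note on the lookup key used above and below: ceph_vino_to_ino() folds
 * the (ino, snap) pair into the ino_t that iget5_locked() and
 * ilookup5_nowait() hash on, while ceph_ino_compare() performs the
 * authoritative match on the full ceph_vino.  (Descriptive note; the
 * folding itself is assumed to be defined in super.h.)
 */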
struct inode *ceph_lookup_inode(struct super_block *sb, struct ceph_vino vino)
{
	struct inode *inode;
	ino_t t = ceph_vino_to_ino(vino);
	inode = ilookup5_nowait(sb, t, ceph_ino_compare, &vino);
	return inode;
}

/*
 * get/construct snapdir inode for a given directory
 */
struct inode *ceph_get_snapdir(struct inode *parent)
{
	struct ceph_vino vino = {
		.ino = ceph_ino(parent),
		.snap = CEPH_SNAPDIR,
	};
	struct inode *inode = ceph_get_inode(parent->i_sb, vino);
	struct ceph_inode_info *ci = ceph_inode(inode);

	BUG_ON(!S_ISDIR(parent->i_mode));
	if (IS_ERR(inode))
		return inode;
	inode->i_mode = parent->i_mode;
	inode->i_uid = parent->i_uid;
	inode->i_gid = parent->i_gid;
	inode->i_op = &ceph_dir_iops;
	inode->i_fop = &ceph_dir_fops;
	ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
	ci->i_rbytes = 0;
	return inode;
}

const struct inode_operations ceph_file_iops = {
	.permission = ceph_permission,
	.setattr = ceph_setattr,
	.getattr = ceph_getattr,
	.setxattr = ceph_setxattr,
	.getxattr = ceph_getxattr,
	.listxattr = ceph_listxattr,
	.removexattr = ceph_removexattr,
};


/*
 * We use a 'frag tree' to keep track of the MDS's directory fragments
 * for a given inode (usually there is just a single fragment).  We
 * need to know when a child frag is delegated to a new MDS, or when
 * it is flagged as replicated, so we can direct our requests
 * accordingly.
 */

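/*
 * Rough sketch of how the tree is walked (see ceph_choose_frag() below):
 * a frag names a subset of the directory's hash space.  The root frag,
 * ceph_frag_make(0, 0), covers everything; a frag with split_by == n has
 * 2^n children produced by ceph_frag_make_child().  Only split (interior)
 * frags, plus leaves carrying delegation info, are kept in i_fragtree,
 * so an empty tree simply means "one big fragment".
 */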
/*
 * find/create a frag in the tree
 */
static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci,
						    u32 f)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ceph_inode_frag *frag;
	int c;

	p = &ci->i_fragtree.rb_node;
	while (*p) {
		parent = *p;
		frag = rb_entry(parent, struct ceph_inode_frag, node);
		c = ceph_frag_compare(f, frag->frag);
		if (c < 0)
			p = &(*p)->rb_left;
		else if (c > 0)
			p = &(*p)->rb_right;
		else
			return frag;
	}

	frag = kmalloc(sizeof(*frag), GFP_NOFS);
	if (!frag) {
		pr_err("__get_or_create_frag ENOMEM on %p %llx.%llx "
		       "frag %x\n", &ci->vfs_inode,
		       ceph_vinop(&ci->vfs_inode), f);
		return ERR_PTR(-ENOMEM);
	}
	frag->frag = f;
	frag->split_by = 0;
	frag->mds = -1;
	frag->ndist = 0;

	rb_link_node(&frag->node, parent, p);
	rb_insert_color(&frag->node, &ci->i_fragtree);

	dout("get_or_create_frag added %llx.%llx frag %x\n",
	     ceph_vinop(&ci->vfs_inode), f);
	return frag;
}

/*
 * find a specific frag @f
 */
struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci, u32 f)
{
	struct rb_node *n = ci->i_fragtree.rb_node;

	while (n) {
		struct ceph_inode_frag *frag =
			rb_entry(n, struct ceph_inode_frag, node);
		int c = ceph_frag_compare(f, frag->frag);
		if (c < 0)
			n = n->rb_left;
		else if (c > 0)
			n = n->rb_right;
		else
			return frag;
	}
	return NULL;
}

/*
 * Choose frag containing the given value @v.  If @pfrag is
 * specified, copy the frag delegation info to the caller if
 * it is present.
 */
u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
		     struct ceph_inode_frag *pfrag,
		     int *found)
{
	u32 t = ceph_frag_make(0, 0);
	struct ceph_inode_frag *frag;
	unsigned nway, i;
	u32 n;

	if (found)
		*found = 0;

	mutex_lock(&ci->i_fragtree_mutex);
	while (1) {
		WARN_ON(!ceph_frag_contains_value(t, v));
		frag = __ceph_find_frag(ci, t);
		if (!frag)
			break; /* t is a leaf */
		if (frag->split_by == 0) {
			if (pfrag)
				memcpy(pfrag, frag, sizeof(*pfrag));
			if (found)
				*found = 1;
			break;
		}

		/* choose child */
		nway = 1 << frag->split_by;
		dout("choose_frag(%x) %x splits by %d (%d ways)\n", v, t,
		     frag->split_by, nway);
		for (i = 0; i < nway; i++) {
			n = ceph_frag_make_child(t, frag->split_by, i);
			if (ceph_frag_contains_value(n, v)) {
				t = n;
				break;
			}
		}
		BUG_ON(i == nway);
	}
	dout("choose_frag(%x) = %x\n", v, t);

	mutex_unlock(&ci->i_fragtree_mutex);
	return t;
}

/*
 * Process dirfrag (delegation) info from the mds.  Include leaf
 * fragment in tree ONLY if ndist > 0.  Otherwise, only
 * branches/splits are included in i_fragtree.
 */
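/*
 * Delegation info stored per frag: frag->mds is the authoritative MDS
 * for the fragment, and frag->dist[] lists up to CEPH_MAX_DIRFRAG_REP
 * MDS ranks holding replicas.  When the reply carries ndist == 0 the
 * entry is pruned back to a bare split record (or removed entirely if
 * it was a leaf).
 */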
static int ceph_fill_dirfrag(struct inode *inode,
			     struct ceph_mds_reply_dirfrag *dirinfo)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_frag *frag;
	u32 id = le32_to_cpu(dirinfo->frag);
	int mds = le32_to_cpu(dirinfo->auth);
	int ndist = le32_to_cpu(dirinfo->ndist);
	int i;
	int err = 0;

	mutex_lock(&ci->i_fragtree_mutex);
	if (ndist == 0) {
		/* no delegation info needed. */
		frag = __ceph_find_frag(ci, id);
		if (!frag)
			goto out;
		if (frag->split_by == 0) {
			/* tree leaf, remove */
			dout("fill_dirfrag removed %llx.%llx frag %x"
			     " (no ref)\n", ceph_vinop(inode), id);
			rb_erase(&frag->node, &ci->i_fragtree);
			kfree(frag);
		} else {
			/* tree branch, keep and clear */
			dout("fill_dirfrag cleared %llx.%llx frag %x"
			     " referral\n", ceph_vinop(inode), id);
			frag->mds = -1;
			frag->ndist = 0;
		}
		goto out;
	}


	/* find/add this frag to store mds delegation info */
	frag = __get_or_create_frag(ci, id);
	if (IS_ERR(frag)) {
		/* this is not the end of the world; we can continue
		   with bad/inaccurate delegation info */
		pr_err("fill_dirfrag ENOMEM on mds ref %llx.%llx fg %x\n",
		       ceph_vinop(inode), le32_to_cpu(dirinfo->frag));
		err = -ENOMEM;
		goto out;
	}

	frag->mds = mds;
	frag->ndist = min_t(u32, ndist, CEPH_MAX_DIRFRAG_REP);
	for (i = 0; i < frag->ndist; i++)
		frag->dist[i] = le32_to_cpu(dirinfo->dist[i]);
	dout("fill_dirfrag %llx.%llx frag %x ndist=%d\n",
	     ceph_vinop(inode), frag->frag, frag->ndist);

out:
	mutex_unlock(&ci->i_fragtree_mutex);
	return err;
}


/*
 * initialize a newly allocated inode.
 */
struct inode *ceph_alloc_inode(struct super_block *sb)
{
	struct ceph_inode_info *ci;
	int i;

	ci = kmem_cache_alloc(ceph_inode_cachep, GFP_NOFS);
	if (!ci)
		return NULL;

	dout("alloc_inode %p\n", &ci->vfs_inode);

	spin_lock_init(&ci->i_ceph_lock);

	ci->i_version = 0;
	ci->i_time_warp_seq = 0;
	ci->i_ceph_flags = 0;
	atomic_set(&ci->i_release_count, 1);
	atomic_set(&ci->i_complete_count, 0);
	ci->i_symlink = NULL;

	memset(&ci->i_dir_layout, 0, sizeof(ci->i_dir_layout));

	ci->i_fragtree = RB_ROOT;
	mutex_init(&ci->i_fragtree_mutex);

	ci->i_xattrs.blob = NULL;
	ci->i_xattrs.prealloc_blob = NULL;
	ci->i_xattrs.dirty = false;
	ci->i_xattrs.index = RB_ROOT;
	ci->i_xattrs.count = 0;
	ci->i_xattrs.names_size = 0;
	ci->i_xattrs.vals_size = 0;
	ci->i_xattrs.version = 0;
	ci->i_xattrs.index_version = 0;

	ci->i_caps = RB_ROOT;
	ci->i_auth_cap = NULL;
	ci->i_dirty_caps = 0;
	ci->i_flushing_caps = 0;
	INIT_LIST_HEAD(&ci->i_dirty_item);
	INIT_LIST_HEAD(&ci->i_flushing_item);
	ci->i_cap_flush_seq = 0;
	ci->i_cap_flush_last_tid = 0;
	memset(&ci->i_cap_flush_tid, 0, sizeof(ci->i_cap_flush_tid));
	init_waitqueue_head(&ci->i_cap_wq);
	ci->i_hold_caps_min = 0;
	ci->i_hold_caps_max = 0;
	INIT_LIST_HEAD(&ci->i_cap_delay_list);
	ci->i_cap_exporting_mds = 0;
	ci->i_cap_exporting_mseq = 0;
	ci->i_cap_exporting_issued = 0;
	INIT_LIST_HEAD(&ci->i_cap_snaps);
	ci->i_head_snapc = NULL;
	ci->i_snap_caps = 0;

	for (i = 0; i < CEPH_FILE_MODE_NUM; i++)
		ci->i_nr_by_mode[i] = 0;

	mutex_init(&ci->i_truncate_mutex);
	ci->i_truncate_seq = 0;
	ci->i_truncate_size = 0;
	ci->i_truncate_pending = 0;

	ci->i_max_size = 0;
	ci->i_reported_size = 0;
	ci->i_wanted_max_size = 0;
	ci->i_requested_max_size = 0;

	ci->i_pin_ref = 0;
	ci->i_rd_ref = 0;
	ci->i_rdcache_ref = 0;
	ci->i_wr_ref = 0;
	ci->i_wb_ref = 0;
	ci->i_wrbuffer_ref = 0;
	ci->i_wrbuffer_ref_head = 0;
	ci->i_shared_gen = 0;
	ci->i_rdcache_gen = 0;
	ci->i_rdcache_revoking = 0;

	INIT_LIST_HEAD(&ci->i_unsafe_writes);
	INIT_LIST_HEAD(&ci->i_unsafe_dirops);
	spin_lock_init(&ci->i_unsafe_lock);

	ci->i_snap_realm = NULL;
	INIT_LIST_HEAD(&ci->i_snap_realm_item);
	INIT_LIST_HEAD(&ci->i_snap_flush_item);

	INIT_WORK(&ci->i_wb_work, ceph_writeback_work);
	INIT_WORK(&ci->i_pg_inv_work, ceph_invalidate_work);

	INIT_WORK(&ci->i_vmtruncate_work, ceph_vmtruncate_work);

	return &ci->vfs_inode;
}

static void ceph_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	struct ceph_inode_info *ci = ceph_inode(inode);

	kmem_cache_free(ceph_inode_cachep, ci);
}

void ceph_destroy_inode(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_frag *frag;
	struct rb_node *n;

	dout("destroy_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode));

	ceph_queue_caps_release(inode);

	/*
	 * we may still have a snap_realm reference if there are stray
	 * caps in i_cap_exporting_issued or i_snap_caps.
	 */
	if (ci->i_snap_realm) {
		struct ceph_mds_client *mdsc =
			ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
		struct ceph_snap_realm *realm = ci->i_snap_realm;

		dout(" dropping residual ref to snap realm %p\n", realm);
		spin_lock(&realm->inodes_with_caps_lock);
		list_del_init(&ci->i_snap_realm_item);
		spin_unlock(&realm->inodes_with_caps_lock);
		ceph_put_snap_realm(mdsc, realm);
	}

	kfree(ci->i_symlink);
	while ((n = rb_first(&ci->i_fragtree)) != NULL) {
		frag = rb_entry(n, struct ceph_inode_frag, node);
		rb_erase(n, &ci->i_fragtree);
		kfree(frag);
	}

	__ceph_destroy_xattrs(ci);
	if (ci->i_xattrs.blob)
		ceph_buffer_put(ci->i_xattrs.blob);
	if (ci->i_xattrs.prealloc_blob)
		ceph_buffer_put(ci->i_xattrs.prealloc_blob);

	call_rcu(&inode->i_rcu, ceph_i_callback);
}


/*
 * Helpers to fill in size, ctime, mtime, and atime.  We have to be
 * careful because either the client or MDS may have more up to date
 * info, depending on which capabilities are held, and whether
 * time_warp_seq or truncate_seq have increased.  (Ordinarily, mtime
 * and size are monotonically increasing, except when utimes() or
 * truncate() increments the corresponding _seq values.)
 */
int ceph_fill_file_size(struct inode *inode, int issued,
			u32 truncate_seq, u64 truncate_size, u64 size)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int queue_trunc = 0;

	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 ||
	    (truncate_seq == ci->i_truncate_seq && size > inode->i_size)) {
		dout("size %lld -> %llu\n", inode->i_size, size);
		inode->i_size = size;
		inode->i_blocks = (size + (1<<9) - 1) >> 9;
		ci->i_reported_size = size;
		if (truncate_seq != ci->i_truncate_seq) {
			dout("truncate_seq %u -> %u\n",
			     ci->i_truncate_seq, truncate_seq);
			ci->i_truncate_seq = truncate_seq;

			/* the MDS should have revoked these caps */
			WARN_ON_ONCE(issued & (CEPH_CAP_FILE_EXCL |
					       CEPH_CAP_FILE_RD |
					       CEPH_CAP_FILE_WR |
					       CEPH_CAP_FILE_LAZYIO));
			/*
			 * If we hold relevant caps, or in the case where we're
			 * not the only client referencing this file and we
			 * don't hold those caps, then we need to check whether
			 * the file is either opened or mmapped
			 */
			if ((issued & (CEPH_CAP_FILE_CACHE|
				       CEPH_CAP_FILE_BUFFER)) ||
			    mapping_mapped(inode->i_mapping) ||
			    __ceph_caps_file_wanted(ci)) {
				ci->i_truncate_pending++;
				queue_trunc = 1;
			}
		}
	}
	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) >= 0 &&
	    ci->i_truncate_size != truncate_size) {
		dout("truncate_size %lld -> %llu\n", ci->i_truncate_size,
		     truncate_size);
		ci->i_truncate_size = truncate_size;
	}
	return queue_trunc;
}

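/*
 * ceph_fill_file_time() below distinguishes three cases: the MDS's
 * time_warp_seq is newer (it did a utimes(); take its values), the
 * seqs match (nobody warped time; keep the max of each timestamp), or
 * we hold FILE_EXCL (our local times win and the MDS values are
 * ignored).
 */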
void ceph_fill_file_time(struct inode *inode, int issued,
			 u64 time_warp_seq, struct timespec *ctime,
			 struct timespec *mtime, struct timespec *atime)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int warn = 0;

	if (issued & (CEPH_CAP_FILE_EXCL|
		      CEPH_CAP_FILE_WR|
		      CEPH_CAP_FILE_BUFFER|
		      CEPH_CAP_AUTH_EXCL|
		      CEPH_CAP_XATTR_EXCL)) {
		if (timespec_compare(ctime, &inode->i_ctime) > 0) {
			dout("ctime %ld.%09ld -> %ld.%09ld inc w/ cap\n",
			     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
			     ctime->tv_sec, ctime->tv_nsec);
			inode->i_ctime = *ctime;
		}
		if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
			/* the MDS did a utimes() */
			dout("mtime %ld.%09ld -> %ld.%09ld "
			     "tw %d -> %d\n",
			     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
			     mtime->tv_sec, mtime->tv_nsec,
			     ci->i_time_warp_seq, (int)time_warp_seq);

			inode->i_mtime = *mtime;
			inode->i_atime = *atime;
			ci->i_time_warp_seq = time_warp_seq;
		} else if (time_warp_seq == ci->i_time_warp_seq) {
			/* nobody did utimes(); take the max */
			if (timespec_compare(mtime, &inode->i_mtime) > 0) {
				dout("mtime %ld.%09ld -> %ld.%09ld inc\n",
				     inode->i_mtime.tv_sec,
				     inode->i_mtime.tv_nsec,
				     mtime->tv_sec, mtime->tv_nsec);
				inode->i_mtime = *mtime;
			}
			if (timespec_compare(atime, &inode->i_atime) > 0) {
				dout("atime %ld.%09ld -> %ld.%09ld inc\n",
				     inode->i_atime.tv_sec,
				     inode->i_atime.tv_nsec,
				     atime->tv_sec, atime->tv_nsec);
				inode->i_atime = *atime;
			}
		} else if (issued & CEPH_CAP_FILE_EXCL) {
			/* we did a utimes(); ignore mds values */
		} else {
			warn = 1;
		}
	} else {
		/* we have no write|excl caps; whatever the MDS says is true */
		if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) {
			inode->i_ctime = *ctime;
			inode->i_mtime = *mtime;
			inode->i_atime = *atime;
			ci->i_time_warp_seq = time_warp_seq;
		} else {
			warn = 1;
		}
	}
	if (warn) /* time_warp_seq shouldn't go backwards */
		dout("%p mds time_warp_seq %llu < %u\n",
		     inode, time_warp_seq, ci->i_time_warp_seq);
}

/*
 * Populate an inode based on info from mds.  May be called on new or
 * existing inodes.
 */
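/*
 * A note on two of the arguments: cap_fmode >= 0 means the caller is
 * opening the file with that mode, so a file-mode reference is taken
 * via __ceph_get_fmode(); caps_reservation is handed through to
 * ceph_add_cap() when the reply grants us capabilities.
 */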
static int fill_inode(struct inode *inode,
		      struct ceph_mds_reply_info_in *iinfo,
		      struct ceph_mds_reply_dirfrag *dirinfo,
		      struct ceph_mds_session *session,
		      unsigned long ttl_from, int cap_fmode,
		      struct ceph_cap_reservation *caps_reservation)
{
	struct ceph_mds_reply_inode *info = iinfo->in;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int i;
	int issued = 0, implemented;
	struct timespec mtime, atime, ctime;
	u32 nsplits;
	struct ceph_buffer *xattr_blob = NULL;
	int err = 0;
	int queue_trunc = 0;

	dout("fill_inode %p ino %llx.%llx v %llu had %llu\n",
	     inode, ceph_vinop(inode), le64_to_cpu(info->version),
	     ci->i_version);

	/*
	 * prealloc xattr data, if it looks like we'll need it.  only
	 * if len > 4 (meaning there are actually xattrs; the first 4
	 * bytes are the xattr count).
	 */
	if (iinfo->xattr_len > 4) {
		xattr_blob = ceph_buffer_new(iinfo->xattr_len, GFP_NOFS);
		if (!xattr_blob)
			pr_err("fill_inode ENOMEM xattr blob %d bytes\n",
			       iinfo->xattr_len);
	}

	spin_lock(&ci->i_ceph_lock);

	/*
	 * provided version will be odd if inode value is projected,
	 * even if stable.  skip the update if we have newer stable
	 * info (ours>=theirs, e.g. due to racing mds replies), unless
	 * we are getting projected (unstable) info (in which case the
	 * version is odd, and we want ours>theirs).
	 *   us   them
	 *   2    2     skip
	 *   3    2     skip
	 *   3    3     update
	 */
	if (le64_to_cpu(info->version) > 0 &&
	    (ci->i_version & ~1) >= le64_to_cpu(info->version))
		goto no_change;

	issued = __ceph_caps_issued(ci, &implemented);
	issued |= implemented | __ceph_caps_dirty(ci);

	/* update inode */
	ci->i_version = le64_to_cpu(info->version);
	inode->i_version++;
	inode->i_rdev = le32_to_cpu(info->rdev);

	if ((issued & CEPH_CAP_AUTH_EXCL) == 0) {
		inode->i_mode = le32_to_cpu(info->mode);
		inode->i_uid = make_kuid(&init_user_ns, le32_to_cpu(info->uid));
		inode->i_gid = make_kgid(&init_user_ns, le32_to_cpu(info->gid));
		dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
		     from_kuid(&init_user_ns, inode->i_uid),
		     from_kgid(&init_user_ns, inode->i_gid));
	}

	if ((issued & CEPH_CAP_LINK_EXCL) == 0)
		set_nlink(inode, le32_to_cpu(info->nlink));

	/* be careful with mtime, atime, size */
	ceph_decode_timespec(&atime, &info->atime);
	ceph_decode_timespec(&mtime, &info->mtime);
	ceph_decode_timespec(&ctime, &info->ctime);
	queue_trunc = ceph_fill_file_size(inode, issued,
					  le32_to_cpu(info->truncate_seq),
					  le64_to_cpu(info->truncate_size),
					  le64_to_cpu(info->size));
	ceph_fill_file_time(inode, issued,
			    le32_to_cpu(info->time_warp_seq),
			    &ctime, &mtime, &atime);

	/* only update max_size on auth cap */
	if ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
	    ci->i_max_size != le64_to_cpu(info->max_size)) {
		dout("max_size %lld -> %llu\n", ci->i_max_size,
		     le64_to_cpu(info->max_size));
		ci->i_max_size = le64_to_cpu(info->max_size);
	}

	ci->i_layout = info->layout;
	inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;

	/* xattrs */
	/* note that if i_xattrs.len <= 4, i_xattrs.data will still be NULL. */
	if ((issued & CEPH_CAP_XATTR_EXCL) == 0 &&
	    le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
		if (ci->i_xattrs.blob)
			ceph_buffer_put(ci->i_xattrs.blob);
		ci->i_xattrs.blob = xattr_blob;
		if (xattr_blob)
			memcpy(ci->i_xattrs.blob->vec.iov_base,
			       iinfo->xattr_data, iinfo->xattr_len);
		ci->i_xattrs.version = le64_to_cpu(info->xattr_version);
		xattr_blob = NULL;
	}

	inode->i_mapping->a_ops = &ceph_aops;
	inode->i_mapping->backing_dev_info =
		&ceph_sb_to_client(inode->i_sb)->backing_dev_info;

	switch (inode->i_mode & S_IFMT) {
	case S_IFIFO:
	case S_IFBLK:
	case S_IFCHR:
	case S_IFSOCK:
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
		inode->i_op = &ceph_file_iops;
		break;
	case S_IFREG:
		inode->i_op = &ceph_file_iops;
		inode->i_fop = &ceph_file_fops;
		break;
	case S_IFLNK:
		inode->i_op = &ceph_symlink_iops;
		if (!ci->i_symlink) {
			u32 symlen = iinfo->symlink_len;
			char *sym;

			spin_unlock(&ci->i_ceph_lock);

			err = -EINVAL;
			if (WARN_ON(symlen != inode->i_size))
				goto out;

			err = -ENOMEM;
			sym = kstrndup(iinfo->symlink, symlen, GFP_NOFS);
			if (!sym)
				goto out;

			spin_lock(&ci->i_ceph_lock);
			if (!ci->i_symlink)
				ci->i_symlink = sym;
			else
				kfree(sym); /* lost a race */
		}
		break;
	case S_IFDIR:
		inode->i_op = &ceph_dir_iops;
		inode->i_fop = &ceph_dir_fops;

		ci->i_dir_layout = iinfo->dir_layout;

		ci->i_files = le64_to_cpu(info->files);
		ci->i_subdirs = le64_to_cpu(info->subdirs);
		ci->i_rbytes = le64_to_cpu(info->rbytes);
		ci->i_rfiles = le64_to_cpu(info->rfiles);
		ci->i_rsubdirs = le64_to_cpu(info->rsubdirs);
		ceph_decode_timespec(&ci->i_rctime, &info->rctime);
		break;
	default:
		pr_err("fill_inode %llx.%llx BAD mode 0%o\n",
		       ceph_vinop(inode), inode->i_mode);
	}

	/* set dir completion flag? */
	if (S_ISDIR(inode->i_mode) &&
	    ci->i_files == 0 && ci->i_subdirs == 0 &&
	    ceph_snap(inode) == CEPH_NOSNAP &&
	    (le32_to_cpu(info->cap.caps) & CEPH_CAP_FILE_SHARED) &&
	    (issued & CEPH_CAP_FILE_EXCL) == 0 &&
	    !__ceph_dir_is_complete(ci)) {
		dout(" marking %p complete (empty)\n", inode);
		__ceph_dir_set_complete(ci, atomic_read(&ci->i_release_count));
		ci->i_max_offset = 2;
	}
no_change:
	spin_unlock(&ci->i_ceph_lock);

	/* queue truncate if we saw i_size decrease */
	if (queue_trunc)
		ceph_queue_vmtruncate(inode);

	/* populate frag tree */
	/* FIXME: move me up, if/when version reflects fragtree changes */
	nsplits = le32_to_cpu(info->fragtree.nsplits);
	mutex_lock(&ci->i_fragtree_mutex);
	for (i = 0; i < nsplits; i++) {
		u32 id = le32_to_cpu(info->fragtree.splits[i].frag);
		struct ceph_inode_frag *frag = __get_or_create_frag(ci, id);

		if (IS_ERR(frag))
			continue;
		frag->split_by = le32_to_cpu(info->fragtree.splits[i].by);
		dout(" frag %x split by %d\n", frag->frag, frag->split_by);
	}
	mutex_unlock(&ci->i_fragtree_mutex);

	/* were we issued a capability? */
	if (info->cap.caps) {
		if (ceph_snap(inode) == CEPH_NOSNAP) {
			ceph_add_cap(inode, session,
				     le64_to_cpu(info->cap.cap_id),
				     cap_fmode,
				     le32_to_cpu(info->cap.caps),
				     le32_to_cpu(info->cap.wanted),
				     le32_to_cpu(info->cap.seq),
				     le32_to_cpu(info->cap.mseq),
				     le64_to_cpu(info->cap.realm),
				     info->cap.flags,
				     caps_reservation);
		} else {
			spin_lock(&ci->i_ceph_lock);
			dout(" %p got snap_caps %s\n", inode,
			     ceph_cap_string(le32_to_cpu(info->cap.caps)));
			ci->i_snap_caps |= le32_to_cpu(info->cap.caps);
			if (cap_fmode >= 0)
				__ceph_get_fmode(ci, cap_fmode);
			spin_unlock(&ci->i_ceph_lock);
		}
	} else if (cap_fmode >= 0) {
		pr_warning("mds issued no caps on %llx.%llx\n",
			   ceph_vinop(inode));
		__ceph_get_fmode(ci, cap_fmode);
	}

	/* update delegation info? */
	if (dirinfo)
		ceph_fill_dirfrag(inode, dirinfo);

	err = 0;

out:
	if (xattr_blob)
		ceph_buffer_put(xattr_blob);
	return err;
}

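/*
 * Dentry leases: the MDS lets us trust a name->inode binding for
 * lease->duration_ms, measured from the time the request was sent
 * (from_time).  A lease is only honored while di->lease_gen still
 * matches the session's s_cap_gen recorded when it was issued.
 */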
/*
 * caller should hold session s_mutex.
 */
static void update_dentry_lease(struct dentry *dentry,
				struct ceph_mds_reply_lease *lease,
				struct ceph_mds_session *session,
				unsigned long from_time)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	long unsigned duration = le32_to_cpu(lease->duration_ms);
	long unsigned ttl = from_time + (duration * HZ) / 1000;
	long unsigned half_ttl = from_time + (duration * HZ / 2) / 1000;
	struct inode *dir;

	/* only track leases on regular dentries */
	if (dentry->d_op != &ceph_dentry_ops)
		return;

	spin_lock(&dentry->d_lock);
	dout("update_dentry_lease %p duration %lu ms ttl %lu\n",
	     dentry, duration, ttl);

	/* make lease_shared_gen match directory */
	dir = dentry->d_parent->d_inode;
	di->lease_shared_gen = ceph_inode(dir)->i_shared_gen;

	if (duration == 0)
		goto out_unlock;

	if (di->lease_gen == session->s_cap_gen &&
	    time_before(ttl, dentry->d_time))
		goto out_unlock;  /* we already have a newer lease. */

	if (di->lease_session && di->lease_session != session)
		goto out_unlock;

	ceph_dentry_lru_touch(dentry);

	if (!di->lease_session)
		di->lease_session = ceph_get_mds_session(session);
	di->lease_gen = session->s_cap_gen;
	di->lease_seq = le32_to_cpu(lease->seq);
	di->lease_renew_after = half_ttl;
	di->lease_renew_from = 0;
	dentry->d_time = ttl;
out_unlock:
	spin_unlock(&dentry->d_lock);
	return;
}

/*
 * Set dentry's directory position based on the current dir's max, and
 * order it in d_subdirs, so that dcache_readdir behaves.
 *
 * Always called under directory's i_mutex.
 */
static void ceph_set_dentry_offset(struct dentry *dn)
{
	struct dentry *dir = dn->d_parent;
	struct inode *inode = dir->d_inode;
	struct ceph_inode_info *ci;
	struct ceph_dentry_info *di;

	BUG_ON(!inode);

	ci = ceph_inode(inode);
	di = ceph_dentry(dn);

	spin_lock(&ci->i_ceph_lock);
	if (!__ceph_dir_is_complete(ci)) {
		spin_unlock(&ci->i_ceph_lock);
		return;
	}
	di->offset = ceph_inode(inode)->i_max_offset++;
	spin_unlock(&ci->i_ceph_lock);

	spin_lock(&dir->d_lock);
	spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
	list_move(&dn->d_u.d_child, &dir->d_subdirs);
	dout("set_dentry_offset %p %lld (%p %p)\n", dn, di->offset,
	     dn->d_u.d_child.prev, dn->d_u.d_child.next);
	spin_unlock(&dn->d_lock);
	spin_unlock(&dir->d_lock);
}

/*
 * splice a dentry to an inode.
 * caller must hold directory i_mutex for this to be safe.
 *
 * we will only rehash the resulting dentry if @prehash is
 * true; @prehash will be set to false (for the benefit of
 * the caller) if we fail.
 */
static struct dentry *splice_dentry(struct dentry *dn, struct inode *in,
				    bool *prehash, bool set_offset)
{
	struct dentry *realdn;

	BUG_ON(dn->d_inode);

	/* dn must be unhashed */
	if (!d_unhashed(dn))
		d_drop(dn);
	realdn = d_materialise_unique(dn, in);
	if (IS_ERR(realdn)) {
		pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n",
		       PTR_ERR(realdn), dn, in, ceph_vinop(in));
		if (prehash)
			*prehash = false; /* don't rehash on error */
		dn = realdn; /* note realdn contains the error */
		goto out;
	} else if (realdn) {
		dout("dn %p (%d) spliced with %p (%d) "
		     "inode %p ino %llx.%llx\n",
		     dn, d_count(dn),
		     realdn, d_count(realdn),
		     realdn->d_inode, ceph_vinop(realdn->d_inode));
		dput(dn);
		dn = realdn;
	} else {
		BUG_ON(!ceph_dentry(dn));
		dout("dn %p attached to %p ino %llx.%llx\n",
		     dn, dn->d_inode, ceph_vinop(dn->d_inode));
	}
	if ((!prehash || *prehash) && d_unhashed(dn))
		d_rehash(dn);
	if (set_offset)
		ceph_set_dentry_offset(dn);
out:
	return dn;
}

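/*
 * Reminder for the callers below: splice_dentry() may return a
 * different dentry than the one passed in (an existing alias found by
 * d_materialise_unique()) or an ERR_PTR; always continue with the
 * returned value.
 */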
/*
 * Incorporate results into the local cache.  This is either just
 * one inode, or a directory, dentry, and possibly linked-to inode (e.g.,
 * after a lookup).
 *
 * A reply may contain
 *         a directory inode along with a dentry.
 *         and/or a target inode
 *
 * Called with snap_rwsem (read).
 */
int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
		    struct ceph_mds_session *session)
{
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	struct inode *in = NULL;
	struct ceph_mds_reply_inode *ininfo;
	struct ceph_vino vino;
	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
	int i = 0;
	int err = 0;

	dout("fill_trace %p is_dentry %d is_target %d\n", req,
	     rinfo->head->is_dentry, rinfo->head->is_target);

#if 0
	/*
	 * Debugging hook:
	 *
	 * If we resend completed ops to a recovering mds, we get no
	 * trace.  Since that is very rare, pretend this is the case
	 * to ensure the 'no trace' handlers in the callers behave.
	 *
	 * Fill in inodes unconditionally to avoid breaking cap
	 * invariants.
	 */
	if (rinfo->head->op & CEPH_MDS_OP_WRITE) {
		pr_info("fill_trace faking empty trace on %lld %s\n",
			req->r_tid, ceph_mds_op_name(rinfo->head->op));
		if (rinfo->head->is_dentry) {
			rinfo->head->is_dentry = 0;
			err = fill_inode(req->r_locked_dir,
					 &rinfo->diri, rinfo->dirfrag,
					 session, req->r_request_started, -1);
		}
		if (rinfo->head->is_target) {
			rinfo->head->is_target = 0;
			ininfo = rinfo->targeti.in;
			vino.ino = le64_to_cpu(ininfo->ino);
			vino.snap = le64_to_cpu(ininfo->snapid);
			in = ceph_get_inode(sb, vino);
			err = fill_inode(in, &rinfo->targeti, NULL,
					 session, req->r_request_started,
					 req->r_fmode);
			iput(in);
		}
	}
#endif

	if (!rinfo->head->is_target && !rinfo->head->is_dentry) {
		dout("fill_trace reply is empty!\n");
		if (rinfo->head->result == 0 && req->r_locked_dir)
			ceph_invalidate_dir_request(req);
		return 0;
	}

	if (rinfo->head->is_dentry) {
		struct inode *dir = req->r_locked_dir;

		if (dir) {
			err = fill_inode(dir, &rinfo->diri, rinfo->dirfrag,
					 session, req->r_request_started, -1,
					 &req->r_caps_reservation);
			if (err < 0)
				return err;
		} else {
			WARN_ON_ONCE(1);
		}
	}

	/*
	 * ignore null lease/binding on snapdir ENOENT, or else we
	 * will have trouble splicing in the virtual snapdir later
	 */
	if (rinfo->head->is_dentry && !req->r_aborted &&
	    req->r_locked_dir &&
	    (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name,
					       fsc->mount_options->snapdir_name,
					       req->r_dentry->d_name.len))) {
		/*
		 * lookup link rename   : null -> possibly existing inode
		 * mknod symlink mkdir  : null -> new inode
		 * unlink               : linked -> null
		 */
		struct inode *dir = req->r_locked_dir;
		struct dentry *dn = req->r_dentry;
		bool have_dir_cap, have_lease;

		BUG_ON(!dn);
		BUG_ON(!dir);
		BUG_ON(dn->d_parent->d_inode != dir);
		BUG_ON(ceph_ino(dir) !=
		       le64_to_cpu(rinfo->diri.in->ino));
		BUG_ON(ceph_snap(dir) !=
		       le64_to_cpu(rinfo->diri.in->snapid));

		/* do we have a lease on the whole dir? */
		have_dir_cap =
			(le32_to_cpu(rinfo->diri.in->cap.caps) &
			 CEPH_CAP_FILE_SHARED);

		/* do we have a dn lease? */
		have_lease = have_dir_cap ||
			le32_to_cpu(rinfo->dlease->duration_ms);
		if (!have_lease)
			dout("fill_trace  no dentry lease or dir cap\n");

		/* rename? */
		if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) {
			dout(" src %p '%.*s' dst %p '%.*s'\n",
			     req->r_old_dentry,
			     req->r_old_dentry->d_name.len,
			     req->r_old_dentry->d_name.name,
			     dn, dn->d_name.len, dn->d_name.name);
			dout("fill_trace doing d_move %p -> %p\n",
			     req->r_old_dentry, dn);

			d_move(req->r_old_dentry, dn);
			dout(" src %p '%.*s' dst %p '%.*s'\n",
			     req->r_old_dentry,
			     req->r_old_dentry->d_name.len,
			     req->r_old_dentry->d_name.name,
			     dn, dn->d_name.len, dn->d_name.name);

			/* ensure target dentry is invalidated, despite
			   rehashing bug in vfs_rename_dir */
			ceph_invalidate_dentry_lease(dn);

			/*
			 * d_move() puts the renamed dentry at the end of
			 * d_subdirs.  We need to assign it an appropriate
			 * directory offset so we can behave when dir is
			 * complete.
			 */
			ceph_set_dentry_offset(req->r_old_dentry);
			dout("dn %p gets new offset %lld\n", req->r_old_dentry,
			     ceph_dentry(req->r_old_dentry)->offset);

			dn = req->r_old_dentry;  /* use old_dentry */
			in = dn->d_inode;
		}

		/* null dentry? */
		if (!rinfo->head->is_target) {
			dout("fill_trace null dentry\n");
			if (dn->d_inode) {
				dout("d_delete %p\n", dn);
				d_delete(dn);
			} else {
				dout("d_instantiate %p NULL\n", dn);
				d_instantiate(dn, NULL);
				if (have_lease && d_unhashed(dn))
					d_rehash(dn);
				update_dentry_lease(dn, rinfo->dlease,
						    session,
						    req->r_request_started);
			}
			goto done;
		}

		/* attach proper inode */
		ininfo = rinfo->targeti.in;
		vino.ino = le64_to_cpu(ininfo->ino);
		vino.snap = le64_to_cpu(ininfo->snapid);
		in = dn->d_inode;
		if (!in) {
			in = ceph_get_inode(sb, vino);
			if (IS_ERR(in)) {
				pr_err("fill_trace bad get_inode "
				       "%llx.%llx\n", vino.ino, vino.snap);
				err = PTR_ERR(in);
				d_drop(dn);
				goto done;
			}
			dn = splice_dentry(dn, in, &have_lease, true);
			if (IS_ERR(dn)) {
				err = PTR_ERR(dn);
				goto done;
			}
			req->r_dentry = dn;  /* may have spliced */
			ihold(in);
		} else if (ceph_ino(in) == vino.ino &&
			   ceph_snap(in) == vino.snap) {
			ihold(in);
		} else {
			dout(" %p links to %p %llx.%llx, not %llx.%llx\n",
			     dn, in, ceph_ino(in), ceph_snap(in),
			     vino.ino, vino.snap);
			have_lease = false;
			in = NULL;
		}

		if (have_lease)
			update_dentry_lease(dn, rinfo->dlease, session,
					    req->r_request_started);
		dout(" final dn %p\n", dn);
		i++;
	} else if ((req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
		    req->r_op == CEPH_MDS_OP_MKSNAP) && !req->r_aborted) {
		struct dentry *dn = req->r_dentry;

		/* fill out a snapdir LOOKUPSNAP dentry */
		BUG_ON(!dn);
		BUG_ON(!req->r_locked_dir);
		BUG_ON(ceph_snap(req->r_locked_dir) != CEPH_SNAPDIR);
		ininfo = rinfo->targeti.in;
		vino.ino = le64_to_cpu(ininfo->ino);
		vino.snap = le64_to_cpu(ininfo->snapid);
		in = ceph_get_inode(sb, vino);
		if (IS_ERR(in)) {
			pr_err("fill_inode get_inode badness %llx.%llx\n",
			       vino.ino, vino.snap);
			err = PTR_ERR(in);
			d_delete(dn);
			goto done;
		}
		dout(" linking snapped dir %p to dn %p\n", in, dn);
		dn = splice_dentry(dn, in, NULL, true);
		if (IS_ERR(dn)) {
			err = PTR_ERR(dn);
			goto done;
		}
		req->r_dentry = dn;  /* may have spliced */
		ihold(in);
		rinfo->head->is_dentry = 1;  /* fool notrace handlers */
	}

	if (rinfo->head->is_target) {
		vino.ino = le64_to_cpu(rinfo->targeti.in->ino);
		vino.snap = le64_to_cpu(rinfo->targeti.in->snapid);

		if (in == NULL || ceph_ino(in) != vino.ino ||
		    ceph_snap(in) != vino.snap) {
			in = ceph_get_inode(sb, vino);
			if (IS_ERR(in)) {
				err = PTR_ERR(in);
				goto done;
			}
		}
		req->r_target_inode = in;

		err = fill_inode(in,
				 &rinfo->targeti, NULL,
				 session, req->r_request_started,
				 (le32_to_cpu(rinfo->head->result) == 0) ?
				 req->r_fmode : -1,
				 &req->r_caps_reservation);
		if (err < 0) {
			pr_err("fill_inode badness %p %llx.%llx\n",
			       in, ceph_vinop(in));
			goto done;
		}
	}

done:
	dout("fill_trace done err=%d\n", err);
	return err;
}

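/*
 * When the request was aborted we fall back to
 * readdir_prepopulate_inodes_only(): inode metadata is still applied so
 * cap invariants hold, but no dentries are created or moved (presumably
 * because the caller's directory locks are gone by then).
 */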
/*
 * Prepopulate our cache with readdir results, leases, etc.
 */
static int readdir_prepopulate_inodes_only(struct ceph_mds_request *req,
					   struct ceph_mds_session *session)
{
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	int i, err = 0;

	for (i = 0; i < rinfo->dir_nr; i++) {
		struct ceph_vino vino;
		struct inode *in;
		int rc;

		vino.ino = le64_to_cpu(rinfo->dir_in[i].in->ino);
		vino.snap = le64_to_cpu(rinfo->dir_in[i].in->snapid);

		in = ceph_get_inode(req->r_dentry->d_sb, vino);
		if (IS_ERR(in)) {
			err = PTR_ERR(in);
			dout("new_inode badness got %d\n", err);
			continue;
		}
		rc = fill_inode(in, &rinfo->dir_in[i], NULL, session,
				req->r_request_started, -1,
				&req->r_caps_reservation);
		if (rc < 0) {
			pr_err("fill_inode badness on %p got %d\n", in, rc);
			err = rc;
			continue;
		}
	}

	return err;
}

int ceph_readdir_prepopulate(struct ceph_mds_request *req,
			     struct ceph_mds_session *session)
{
	struct dentry *parent = req->r_dentry;
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	struct qstr dname;
	struct dentry *dn;
	struct inode *in;
	int err = 0, i;
	struct inode *snapdir = NULL;
	struct ceph_mds_request_head *rhead = req->r_request->front.iov_base;
	u64 frag = le32_to_cpu(rhead->args.readdir.frag);
	struct ceph_dentry_info *di;

	if (req->r_aborted)
		return readdir_prepopulate_inodes_only(req, session);

	if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) {
		snapdir = ceph_get_snapdir(parent->d_inode);
		parent = d_find_alias(snapdir);
		dout("readdir_prepopulate %d items under SNAPDIR dn %p\n",
		     rinfo->dir_nr, parent);
	} else {
		dout("readdir_prepopulate %d items under dn %p\n",
		     rinfo->dir_nr, parent);
		if (rinfo->dir_dir)
			ceph_fill_dirfrag(parent->d_inode, rinfo->dir_dir);
	}

	for (i = 0; i < rinfo->dir_nr; i++) {
		struct ceph_vino vino;

		dname.name = rinfo->dir_dname[i];
		dname.len = rinfo->dir_dname_len[i];
		dname.hash = full_name_hash(dname.name, dname.len);

		vino.ino = le64_to_cpu(rinfo->dir_in[i].in->ino);
		vino.snap = le64_to_cpu(rinfo->dir_in[i].in->snapid);

retry_lookup:
		dn = d_lookup(parent, &dname);
		dout("d_lookup on parent=%p name=%.*s got %p\n",
		     parent, dname.len, dname.name, dn);

		if (!dn) {
			dn = d_alloc(parent, &dname);
			dout("d_alloc %p '%.*s' = %p\n", parent,
			     dname.len, dname.name, dn);
			if (dn == NULL) {
				dout("d_alloc badness\n");
				err = -ENOMEM;
				goto out;
			}
			err = ceph_init_dentry(dn);
			if (err < 0) {
				dput(dn);
				goto out;
			}
		} else if (dn->d_inode &&
			   (ceph_ino(dn->d_inode) != vino.ino ||
			    ceph_snap(dn->d_inode) != vino.snap)) {
			dout(" dn %p points to wrong inode %p\n",
			     dn, dn->d_inode);
			d_delete(dn);
			dput(dn);
			goto retry_lookup;
		} else {
			/* reorder parent's d_subdirs */
			spin_lock(&parent->d_lock);
			spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
			list_move(&dn->d_u.d_child, &parent->d_subdirs);
			spin_unlock(&dn->d_lock);
			spin_unlock(&parent->d_lock);
		}

		di = dn->d_fsdata;
		di->offset = ceph_make_fpos(frag, i + req->r_readdir_offset);

		/* inode */
		if (dn->d_inode) {
			in = dn->d_inode;
		} else {
			in = ceph_get_inode(parent->d_sb, vino);
			if (IS_ERR(in)) {
				dout("new_inode badness\n");
				d_drop(dn);
				dput(dn);
				err = PTR_ERR(in);
				goto out;
			}
			dn = splice_dentry(dn, in, NULL, false);
			if (IS_ERR(dn))
				dn = NULL;
		}

		if (fill_inode(in, &rinfo->dir_in[i], NULL, session,
			       req->r_request_started, -1,
			       &req->r_caps_reservation) < 0) {
			pr_err("fill_inode badness on %p\n", in);
			goto next_item;
		}
		if (dn)
			update_dentry_lease(dn, rinfo->dir_dlease[i],
					    req->r_session,
					    req->r_request_started);
next_item:
		if (dn)
			dput(dn);
	}
	req->r_did_prepopulate = true;

out:
	if (snapdir) {
		iput(snapdir);
		dput(parent);
	}
	dout("readdir_prepopulate done\n");
	return err;
}

int ceph_inode_set_size(struct inode *inode, loff_t size)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret = 0;

	spin_lock(&ci->i_ceph_lock);
	dout("set_size %p %llu -> %llu\n", inode, inode->i_size, size);
	inode->i_size = size;
	inode->i_blocks = (size + (1 << 9) - 1) >> 9;

	/* tell the MDS if we are approaching max_size */
	if ((size << 1) >= ci->i_max_size &&
	    (ci->i_reported_size << 1) < ci->i_max_size)
		ret = 1;

	spin_unlock(&ci->i_ceph_lock);
	return ret;
}

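/*
 * Reference counting convention for the async helpers below
 * (ceph_queue_writeback/invalidate/vmtruncate): ihold() pins the inode
 * before queueing; the reference is dropped by the work function's
 * iput(), or immediately here if queue_work() reports the work was
 * already pending.
 */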
/*
 * Write back inode data in a worker thread.  (This can't be done
 * in the message handler context.)
 */
void ceph_queue_writeback(struct inode *inode)
{
	ihold(inode);
	if (queue_work(ceph_inode_to_client(inode)->wb_wq,
		       &ceph_inode(inode)->i_wb_work)) {
		dout("ceph_queue_writeback %p\n", inode);
	} else {
		dout("ceph_queue_writeback %p failed\n", inode);
		iput(inode);
	}
}

static void ceph_writeback_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_wb_work);
	struct inode *inode = &ci->vfs_inode;

	dout("writeback %p\n", inode);
	filemap_fdatawrite(&inode->i_data);
	iput(inode);
}

/*
 * queue an async invalidation
 */
void ceph_queue_invalidate(struct inode *inode)
{
	ihold(inode);
	if (queue_work(ceph_inode_to_client(inode)->pg_inv_wq,
		       &ceph_inode(inode)->i_pg_inv_work)) {
		dout("ceph_queue_invalidate %p\n", inode);
	} else {
		dout("ceph_queue_invalidate %p failed\n", inode);
		iput(inode);
	}
}

/*
 * Invalidate inode pages in a worker thread.  (This can't be done
 * in the message handler context.)
 */
static void ceph_invalidate_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_pg_inv_work);
	struct inode *inode = &ci->vfs_inode;
	u32 orig_gen;
	int check = 0;

	mutex_lock(&ci->i_truncate_mutex);
	spin_lock(&ci->i_ceph_lock);
	dout("invalidate_pages %p gen %d revoking %d\n", inode,
	     ci->i_rdcache_gen, ci->i_rdcache_revoking);
	if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
		/* nevermind! */
		spin_unlock(&ci->i_ceph_lock);
		mutex_unlock(&ci->i_truncate_mutex);
		goto out;
	}
	orig_gen = ci->i_rdcache_gen;
	spin_unlock(&ci->i_ceph_lock);

	truncate_inode_pages(inode->i_mapping, 0);

	spin_lock(&ci->i_ceph_lock);
	if (orig_gen == ci->i_rdcache_gen &&
	    orig_gen == ci->i_rdcache_revoking) {
		dout("invalidate_pages %p gen %d successful\n", inode,
		     ci->i_rdcache_gen);
		ci->i_rdcache_revoking--;
		check = 1;
	} else {
		dout("invalidate_pages %p gen %d raced, now %d revoking %d\n",
		     inode, orig_gen, ci->i_rdcache_gen,
		     ci->i_rdcache_revoking);
	}
	spin_unlock(&ci->i_ceph_lock);
	mutex_unlock(&ci->i_truncate_mutex);

	if (check)
		ceph_check_caps(ci, 0, NULL);
out:
	iput(inode);
}


/*
 * called by trunc_wq;
 *
 * We also truncate in a separate thread.
 */
static void ceph_vmtruncate_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_vmtruncate_work);
	struct inode *inode = &ci->vfs_inode;

	dout("vmtruncate_work %p\n", inode);
	__ceph_do_pending_vmtruncate(inode);
	iput(inode);
}

/*
 * Queue an async vmtruncate.  If we fail to queue work, we will handle
 * the truncation the next time we call __ceph_do_pending_vmtruncate.
 */
void ceph_queue_vmtruncate(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	ihold(inode);
	if (queue_work(ceph_sb_to_client(inode->i_sb)->trunc_wq,
		       &ci->i_vmtruncate_work)) {
		dout("ceph_queue_vmtruncate %p\n", inode);
	} else {
		dout("ceph_queue_vmtruncate %p failed, pending=%d\n",
		     inode, ci->i_truncate_pending);
		iput(inode);
	}
}

/*
 * Make sure any pending truncation is applied before doing anything
 * that may depend on it.
 */
void __ceph_do_pending_vmtruncate(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 to;
	int wrbuffer_refs, finish = 0;

	mutex_lock(&ci->i_truncate_mutex);
retry:
	spin_lock(&ci->i_ceph_lock);
	if (ci->i_truncate_pending == 0) {
		dout("__do_pending_vmtruncate %p none pending\n", inode);
		spin_unlock(&ci->i_ceph_lock);
		mutex_unlock(&ci->i_truncate_mutex);
		return;
	}

	/*
	 * make sure any dirty snapped pages are flushed before we
	 * possibly truncate them.. so write AND block!
	 */
	if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) {
		dout("__do_pending_vmtruncate %p flushing snaps first\n",
		     inode);
		spin_unlock(&ci->i_ceph_lock);
		filemap_write_and_wait_range(&inode->i_data, 0,
					     inode->i_sb->s_maxbytes);
		goto retry;
	}

	/* there should be no reader or writer */
	WARN_ON_ONCE(ci->i_rd_ref || ci->i_wr_ref);

	to = ci->i_truncate_size;
	wrbuffer_refs = ci->i_wrbuffer_ref;
	dout("__do_pending_vmtruncate %p (%d) to %lld\n", inode,
	     ci->i_truncate_pending, to);
	spin_unlock(&ci->i_ceph_lock);

	truncate_inode_pages(inode->i_mapping, to);

	spin_lock(&ci->i_ceph_lock);
	if (to == ci->i_truncate_size) {
		ci->i_truncate_pending = 0;
		finish = 1;
	}
	spin_unlock(&ci->i_ceph_lock);
	if (!finish)
		goto retry;

	mutex_unlock(&ci->i_truncate_mutex);

	if (wrbuffer_refs == 0)
		ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);

	wake_up_all(&ci->i_cap_wq);
}


/*
 * symlinks
 */
static void *ceph_sym_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	struct ceph_inode_info *ci = ceph_inode(dentry->d_inode);
	nd_set_link(nd, ci->i_symlink);
	return NULL;
}

static const struct inode_operations ceph_symlink_iops = {
	.readlink = generic_readlink,
	.follow_link = ceph_sym_follow_link,
	.setattr = ceph_setattr,
	.getattr = ceph_getattr,
	.setxattr = ceph_setxattr,
	.getxattr = ceph_getxattr,
	.listxattr = ceph_listxattr,
	.removexattr = ceph_removexattr,
};

/*
 * setattr
 */
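/*
 * The pattern throughout ceph_setattr(): if we hold the relevant EXCL
 * capability the change is applied locally and the cap is marked dirty
 * (flushed to the MDS later); otherwise the field is packed into a
 * SETATTR request and the corresponding SHARED/RD/WR caps are released
 * with it.
 */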
int ceph_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct inode *parent_inode;
	const unsigned int ia_valid = attr->ia_valid;
	struct ceph_mds_request *req;
	struct ceph_mds_client *mdsc = ceph_sb_to_client(dentry->d_sb)->mdsc;
	int issued;
	int release = 0, dirtied = 0;
	int mask = 0;
	int err = 0;
	int inode_dirty_flags = 0;

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

	err = inode_change_ok(inode, attr);
	if (err != 0)
		return err;

	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETATTR,
				       USE_AUTH_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);

	spin_lock(&ci->i_ceph_lock);
	issued = __ceph_caps_issued(ci, NULL);
	dout("setattr %p issued %s\n", inode, ceph_cap_string(issued));

	if (ia_valid & ATTR_UID) {
		dout("setattr %p uid %d -> %d\n", inode,
		     from_kuid(&init_user_ns, inode->i_uid),
		     from_kuid(&init_user_ns, attr->ia_uid));
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_uid = attr->ia_uid;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   !uid_eq(attr->ia_uid, inode->i_uid)) {
			req->r_args.setattr.uid = cpu_to_le32(
				from_kuid(&init_user_ns, attr->ia_uid));
			mask |= CEPH_SETATTR_UID;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}
	if (ia_valid & ATTR_GID) {
		dout("setattr %p gid %d -> %d\n", inode,
		     from_kgid(&init_user_ns, inode->i_gid),
		     from_kgid(&init_user_ns, attr->ia_gid));
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_gid = attr->ia_gid;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   !gid_eq(attr->ia_gid, inode->i_gid)) {
			req->r_args.setattr.gid = cpu_to_le32(
				from_kgid(&init_user_ns, attr->ia_gid));
			mask |= CEPH_SETATTR_GID;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}
	if (ia_valid & ATTR_MODE) {
		dout("setattr %p mode 0%o -> 0%o\n", inode, inode->i_mode,
		     attr->ia_mode);
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_mode = attr->ia_mode;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   attr->ia_mode != inode->i_mode) {
			req->r_args.setattr.mode = cpu_to_le32(attr->ia_mode);
			mask |= CEPH_SETATTR_MODE;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}

	if (ia_valid & ATTR_ATIME) {
		dout("setattr %p atime %ld.%ld -> %ld.%ld\n", inode,
		     inode->i_atime.tv_sec, inode->i_atime.tv_nsec,
		     attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec);
		if (issued & CEPH_CAP_FILE_EXCL) {
			ci->i_time_warp_seq++;
			inode->i_atime = attr->ia_atime;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_WR) &&
			   timespec_compare(&inode->i_atime,
					    &attr->ia_atime) < 0) {
			inode->i_atime = attr->ia_atime;
			dirtied |= CEPH_CAP_FILE_WR;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   !timespec_equal(&inode->i_atime, &attr->ia_atime)) {
			ceph_encode_timespec(&req->r_args.setattr.atime,
					     &attr->ia_atime);
			mask |= CEPH_SETATTR_ATIME;
			release |= CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}
	if (ia_valid & ATTR_MTIME) {
		dout("setattr %p mtime %ld.%ld -> %ld.%ld\n", inode,
		     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
		     attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec);
		if (issued & CEPH_CAP_FILE_EXCL) {
			ci->i_time_warp_seq++;
			inode->i_mtime = attr->ia_mtime;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_WR) &&
			   timespec_compare(&inode->i_mtime,
					    &attr->ia_mtime) < 0) {
			inode->i_mtime = attr->ia_mtime;
			dirtied |= CEPH_CAP_FILE_WR;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   !timespec_equal(&inode->i_mtime, &attr->ia_mtime)) {
			ceph_encode_timespec(&req->r_args.setattr.mtime,
					     &attr->ia_mtime);
			mask |= CEPH_SETATTR_MTIME;
			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}
	if (ia_valid & ATTR_SIZE) {
		dout("setattr %p size %lld -> %lld\n", inode,
		     inode->i_size, attr->ia_size);
		if (attr->ia_size > inode->i_sb->s_maxbytes) {
			err = -EINVAL;
			goto out;
		}
		if ((issued & CEPH_CAP_FILE_EXCL) &&
		    attr->ia_size > inode->i_size) {
			inode->i_size = attr->ia_size;
			inode->i_blocks =
				(attr->ia_size + (1 << 9) - 1) >> 9;
			inode->i_ctime = attr->ia_ctime;
			ci->i_reported_size = attr->ia_size;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   attr->ia_size != inode->i_size) {
			req->r_args.setattr.size = cpu_to_le64(attr->ia_size);
			req->r_args.setattr.old_size =
				cpu_to_le64(inode->i_size);
			mask |= CEPH_SETATTR_SIZE;
			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}

	/* these do nothing */
	if (ia_valid & ATTR_CTIME) {
		bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME|
					 ATTR_MODE|ATTR_UID|ATTR_GID)) == 0;
		dout("setattr %p ctime %ld.%ld -> %ld.%ld (%s)\n", inode,
		     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
		     attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
		     only ? "ctime only" : "ignored");
		inode->i_ctime = attr->ia_ctime;
		if (only) {
			/*
			 * if kernel wants to dirty ctime but nothing else,
			 * we need to choose a cap to dirty under, or do
			 * an almost-no-op setattr
			 */
			if (issued & CEPH_CAP_AUTH_EXCL)
				dirtied |= CEPH_CAP_AUTH_EXCL;
			else if (issued & CEPH_CAP_FILE_EXCL)
				dirtied |= CEPH_CAP_FILE_EXCL;
			else if (issued & CEPH_CAP_XATTR_EXCL)
				dirtied |= CEPH_CAP_XATTR_EXCL;
			else
				mask |= CEPH_SETATTR_CTIME;
		}
	}
	if (ia_valid & ATTR_FILE)
		dout("setattr %p ATTR_FILE ... hrm!\n", inode);

	if (dirtied) {
		inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied);
		inode->i_ctime = CURRENT_TIME;
	}

	release &= issued;
	spin_unlock(&ci->i_ceph_lock);

	if (inode_dirty_flags)
		__mark_inode_dirty(inode, inode_dirty_flags);

	if (mask) {
		req->r_inode = inode;
		ihold(inode);
		req->r_inode_drop = release;
		req->r_args.setattr.mask = cpu_to_le32(mask);
		req->r_num_caps = 1;
		parent_inode = ceph_get_dentry_parent_inode(dentry);
		err = ceph_mdsc_do_request(mdsc, parent_inode, req);
		iput(parent_inode);
	}
	dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err,
	     ceph_cap_string(dirtied), mask);

	ceph_mdsc_put_request(req);
	if (mask & CEPH_SETATTR_SIZE)
		__ceph_do_pending_vmtruncate(inode);
	return err;
out:
	spin_unlock(&ci->i_ceph_lock);
	ceph_mdsc_put_request(req);
	return err;
}

/*
 * Verify that we have a lease on the given mask.  If not,
 * do a getattr against an mds.
 */
int ceph_do_getattr(struct inode *inode, int mask)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(inode) == CEPH_SNAPDIR) {
		dout("do_getattr inode %p SNAPDIR\n", inode);
		return 0;
	}

	dout("do_getattr inode %p mask %s mode 0%o\n",
	     inode, ceph_cap_string(mask), inode->i_mode);
	if (ceph_caps_issued_mask(ceph_inode(inode), mask, 1))
		return 0;

	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->r_inode = inode;
	ihold(inode);
	req->r_num_caps = 1;
	req->r_args.getattr.mask = cpu_to_le32(mask);
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	ceph_mdsc_put_request(req);
	dout("do_getattr result=%d\n", err);
	return err;
}


/*
 * Check inode permissions.  We verify we have a valid value for
 * the AUTH cap, then call the generic handler.
 */
int ceph_permission(struct inode *inode, int mask)
{
	int err;

	if (mask & MAY_NOT_BLOCK)
		return -ECHILD;

	err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED);

	if (!err)
		err = generic_permission(inode, mask);
	return err;
}

/*
 * Get all attributes.  Hopefully someday we'll have a statlite()
 * and can limit the fields we require to be accurate.
 */
int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry,
		 struct kstat *stat)
{
	struct inode *inode = dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int err;

	err = ceph_do_getattr(inode, CEPH_STAT_CAP_INODE_ALL);
	if (!err) {
		generic_fillattr(inode, stat);
		stat->ino = ceph_translate_ino(inode->i_sb, inode->i_ino);
		if (ceph_snap(inode) != CEPH_NOSNAP)
			stat->dev = ceph_snap(inode);
		else
			stat->dev = 0;
		if (S_ISDIR(inode->i_mode)) {
			if (ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb),
						RBYTES))
				stat->size = ci->i_rbytes;
			else
				stat->size = ci->i_files + ci->i_subdirs;
			stat->blocks = 0;
			stat->blksize = 65536;
		}
	}
	return err;
}