ceph: avoid d_parent in ceph_dentry_hash; fix ceph_encode_fh() hashing bug
[linux-2.6-block.git] / fs / ceph / inode.c
#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/namei.h>
#include <linux/writeback.h>
#include <linux/vmalloc.h>
#include <linux/pagevec.h>

#include "super.h"
#include "mds_client.h"
#include <linux/ceph/decode.h>

/*
 * Ceph inode operations
 *
 * Implement basic inode helpers (get, alloc) and inode ops (getattr,
 * setattr, etc.), xattr helpers, and helpers for assimilating
 * metadata returned by the MDS into our cache.
 *
 * Also define helpers for doing asynchronous writeback, invalidation,
 * and truncation for the benefit of those who can't afford to block
 * (typically because they are in the message handler path).
 */

static const struct inode_operations ceph_symlink_iops;

static void ceph_invalidate_work(struct work_struct *work);
static void ceph_writeback_work(struct work_struct *work);
static void ceph_vmtruncate_work(struct work_struct *work);

/*
 * find or create an inode, given the ceph ino number
 */
static int ceph_set_ino_cb(struct inode *inode, void *data)
{
	ceph_inode(inode)->i_vino = *(struct ceph_vino *)data;
	inode->i_ino = ceph_vino_to_ino(*(struct ceph_vino *)data);
	return 0;
}

struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino)
{
	struct inode *inode;
	ino_t t = ceph_vino_to_ino(vino);

	inode = iget5_locked(sb, t, ceph_ino_compare, ceph_set_ino_cb, &vino);
	if (inode == NULL)
		return ERR_PTR(-ENOMEM);
	if (inode->i_state & I_NEW) {
		dout("get_inode created new inode %p %llx.%llx ino %llx\n",
		     inode, ceph_vinop(inode), (u64)inode->i_ino);
		unlock_new_inode(inode);
	}

	dout("get_inode on %lu=%llx.%llx got %p\n", inode->i_ino, vino.ino,
	     vino.snap, inode);
	return inode;
}

/*
 * get/construct snapdir inode for a given directory
 */
struct inode *ceph_get_snapdir(struct inode *parent)
{
	struct ceph_vino vino = {
		.ino = ceph_ino(parent),
		.snap = CEPH_SNAPDIR,
	};
	struct inode *inode = ceph_get_inode(parent->i_sb, vino);
	struct ceph_inode_info *ci = ceph_inode(inode);

	BUG_ON(!S_ISDIR(parent->i_mode));
	if (IS_ERR(inode))
		return inode;
	inode->i_mode = parent->i_mode;
	inode->i_uid = parent->i_uid;
	inode->i_gid = parent->i_gid;
	inode->i_op = &ceph_dir_iops;
	inode->i_fop = &ceph_dir_fops;
	ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
	ci->i_rbytes = 0;
	return inode;
}

const struct inode_operations ceph_file_iops = {
	.permission = ceph_permission,
	.setattr = ceph_setattr,
	.getattr = ceph_getattr,
	.setxattr = ceph_setxattr,
	.getxattr = ceph_getxattr,
	.listxattr = ceph_listxattr,
	.removexattr = ceph_removexattr,
};


/*
 * We use a 'frag tree' to keep track of the MDS's directory fragments
 * for a given inode (usually there is just a single fragment). We
 * need to know when a child frag is delegated to a new MDS, or when
 * it is flagged as replicated, so we can direct our requests
 * accordingly.
 */

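/*
 * Illustrative sketch (not compiled in): how a frag id carves up the
 * 24-bit hash space.  This assumes the ceph_frag.h helpers behave as
 * their use in this file suggests (top 8 bits of a frag = number of
 * significant bits, low 24 bits = value).
 */
#if 0
static void frag_encoding_example(void)
{
	u32 root = ceph_frag_make(0, 0);              /* whole hash space */
	u32 left = ceph_frag_make_child(root, 1, 0);  /* 1-bit split, child 0 */
	u32 right = ceph_frag_make_child(root, 1, 1); /* 1-bit split, child 1 */

	/* any 24-bit hash value lands in exactly one of the two children */
	WARN_ON(ceph_frag_contains_value(left, 0x123456) ==
		ceph_frag_contains_value(right, 0x123456));
}
#endif
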
/*
 * find/create a frag in the tree
 */
static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci,
						    u32 f)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ceph_inode_frag *frag;
	int c;

	p = &ci->i_fragtree.rb_node;
	while (*p) {
		parent = *p;
		frag = rb_entry(parent, struct ceph_inode_frag, node);
		c = ceph_frag_compare(f, frag->frag);
		if (c < 0)
			p = &(*p)->rb_left;
		else if (c > 0)
			p = &(*p)->rb_right;
		else
			return frag;
	}

	frag = kmalloc(sizeof(*frag), GFP_NOFS);
	if (!frag) {
		pr_err("__get_or_create_frag ENOMEM on %p %llx.%llx "
		       "frag %x\n", &ci->vfs_inode,
		       ceph_vinop(&ci->vfs_inode), f);
		return ERR_PTR(-ENOMEM);
	}
	frag->frag = f;
	frag->split_by = 0;
	frag->mds = -1;
	frag->ndist = 0;

	rb_link_node(&frag->node, parent, p);
	rb_insert_color(&frag->node, &ci->i_fragtree);

	dout("get_or_create_frag added %llx.%llx frag %x\n",
	     ceph_vinop(&ci->vfs_inode), f);
	return frag;
}

/*
 * find a specific frag @f
 */
struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci, u32 f)
{
	struct rb_node *n = ci->i_fragtree.rb_node;

	while (n) {
		struct ceph_inode_frag *frag =
			rb_entry(n, struct ceph_inode_frag, node);
		int c = ceph_frag_compare(f, frag->frag);
		if (c < 0)
			n = n->rb_left;
		else if (c > 0)
			n = n->rb_right;
		else
			return frag;
	}
	return NULL;
}

/*
 * Choose frag containing the given value @v.  If @pfrag is
 * specified, copy the frag delegation info to the caller if
 * it is present.
 */
u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
		     struct ceph_inode_frag *pfrag,
		     int *found)
{
	u32 t = ceph_frag_make(0, 0);
	struct ceph_inode_frag *frag;
	unsigned nway, i;
	u32 n;

	if (found)
		*found = 0;

	mutex_lock(&ci->i_fragtree_mutex);
	while (1) {
		WARN_ON(!ceph_frag_contains_value(t, v));
		frag = __ceph_find_frag(ci, t);
		if (!frag)
			break; /* t is a leaf */
		if (frag->split_by == 0) {
			if (pfrag)
				memcpy(pfrag, frag, sizeof(*pfrag));
			if (found)
				*found = 1;
			break;
		}

		/* choose child */
		nway = 1 << frag->split_by;
		dout("choose_frag(%x) %x splits by %d (%d ways)\n", v, t,
		     frag->split_by, nway);
		for (i = 0; i < nway; i++) {
			n = ceph_frag_make_child(t, frag->split_by, i);
			if (ceph_frag_contains_value(n, v)) {
				t = n;
				break;
			}
		}
		BUG_ON(i == nway);
	}
	dout("choose_frag(%x) = %x\n", v, t);

	mutex_unlock(&ci->i_fragtree_mutex);
	return t;
}
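
/*
 * Usage sketch (not compiled in): a caller hashes a name or offset
 * and asks which leaf fragment covers it; the variables here are
 * hypothetical.
 */
#if 0
{
	struct ceph_inode_frag fg;
	int found;
	u32 hash = 0x00abcdef;	/* e.g. a dentry name hash */
	u32 leaf = ceph_choose_frag(ci, hash, &fg, &found);

	/* if found != 0, fg.mds says which mds to direct the request to */
}
#endif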

/*
 * Process dirfrag (delegation) info from the mds.  Include leaf
 * fragment in tree ONLY if ndist > 0.  Otherwise, only
 * branches/splits are included in i_fragtree.
 */
static int ceph_fill_dirfrag(struct inode *inode,
			     struct ceph_mds_reply_dirfrag *dirinfo)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_frag *frag;
	u32 id = le32_to_cpu(dirinfo->frag);
	int mds = le32_to_cpu(dirinfo->auth);
	int ndist = le32_to_cpu(dirinfo->ndist);
	int i;
	int err = 0;

	mutex_lock(&ci->i_fragtree_mutex);
	if (ndist == 0) {
		/* no delegation info needed. */
		frag = __ceph_find_frag(ci, id);
		if (!frag)
			goto out;
		if (frag->split_by == 0) {
			/* tree leaf, remove */
			dout("fill_dirfrag removed %llx.%llx frag %x"
			     " (no ref)\n", ceph_vinop(inode), id);
			rb_erase(&frag->node, &ci->i_fragtree);
			kfree(frag);
		} else {
			/* tree branch, keep and clear */
			dout("fill_dirfrag cleared %llx.%llx frag %x"
			     " referral\n", ceph_vinop(inode), id);
			frag->mds = -1;
			frag->ndist = 0;
		}
		goto out;
	}


	/* find/add this frag to store mds delegation info */
	frag = __get_or_create_frag(ci, id);
	if (IS_ERR(frag)) {
		/* this is not the end of the world; we can continue
		   with bad/inaccurate delegation info */
		pr_err("fill_dirfrag ENOMEM on mds ref %llx.%llx fg %x\n",
		       ceph_vinop(inode), le32_to_cpu(dirinfo->frag));
		err = -ENOMEM;
		goto out;
	}

	frag->mds = mds;
	frag->ndist = min_t(u32, ndist, CEPH_MAX_DIRFRAG_REP);
	for (i = 0; i < frag->ndist; i++)
		frag->dist[i] = le32_to_cpu(dirinfo->dist[i]);
	dout("fill_dirfrag %llx.%llx frag %x ndist=%d\n",
	     ceph_vinop(inode), frag->frag, frag->ndist);

out:
	mutex_unlock(&ci->i_fragtree_mutex);
	return err;
}


/*
 * initialize a newly allocated inode.
 */
struct inode *ceph_alloc_inode(struct super_block *sb)
{
	struct ceph_inode_info *ci;
	int i;

	ci = kmem_cache_alloc(ceph_inode_cachep, GFP_NOFS);
	if (!ci)
		return NULL;

	dout("alloc_inode %p\n", &ci->vfs_inode);

	ci->i_version = 0;
	ci->i_time_warp_seq = 0;
	ci->i_ceph_flags = 0;
	ci->i_release_count = 0;
	ci->i_symlink = NULL;

	memset(&ci->i_dir_layout, 0, sizeof(ci->i_dir_layout));

	ci->i_fragtree = RB_ROOT;
	mutex_init(&ci->i_fragtree_mutex);

	ci->i_xattrs.blob = NULL;
	ci->i_xattrs.prealloc_blob = NULL;
	ci->i_xattrs.dirty = false;
	ci->i_xattrs.index = RB_ROOT;
	ci->i_xattrs.count = 0;
	ci->i_xattrs.names_size = 0;
	ci->i_xattrs.vals_size = 0;
	ci->i_xattrs.version = 0;
	ci->i_xattrs.index_version = 0;

	ci->i_caps = RB_ROOT;
	ci->i_auth_cap = NULL;
	ci->i_dirty_caps = 0;
	ci->i_flushing_caps = 0;
	INIT_LIST_HEAD(&ci->i_dirty_item);
	INIT_LIST_HEAD(&ci->i_flushing_item);
	ci->i_cap_flush_seq = 0;
	ci->i_cap_flush_last_tid = 0;
	memset(&ci->i_cap_flush_tid, 0, sizeof(ci->i_cap_flush_tid));
	init_waitqueue_head(&ci->i_cap_wq);
	ci->i_hold_caps_min = 0;
	ci->i_hold_caps_max = 0;
	INIT_LIST_HEAD(&ci->i_cap_delay_list);
	ci->i_cap_exporting_mds = 0;
	ci->i_cap_exporting_mseq = 0;
	ci->i_cap_exporting_issued = 0;
	INIT_LIST_HEAD(&ci->i_cap_snaps);
	ci->i_head_snapc = NULL;
	ci->i_snap_caps = 0;

	for (i = 0; i < CEPH_FILE_MODE_NUM; i++)
		ci->i_nr_by_mode[i] = 0;

	ci->i_truncate_seq = 0;
	ci->i_truncate_size = 0;
	ci->i_truncate_pending = 0;

	ci->i_max_size = 0;
	ci->i_reported_size = 0;
	ci->i_wanted_max_size = 0;
	ci->i_requested_max_size = 0;

	ci->i_pin_ref = 0;
	ci->i_rd_ref = 0;
	ci->i_rdcache_ref = 0;
	ci->i_wr_ref = 0;
	ci->i_wb_ref = 0;
	ci->i_wrbuffer_ref = 0;
	ci->i_wrbuffer_ref_head = 0;
	ci->i_shared_gen = 0;
	ci->i_rdcache_gen = 0;
	ci->i_rdcache_revoking = 0;

	INIT_LIST_HEAD(&ci->i_unsafe_writes);
	INIT_LIST_HEAD(&ci->i_unsafe_dirops);
	spin_lock_init(&ci->i_unsafe_lock);

	ci->i_snap_realm = NULL;
	INIT_LIST_HEAD(&ci->i_snap_realm_item);
	INIT_LIST_HEAD(&ci->i_snap_flush_item);

	INIT_WORK(&ci->i_wb_work, ceph_writeback_work);
	INIT_WORK(&ci->i_pg_inv_work, ceph_invalidate_work);

	INIT_WORK(&ci->i_vmtruncate_work, ceph_vmtruncate_work);

	return &ci->vfs_inode;
}

static void ceph_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	struct ceph_inode_info *ci = ceph_inode(inode);

	INIT_LIST_HEAD(&inode->i_dentry);
	kmem_cache_free(ceph_inode_cachep, ci);
}

void ceph_destroy_inode(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_frag *frag;
	struct rb_node *n;

	dout("destroy_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode));

	ceph_queue_caps_release(inode);

	/*
	 * we may still have a snap_realm reference if there are stray
	 * caps in i_cap_exporting_issued or i_snap_caps.
	 */
	if (ci->i_snap_realm) {
		struct ceph_mds_client *mdsc =
			ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
		struct ceph_snap_realm *realm = ci->i_snap_realm;

		dout(" dropping residual ref to snap realm %p\n", realm);
		spin_lock(&realm->inodes_with_caps_lock);
		list_del_init(&ci->i_snap_realm_item);
		spin_unlock(&realm->inodes_with_caps_lock);
		ceph_put_snap_realm(mdsc, realm);
	}

	kfree(ci->i_symlink);
	while ((n = rb_first(&ci->i_fragtree)) != NULL) {
		frag = rb_entry(n, struct ceph_inode_frag, node);
		rb_erase(n, &ci->i_fragtree);
		kfree(frag);
	}

	__ceph_destroy_xattrs(ci);
	if (ci->i_xattrs.blob)
		ceph_buffer_put(ci->i_xattrs.blob);
	if (ci->i_xattrs.prealloc_blob)
		ceph_buffer_put(ci->i_xattrs.prealloc_blob);

	call_rcu(&inode->i_rcu, ceph_i_callback);
}


/*
 * Helpers to fill in size, ctime, mtime, and atime.  We have to be
 * careful because either the client or MDS may have more up to date
 * info, depending on which capabilities are held, and whether
 * time_warp_seq or truncate_seq have increased.  (Ordinarily, mtime
 * and size are monotonically increasing, except when utimes() or
 * truncate() increments the corresponding _seq values.)
 */
int ceph_fill_file_size(struct inode *inode, int issued,
			u32 truncate_seq, u64 truncate_size, u64 size)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int queue_trunc = 0;

	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 ||
	    (truncate_seq == ci->i_truncate_seq && size > inode->i_size)) {
		dout("size %lld -> %llu\n", inode->i_size, size);
		inode->i_size = size;
		inode->i_blocks = (size + (1<<9) - 1) >> 9;
		ci->i_reported_size = size;
		if (truncate_seq != ci->i_truncate_seq) {
			dout("truncate_seq %u -> %u\n",
			     ci->i_truncate_seq, truncate_seq);
			ci->i_truncate_seq = truncate_seq;
			/*
			 * If we hold relevant caps, or in the case where we're
			 * not the only client referencing this file and we
			 * don't hold those caps, then we need to check whether
			 * the file is either opened or mmaped
			 */
			if ((issued & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_RD|
				       CEPH_CAP_FILE_WR|CEPH_CAP_FILE_BUFFER|
				       CEPH_CAP_FILE_EXCL|
				       CEPH_CAP_FILE_LAZYIO)) ||
			    mapping_mapped(inode->i_mapping) ||
			    __ceph_caps_file_wanted(ci)) {
				ci->i_truncate_pending++;
				queue_trunc = 1;
			}
		}
	}
	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) >= 0 &&
	    ci->i_truncate_size != truncate_size) {
		dout("truncate_size %lld -> %llu\n", ci->i_truncate_size,
		     truncate_size);
		ci->i_truncate_size = truncate_size;
	}
	return queue_trunc;
}
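
/*
 * Worked example (values hypothetical): with i_truncate_seq 3 and
 * i_size 4096, an MDS reply carrying truncate_seq 3 / size 1000 is
 * ignored as stale (same seq, smaller size), while truncate_seq 4 /
 * size 1000 is applied and may queue a vmtruncate, since the seq
 * bump proves an intervening truncate().
 */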

void ceph_fill_file_time(struct inode *inode, int issued,
			 u64 time_warp_seq, struct timespec *ctime,
			 struct timespec *mtime, struct timespec *atime)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int warn = 0;

	if (issued & (CEPH_CAP_FILE_EXCL|
		      CEPH_CAP_FILE_WR|
		      CEPH_CAP_FILE_BUFFER|
		      CEPH_CAP_AUTH_EXCL|
		      CEPH_CAP_XATTR_EXCL)) {
		if (timespec_compare(ctime, &inode->i_ctime) > 0) {
			dout("ctime %ld.%09ld -> %ld.%09ld inc w/ cap\n",
			     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
			     ctime->tv_sec, ctime->tv_nsec);
			inode->i_ctime = *ctime;
		}
		if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
			/* the MDS did a utimes() */
			dout("mtime %ld.%09ld -> %ld.%09ld "
			     "tw %d -> %d\n",
			     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
			     mtime->tv_sec, mtime->tv_nsec,
			     ci->i_time_warp_seq, (int)time_warp_seq);

			inode->i_mtime = *mtime;
			inode->i_atime = *atime;
			ci->i_time_warp_seq = time_warp_seq;
		} else if (time_warp_seq == ci->i_time_warp_seq) {
			/* nobody did utimes(); take the max */
			if (timespec_compare(mtime, &inode->i_mtime) > 0) {
				dout("mtime %ld.%09ld -> %ld.%09ld inc\n",
				     inode->i_mtime.tv_sec,
				     inode->i_mtime.tv_nsec,
				     mtime->tv_sec, mtime->tv_nsec);
				inode->i_mtime = *mtime;
			}
			if (timespec_compare(atime, &inode->i_atime) > 0) {
				dout("atime %ld.%09ld -> %ld.%09ld inc\n",
				     inode->i_atime.tv_sec,
				     inode->i_atime.tv_nsec,
				     atime->tv_sec, atime->tv_nsec);
				inode->i_atime = *atime;
			}
		} else if (issued & CEPH_CAP_FILE_EXCL) {
			/* we did a utimes(); ignore mds values */
		} else {
			warn = 1;
		}
	} else {
		/* we have no write|excl caps; whatever the MDS says is true */
		if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) {
			inode->i_ctime = *ctime;
			inode->i_mtime = *mtime;
			inode->i_atime = *atime;
			ci->i_time_warp_seq = time_warp_seq;
		} else {
			warn = 1;
		}
	}
	if (warn) /* time_warp_seq shouldn't go backwards */
		dout("%p mds time_warp_seq %llu < %u\n",
		     inode, time_warp_seq, ci->i_time_warp_seq);
}
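
/*
 * Worked example (values hypothetical): holding FILE_EXCL, an MDS
 * reply with our time_warp_seq but an older mtime is ignored (we
 * take the max; we are the writer), while a reply with a larger
 * time_warp_seq means someone ran utimes() and we adopt the MDS
 * mtime/atime verbatim.
 */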

/*
 * Populate an inode based on info from mds.  May be called on new or
 * existing inodes.
 */
static int fill_inode(struct inode *inode,
		      struct ceph_mds_reply_info_in *iinfo,
		      struct ceph_mds_reply_dirfrag *dirinfo,
		      struct ceph_mds_session *session,
		      unsigned long ttl_from, int cap_fmode,
		      struct ceph_cap_reservation *caps_reservation)
{
	struct ceph_mds_reply_inode *info = iinfo->in;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int i;
	int issued = 0, implemented;
	int updating_inode = 0;
	struct timespec mtime, atime, ctime;
	u32 nsplits;
	struct ceph_buffer *xattr_blob = NULL;
	int err = 0;
	int queue_trunc = 0;

	dout("fill_inode %p ino %llx.%llx v %llu had %llu\n",
	     inode, ceph_vinop(inode), le64_to_cpu(info->version),
	     ci->i_version);

	/*
	 * prealloc xattr data, if it looks like we'll need it.  only
	 * if len > 4 (meaning there are actually xattrs; the first 4
	 * bytes are the xattr count).
	 */
	if (iinfo->xattr_len > 4) {
		xattr_blob = ceph_buffer_new(iinfo->xattr_len, GFP_NOFS);
		if (!xattr_blob)
			pr_err("fill_inode ENOMEM xattr blob %d bytes\n",
			       iinfo->xattr_len);
	}

	spin_lock(&inode->i_lock);

	/*
	 * provided version will be odd if inode value is projected,
	 * even if stable.  skip the update if we have newer stable
	 * info (ours>=theirs, e.g. due to racing mds replies), unless
	 * we are getting projected (unstable) info (in which case the
	 * version is odd, and we want ours>theirs).
	 *   us   them
	 *   2    2     skip
	 *   3    2     skip
	 *   3    3     update
	 */
	if (le64_to_cpu(info->version) > 0 &&
	    (ci->i_version & ~1) >= le64_to_cpu(info->version))
		goto no_change;

	updating_inode = 1;
	issued = __ceph_caps_issued(ci, &implemented);
	issued |= implemented | __ceph_caps_dirty(ci);

	/* update inode */
	ci->i_version = le64_to_cpu(info->version);
	inode->i_version++;
	inode->i_rdev = le32_to_cpu(info->rdev);

	if ((issued & CEPH_CAP_AUTH_EXCL) == 0) {
		inode->i_mode = le32_to_cpu(info->mode);
		inode->i_uid = le32_to_cpu(info->uid);
		inode->i_gid = le32_to_cpu(info->gid);
		dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
		     inode->i_uid, inode->i_gid);
	}

	if ((issued & CEPH_CAP_LINK_EXCL) == 0)
		inode->i_nlink = le32_to_cpu(info->nlink);

	/* be careful with mtime, atime, size */
	ceph_decode_timespec(&atime, &info->atime);
	ceph_decode_timespec(&mtime, &info->mtime);
	ceph_decode_timespec(&ctime, &info->ctime);
	queue_trunc = ceph_fill_file_size(inode, issued,
					  le32_to_cpu(info->truncate_seq),
					  le64_to_cpu(info->truncate_size),
					  le64_to_cpu(info->size));
	ceph_fill_file_time(inode, issued,
			    le32_to_cpu(info->time_warp_seq),
			    &ctime, &mtime, &atime);

	/* only update max_size on auth cap */
	if ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
	    ci->i_max_size != le64_to_cpu(info->max_size)) {
		dout("max_size %lld -> %llu\n", ci->i_max_size,
		     le64_to_cpu(info->max_size));
		ci->i_max_size = le64_to_cpu(info->max_size);
	}

	ci->i_layout = info->layout;
	inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;

	/* xattrs */
	/* note that if i_xattrs.len <= 4, i_xattrs.data will still be NULL. */
	if ((issued & CEPH_CAP_XATTR_EXCL) == 0 &&
	    le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
		if (ci->i_xattrs.blob)
			ceph_buffer_put(ci->i_xattrs.blob);
		ci->i_xattrs.blob = xattr_blob;
		if (xattr_blob)
			memcpy(ci->i_xattrs.blob->vec.iov_base,
			       iinfo->xattr_data, iinfo->xattr_len);
		ci->i_xattrs.version = le64_to_cpu(info->xattr_version);
		xattr_blob = NULL;
	}

	inode->i_mapping->a_ops = &ceph_aops;
	inode->i_mapping->backing_dev_info =
		&ceph_sb_to_client(inode->i_sb)->backing_dev_info;

	switch (inode->i_mode & S_IFMT) {
	case S_IFIFO:
	case S_IFBLK:
	case S_IFCHR:
	case S_IFSOCK:
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
		inode->i_op = &ceph_file_iops;
		break;
	case S_IFREG:
		inode->i_op = &ceph_file_iops;
		inode->i_fop = &ceph_file_fops;
		break;
	case S_IFLNK:
		inode->i_op = &ceph_symlink_iops;
		if (!ci->i_symlink) {
			int symlen = iinfo->symlink_len;
			char *sym;

			BUG_ON(symlen != inode->i_size);
			spin_unlock(&inode->i_lock);

			err = -ENOMEM;
			sym = kmalloc(symlen+1, GFP_NOFS);
			if (!sym)
				goto out;
			memcpy(sym, iinfo->symlink, symlen);
			sym[symlen] = 0;

			spin_lock(&inode->i_lock);
			if (!ci->i_symlink)
				ci->i_symlink = sym;
			else
				kfree(sym); /* lost a race */
		}
		break;
	case S_IFDIR:
		inode->i_op = &ceph_dir_iops;
		inode->i_fop = &ceph_dir_fops;

		ci->i_dir_layout = iinfo->dir_layout;

		ci->i_files = le64_to_cpu(info->files);
		ci->i_subdirs = le64_to_cpu(info->subdirs);
		ci->i_rbytes = le64_to_cpu(info->rbytes);
		ci->i_rfiles = le64_to_cpu(info->rfiles);
		ci->i_rsubdirs = le64_to_cpu(info->rsubdirs);
		ceph_decode_timespec(&ci->i_rctime, &info->rctime);
		break;
	default:
		pr_err("fill_inode %llx.%llx BAD mode 0%o\n",
		       ceph_vinop(inode), inode->i_mode);
	}

no_change:
	spin_unlock(&inode->i_lock);

	/* queue truncate if we saw i_size decrease */
	if (queue_trunc)
		ceph_queue_vmtruncate(inode);

	/* populate frag tree */
	/* FIXME: move me up, if/when version reflects fragtree changes */
	nsplits = le32_to_cpu(info->fragtree.nsplits);
	mutex_lock(&ci->i_fragtree_mutex);
	for (i = 0; i < nsplits; i++) {
		u32 id = le32_to_cpu(info->fragtree.splits[i].frag);
		struct ceph_inode_frag *frag = __get_or_create_frag(ci, id);

		if (IS_ERR(frag))
			continue;
		frag->split_by = le32_to_cpu(info->fragtree.splits[i].by);
		dout(" frag %x split by %d\n", frag->frag, frag->split_by);
	}
	mutex_unlock(&ci->i_fragtree_mutex);

	/* were we issued a capability? */
	if (info->cap.caps) {
		if (ceph_snap(inode) == CEPH_NOSNAP) {
			ceph_add_cap(inode, session,
				     le64_to_cpu(info->cap.cap_id),
				     cap_fmode,
				     le32_to_cpu(info->cap.caps),
				     le32_to_cpu(info->cap.wanted),
				     le32_to_cpu(info->cap.seq),
				     le32_to_cpu(info->cap.mseq),
				     le64_to_cpu(info->cap.realm),
				     info->cap.flags,
				     caps_reservation);
		} else {
			spin_lock(&inode->i_lock);
			dout(" %p got snap_caps %s\n", inode,
			     ceph_cap_string(le32_to_cpu(info->cap.caps)));
			ci->i_snap_caps |= le32_to_cpu(info->cap.caps);
			if (cap_fmode >= 0)
				__ceph_get_fmode(ci, cap_fmode);
			spin_unlock(&inode->i_lock);
		}
	} else if (cap_fmode >= 0) {
		pr_warning("mds issued no caps on %llx.%llx\n",
			   ceph_vinop(inode));
		__ceph_get_fmode(ci, cap_fmode);
	}

	/* set dir completion flag? */
	if (S_ISDIR(inode->i_mode) &&
	    updating_inode &&                 /* didn't jump to no_change */
	    ci->i_files == 0 && ci->i_subdirs == 0 &&
	    ceph_snap(inode) == CEPH_NOSNAP &&
	    (le32_to_cpu(info->cap.caps) & CEPH_CAP_FILE_SHARED) &&
	    (issued & CEPH_CAP_FILE_EXCL) == 0 &&
	    (ci->i_ceph_flags & CEPH_I_COMPLETE) == 0) {
		dout(" marking %p complete (empty)\n", inode);
		/* ci->i_ceph_flags |= CEPH_I_COMPLETE; */
		ci->i_max_offset = 2;
	}

	/* update delegation info? */
	if (dirinfo)
		ceph_fill_dirfrag(inode, dirinfo);

	err = 0;

out:
	if (xattr_blob)
		ceph_buffer_put(xattr_blob);
	return err;
}

/*
 * caller should hold session s_mutex.
 */
static void update_dentry_lease(struct dentry *dentry,
				struct ceph_mds_reply_lease *lease,
				struct ceph_mds_session *session,
				unsigned long from_time)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	long unsigned duration = le32_to_cpu(lease->duration_ms);
	long unsigned ttl = from_time + (duration * HZ) / 1000;
	long unsigned half_ttl = from_time + (duration * HZ / 2) / 1000;
	struct inode *dir;

	/* only track leases on regular dentries */
	if (dentry->d_op != &ceph_dentry_ops)
		return;

	spin_lock(&dentry->d_lock);
	dout("update_dentry_lease %p duration %lu ms ttl %lu\n",
	     dentry, duration, ttl);

	/* make lease_rdcache_gen match directory */
	dir = dentry->d_parent->d_inode;
	di->lease_shared_gen = ceph_inode(dir)->i_shared_gen;

	if (duration == 0)
		goto out_unlock;

	if (di->lease_gen == session->s_cap_gen &&
	    time_before(ttl, dentry->d_time))
		goto out_unlock;  /* we already have a newer lease. */

	if (di->lease_session && di->lease_session != session)
		goto out_unlock;

	ceph_dentry_lru_touch(dentry);

	if (!di->lease_session)
		di->lease_session = ceph_get_mds_session(session);
	di->lease_gen = session->s_cap_gen;
	di->lease_seq = le32_to_cpu(lease->seq);
	di->lease_renew_after = half_ttl;
	di->lease_renew_from = 0;
	dentry->d_time = ttl;
out_unlock:
	spin_unlock(&dentry->d_lock);
	return;
}
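
/*
 * Worked example (numbers hypothetical): with HZ=100 and a 30000 ms
 * lease received at from_time=j, ttl = j + 30000*100/1000 = j + 3000
 * jiffies and half_ttl = j + 1500 jiffies, so renewal is attempted
 * halfway through the lease term.
 */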

/*
 * Set dentry's directory position based on the current dir's max, and
 * order it in d_subdirs, so that dcache_readdir behaves.
 */
static void ceph_set_dentry_offset(struct dentry *dn)
{
	struct dentry *dir = dn->d_parent;
	struct inode *inode = dn->d_parent->d_inode;
	struct ceph_dentry_info *di;

	BUG_ON(!inode);

	di = ceph_dentry(dn);

	spin_lock(&inode->i_lock);
	if ((ceph_inode(inode)->i_ceph_flags & CEPH_I_COMPLETE) == 0) {
		spin_unlock(&inode->i_lock);
		return;
	}
	di->offset = ceph_inode(inode)->i_max_offset++;
	spin_unlock(&inode->i_lock);

	spin_lock(&dir->d_lock);
	spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
	list_move(&dn->d_u.d_child, &dir->d_subdirs);
	dout("set_dentry_offset %p %lld (%p %p)\n", dn, di->offset,
	     dn->d_u.d_child.prev, dn->d_u.d_child.next);
	spin_unlock(&dn->d_lock);
	spin_unlock(&dir->d_lock);
}

/*
 * splice a dentry to an inode.
 * caller must hold directory i_mutex for this to be safe.
 *
 * we will only rehash the resulting dentry if @prehash is
 * true; @prehash will be set to false (for the benefit of
 * the caller) if we fail.
 */
static struct dentry *splice_dentry(struct dentry *dn, struct inode *in,
				    bool *prehash, bool set_offset)
{
	struct dentry *realdn;

	BUG_ON(dn->d_inode);

	/* dn must be unhashed */
	if (!d_unhashed(dn))
		d_drop(dn);
	realdn = d_materialise_unique(dn, in);
	if (IS_ERR(realdn)) {
		pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n",
		       PTR_ERR(realdn), dn, in, ceph_vinop(in));
		if (prehash)
			*prehash = false; /* don't rehash on error */
		dn = realdn; /* note realdn contains the error */
		goto out;
	} else if (realdn) {
		dout("dn %p (%d) spliced with %p (%d) "
		     "inode %p ino %llx.%llx\n",
		     dn, dn->d_count,
		     realdn, realdn->d_count,
		     realdn->d_inode, ceph_vinop(realdn->d_inode));
		dput(dn);
		dn = realdn;
	} else {
		BUG_ON(!ceph_dentry(dn));
		dout("dn %p attached to %p ino %llx.%llx\n",
		     dn, dn->d_inode, ceph_vinop(dn->d_inode));
	}
	if ((!prehash || *prehash) && d_unhashed(dn))
		d_rehash(dn);
	if (set_offset)
		ceph_set_dentry_offset(dn);
out:
	return dn;
}

/*
 * Incorporate results into the local cache.  This is either just
 * one inode, or a directory, dentry, and possibly linked-to inode (e.g.,
 * after a lookup).
 *
 * A reply may contain
 *         a directory inode along with a dentry.
 *   and/or a target inode
 *
 * Called with snap_rwsem (read).
 */
int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
		    struct ceph_mds_session *session)
{
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	struct inode *in = NULL;
	struct ceph_mds_reply_inode *ininfo;
	struct ceph_vino vino;
	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
	int i = 0;
	int err = 0;

	dout("fill_trace %p is_dentry %d is_target %d\n", req,
	     rinfo->head->is_dentry, rinfo->head->is_target);

#if 0
	/*
	 * Debugging hook:
	 *
	 * If we resend completed ops to a recovering mds, we get no
	 * trace.  Since that is very rare, pretend this is the case
	 * to ensure the 'no trace' handlers in the callers behave.
	 *
	 * Fill in inodes unconditionally to avoid breaking cap
	 * invariants.
	 */
	if (rinfo->head->op & CEPH_MDS_OP_WRITE) {
		pr_info("fill_trace faking empty trace on %lld %s\n",
			req->r_tid, ceph_mds_op_name(rinfo->head->op));
		if (rinfo->head->is_dentry) {
			rinfo->head->is_dentry = 0;
			err = fill_inode(req->r_locked_dir,
					 &rinfo->diri, rinfo->dirfrag,
					 session, req->r_request_started, -1);
		}
		if (rinfo->head->is_target) {
			rinfo->head->is_target = 0;
			ininfo = rinfo->targeti.in;
			vino.ino = le64_to_cpu(ininfo->ino);
			vino.snap = le64_to_cpu(ininfo->snapid);
			in = ceph_get_inode(sb, vino);
			err = fill_inode(in, &rinfo->targeti, NULL,
					 session, req->r_request_started,
					 req->r_fmode);
			iput(in);
		}
	}
#endif

	if (!rinfo->head->is_target && !rinfo->head->is_dentry) {
		dout("fill_trace reply is empty!\n");
		if (rinfo->head->result == 0 && req->r_locked_dir)
			ceph_invalidate_dir_request(req);
		return 0;
	}

	if (rinfo->head->is_dentry) {
		struct inode *dir = req->r_locked_dir;

		err = fill_inode(dir, &rinfo->diri, rinfo->dirfrag,
				 session, req->r_request_started, -1,
				 &req->r_caps_reservation);
		if (err < 0)
			return err;
	}

	/*
	 * ignore null lease/binding on snapdir ENOENT, or else we
	 * will have trouble splicing in the virtual snapdir later
	 */
	if (rinfo->head->is_dentry && !req->r_aborted &&
	    (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name,
					       fsc->mount_options->snapdir_name,
					       req->r_dentry->d_name.len))) {
		/*
		 * lookup link rename   : null -> possibly existing inode
		 * mknod symlink mkdir  : null -> new inode
		 * unlink               : linked -> null
		 */
		struct inode *dir = req->r_locked_dir;
		struct dentry *dn = req->r_dentry;
		bool have_dir_cap, have_lease;

		BUG_ON(!dn);
		BUG_ON(!dir);
		BUG_ON(dn->d_parent->d_inode != dir);
		BUG_ON(ceph_ino(dir) !=
		       le64_to_cpu(rinfo->diri.in->ino));
		BUG_ON(ceph_snap(dir) !=
		       le64_to_cpu(rinfo->diri.in->snapid));

		/* do we have a lease on the whole dir? */
		have_dir_cap =
			(le32_to_cpu(rinfo->diri.in->cap.caps) &
			 CEPH_CAP_FILE_SHARED);

		/* do we have a dn lease? */
		have_lease = have_dir_cap ||
			le32_to_cpu(rinfo->dlease->duration_ms);
		if (!have_lease)
			dout("fill_trace no dentry lease or dir cap\n");

		/* rename? */
		if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) {
			dout(" src %p '%.*s' dst %p '%.*s'\n",
			     req->r_old_dentry,
			     req->r_old_dentry->d_name.len,
			     req->r_old_dentry->d_name.name,
			     dn, dn->d_name.len, dn->d_name.name);
			dout("fill_trace doing d_move %p -> %p\n",
			     req->r_old_dentry, dn);

			d_move(req->r_old_dentry, dn);
			dout(" src %p '%.*s' dst %p '%.*s'\n",
			     req->r_old_dentry,
			     req->r_old_dentry->d_name.len,
			     req->r_old_dentry->d_name.name,
			     dn, dn->d_name.len, dn->d_name.name);

			/* ensure target dentry is invalidated, despite
			   rehashing bug in vfs_rename_dir */
			ceph_invalidate_dentry_lease(dn);

			/*
			 * d_move() puts the renamed dentry at the end of
			 * d_subdirs.  We need to assign it an appropriate
			 * directory offset so we can behave when holding
			 * I_COMPLETE.
			 */
			ceph_set_dentry_offset(req->r_old_dentry);
			dout("dn %p gets new offset %lld\n", req->r_old_dentry,
			     ceph_dentry(req->r_old_dentry)->offset);

			dn = req->r_old_dentry;  /* use old_dentry */
			in = dn->d_inode;
		}

		/* null dentry? */
		if (!rinfo->head->is_target) {
			dout("fill_trace null dentry\n");
			if (dn->d_inode) {
				dout("d_delete %p\n", dn);
				d_delete(dn);
			} else {
				dout("d_instantiate %p NULL\n", dn);
				d_instantiate(dn, NULL);
				if (have_lease && d_unhashed(dn))
					d_rehash(dn);
				update_dentry_lease(dn, rinfo->dlease,
						    session,
						    req->r_request_started);
			}
			goto done;
		}

		/* attach proper inode */
		ininfo = rinfo->targeti.in;
		vino.ino = le64_to_cpu(ininfo->ino);
		vino.snap = le64_to_cpu(ininfo->snapid);
		in = dn->d_inode;
		if (!in) {
			in = ceph_get_inode(sb, vino);
			if (IS_ERR(in)) {
				pr_err("fill_trace bad get_inode "
				       "%llx.%llx\n", vino.ino, vino.snap);
				err = PTR_ERR(in);
				d_delete(dn);
				goto done;
			}
			dn = splice_dentry(dn, in, &have_lease, true);
			if (IS_ERR(dn)) {
				err = PTR_ERR(dn);
				goto done;
			}
			req->r_dentry = dn;  /* may have spliced */
			ihold(in);
		} else if (ceph_ino(in) == vino.ino &&
			   ceph_snap(in) == vino.snap) {
			ihold(in);
		} else {
			dout(" %p links to %p %llx.%llx, not %llx.%llx\n",
			     dn, in, ceph_ino(in), ceph_snap(in),
			     vino.ino, vino.snap);
			have_lease = false;
			in = NULL;
		}

		if (have_lease)
			update_dentry_lease(dn, rinfo->dlease, session,
					    req->r_request_started);
		dout(" final dn %p\n", dn);
		i++;
	} else if (req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
		   req->r_op == CEPH_MDS_OP_MKSNAP) {
		struct dentry *dn = req->r_dentry;

		/* fill out a snapdir LOOKUPSNAP dentry */
		BUG_ON(!dn);
		BUG_ON(!req->r_locked_dir);
		BUG_ON(ceph_snap(req->r_locked_dir) != CEPH_SNAPDIR);
		ininfo = rinfo->targeti.in;
		vino.ino = le64_to_cpu(ininfo->ino);
		vino.snap = le64_to_cpu(ininfo->snapid);
		in = ceph_get_inode(sb, vino);
		if (IS_ERR(in)) {
			pr_err("fill_inode get_inode badness %llx.%llx\n",
			       vino.ino, vino.snap);
			err = PTR_ERR(in);
			d_delete(dn);
			goto done;
		}
		dout(" linking snapped dir %p to dn %p\n", in, dn);
		dn = splice_dentry(dn, in, NULL, true);
		if (IS_ERR(dn)) {
			err = PTR_ERR(dn);
			goto done;
		}
		req->r_dentry = dn;  /* may have spliced */
		ihold(in);
		rinfo->head->is_dentry = 1;  /* fool notrace handlers */
	}

	if (rinfo->head->is_target) {
		vino.ino = le64_to_cpu(rinfo->targeti.in->ino);
		vino.snap = le64_to_cpu(rinfo->targeti.in->snapid);

		if (in == NULL || ceph_ino(in) != vino.ino ||
		    ceph_snap(in) != vino.snap) {
			in = ceph_get_inode(sb, vino);
			if (IS_ERR(in)) {
				err = PTR_ERR(in);
				goto done;
			}
		}
		req->r_target_inode = in;

		err = fill_inode(in,
				 &rinfo->targeti, NULL,
				 session, req->r_request_started,
				 (le32_to_cpu(rinfo->head->result) == 0) ?
				 req->r_fmode : -1,
				 &req->r_caps_reservation);
		if (err < 0) {
			pr_err("fill_inode badness %p %llx.%llx\n",
			       in, ceph_vinop(in));
			goto done;
		}
	}

done:
	dout("fill_trace done err=%d\n", err);
	return err;
}

/*
 * Prepopulate our cache with readdir results, leases, etc.
 */
int ceph_readdir_prepopulate(struct ceph_mds_request *req,
			     struct ceph_mds_session *session)
{
	struct dentry *parent = req->r_dentry;
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	struct qstr dname;
	struct dentry *dn;
	struct inode *in;
	int err = 0, i;
	struct inode *snapdir = NULL;
	struct ceph_mds_request_head *rhead = req->r_request->front.iov_base;
	u64 frag = le32_to_cpu(rhead->args.readdir.frag);
	struct ceph_dentry_info *di;

	if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) {
		snapdir = ceph_get_snapdir(parent->d_inode);
		parent = d_find_alias(snapdir);
		dout("readdir_prepopulate %d items under SNAPDIR dn %p\n",
		     rinfo->dir_nr, parent);
	} else {
		dout("readdir_prepopulate %d items under dn %p\n",
		     rinfo->dir_nr, parent);
		if (rinfo->dir_dir)
			ceph_fill_dirfrag(parent->d_inode, rinfo->dir_dir);
	}

	for (i = 0; i < rinfo->dir_nr; i++) {
		struct ceph_vino vino;

		dname.name = rinfo->dir_dname[i];
		dname.len = rinfo->dir_dname_len[i];
		dname.hash = full_name_hash(dname.name, dname.len);

		vino.ino = le64_to_cpu(rinfo->dir_in[i].in->ino);
		vino.snap = le64_to_cpu(rinfo->dir_in[i].in->snapid);

retry_lookup:
		dn = d_lookup(parent, &dname);
		dout("d_lookup on parent=%p name=%.*s got %p\n",
		     parent, dname.len, dname.name, dn);

		if (!dn) {
			dn = d_alloc(parent, &dname);
			dout("d_alloc %p '%.*s' = %p\n", parent,
			     dname.len, dname.name, dn);
			if (dn == NULL) {
				dout("d_alloc badness\n");
				err = -ENOMEM;
				goto out;
			}
			err = ceph_init_dentry(dn);
			if (err < 0) {
				dput(dn);
				goto out;
			}
		} else if (dn->d_inode &&
			   (ceph_ino(dn->d_inode) != vino.ino ||
			    ceph_snap(dn->d_inode) != vino.snap)) {
			dout(" dn %p points to wrong inode %p\n",
			     dn, dn->d_inode);
			d_delete(dn);
			dput(dn);
			goto retry_lookup;
		} else {
			/* reorder parent's d_subdirs */
			spin_lock(&parent->d_lock);
			spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
			list_move(&dn->d_u.d_child, &parent->d_subdirs);
			spin_unlock(&dn->d_lock);
			spin_unlock(&parent->d_lock);
		}

		di = dn->d_fsdata;
		di->offset = ceph_make_fpos(frag, i + req->r_readdir_offset);

		/* inode */
		if (dn->d_inode) {
			in = dn->d_inode;
		} else {
			in = ceph_get_inode(parent->d_sb, vino);
			if (IS_ERR(in)) {
				dout("new_inode badness\n");
				d_delete(dn);
				dput(dn);
				err = PTR_ERR(in);
				goto out;
			}
			dn = splice_dentry(dn, in, NULL, false);
			if (IS_ERR(dn))
				dn = NULL;
		}

		if (fill_inode(in, &rinfo->dir_in[i], NULL, session,
			       req->r_request_started, -1,
			       &req->r_caps_reservation) < 0) {
			pr_err("fill_inode badness on %p\n", in);
			goto next_item;
		}
		if (dn)
			update_dentry_lease(dn, rinfo->dir_dlease[i],
					    req->r_session,
					    req->r_request_started);
next_item:
		if (dn)
			dput(dn);
	}
	req->r_did_prepopulate = true;

out:
	if (snapdir) {
		iput(snapdir);
		dput(parent);
	}
	dout("readdir_prepopulate done\n");
	return err;
}
1305int ceph_inode_set_size(struct inode *inode, loff_t size)
1306{
1307 struct ceph_inode_info *ci = ceph_inode(inode);
1308 int ret = 0;
1309
1310 spin_lock(&inode->i_lock);
1311 dout("set_size %p %llu -> %llu\n", inode, inode->i_size, size);
1312 inode->i_size = size;
1313 inode->i_blocks = (size + (1 << 9) - 1) >> 9;
1314
1315 /* tell the MDS if we are approaching max_size */
1316 if ((size << 1) >= ci->i_max_size &&
1317 (ci->i_reported_size << 1) < ci->i_max_size)
1318 ret = 1;
1319
1320 spin_unlock(&inode->i_lock);
1321 return ret;
1322}
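
/*
 * Worked example (numbers hypothetical): with i_max_size 4 MB, a
 * write extending the file past 2 MB makes (size << 1) >= i_max_size
 * true; if the size last reported to the MDS was still under 2 MB we
 * return 1 so the caller requests a larger max_size before writers
 * actually hit the limit.
 */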

/*
 * Write back inode data in a worker thread.  (This can't be done
 * in the message handler context.)
 */
void ceph_queue_writeback(struct inode *inode)
{
	if (queue_work(ceph_inode_to_client(inode)->wb_wq,
		       &ceph_inode(inode)->i_wb_work)) {
		dout("ceph_queue_writeback %p\n", inode);
		ihold(inode);
	} else {
		dout("ceph_queue_writeback %p failed\n", inode);
	}
}

static void ceph_writeback_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_wb_work);
	struct inode *inode = &ci->vfs_inode;

	dout("writeback %p\n", inode);
	filemap_fdatawrite(&inode->i_data);
	iput(inode);
}

/*
 * queue an async invalidation
 */
void ceph_queue_invalidate(struct inode *inode)
{
	if (queue_work(ceph_inode_to_client(inode)->pg_inv_wq,
		       &ceph_inode(inode)->i_pg_inv_work)) {
		dout("ceph_queue_invalidate %p\n", inode);
		ihold(inode);
	} else {
		dout("ceph_queue_invalidate %p failed\n", inode);
	}
}

/*
 * invalidate any pages that are not dirty or under writeback.  this
 * includes pages that are clean and mapped.
 */
static void ceph_invalidate_nondirty_pages(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t next = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t index;
			int skip_page =
				(PageDirty(page) || PageWriteback(page));

			if (!skip_page)
				skip_page = !trylock_page(page);

			/*
			 * We really shouldn't be looking at the ->index of an
			 * unlocked page.  But we're not allowed to lock these
			 * pages.  So we rely upon nobody altering the ->index
			 * of this (pinned-by-us) page.
			 */
			index = page->index;
			if (index > next)
				next = index;
			next++;

			if (skip_page)
				continue;

			generic_error_remove_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
}

/*
 * Invalidate inode pages in a worker thread.  (This can't be done
 * in the message handler context.)
 */
static void ceph_invalidate_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_pg_inv_work);
	struct inode *inode = &ci->vfs_inode;
	u32 orig_gen;
	int check = 0;

	spin_lock(&inode->i_lock);
	dout("invalidate_pages %p gen %d revoking %d\n", inode,
	     ci->i_rdcache_gen, ci->i_rdcache_revoking);
	if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
		/* nevermind! */
		spin_unlock(&inode->i_lock);
		goto out;
	}
	orig_gen = ci->i_rdcache_gen;
	spin_unlock(&inode->i_lock);

	ceph_invalidate_nondirty_pages(inode->i_mapping);

	spin_lock(&inode->i_lock);
	if (orig_gen == ci->i_rdcache_gen &&
	    orig_gen == ci->i_rdcache_revoking) {
		dout("invalidate_pages %p gen %d successful\n", inode,
		     ci->i_rdcache_gen);
		ci->i_rdcache_revoking--;
		check = 1;
	} else {
		dout("invalidate_pages %p gen %d raced, now %d revoking %d\n",
		     inode, orig_gen, ci->i_rdcache_gen,
		     ci->i_rdcache_revoking);
	}
	spin_unlock(&inode->i_lock);

	if (check)
		ceph_check_caps(ci, 0, NULL);
out:
	iput(inode);
}


/*
 * called by trunc_wq; take i_mutex ourselves
 *
 * We also truncate in a separate thread as well.
 */
static void ceph_vmtruncate_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_vmtruncate_work);
	struct inode *inode = &ci->vfs_inode;

	dout("vmtruncate_work %p\n", inode);
	mutex_lock(&inode->i_mutex);
	__ceph_do_pending_vmtruncate(inode);
	mutex_unlock(&inode->i_mutex);
	iput(inode);
}

/*
 * Queue an async vmtruncate.  If we fail to queue work, we will handle
 * the truncation the next time we call __ceph_do_pending_vmtruncate.
 */
void ceph_queue_vmtruncate(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	if (queue_work(ceph_sb_to_client(inode->i_sb)->trunc_wq,
		       &ci->i_vmtruncate_work)) {
		dout("ceph_queue_vmtruncate %p\n", inode);
		ihold(inode);
	} else {
		dout("ceph_queue_vmtruncate %p failed, pending=%d\n",
		     inode, ci->i_truncate_pending);
	}
}

/*
 * called with i_mutex held.
 *
 * Make sure any pending truncation is applied before doing anything
 * that may depend on it.
 */
void __ceph_do_pending_vmtruncate(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 to;
	int wrbuffer_refs, wake = 0;

retry:
	spin_lock(&inode->i_lock);
	if (ci->i_truncate_pending == 0) {
		dout("__do_pending_vmtruncate %p none pending\n", inode);
		spin_unlock(&inode->i_lock);
		return;
	}

	/*
	 * make sure any dirty snapped pages are flushed before we
	 * possibly truncate them.. so write AND block!
	 */
	if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) {
		dout("__do_pending_vmtruncate %p flushing snaps first\n",
		     inode);
		spin_unlock(&inode->i_lock);
		filemap_write_and_wait_range(&inode->i_data, 0,
					     inode->i_sb->s_maxbytes);
		goto retry;
	}

	to = ci->i_truncate_size;
	wrbuffer_refs = ci->i_wrbuffer_ref;
	dout("__do_pending_vmtruncate %p (%d) to %lld\n", inode,
	     ci->i_truncate_pending, to);
	spin_unlock(&inode->i_lock);

	truncate_inode_pages(inode->i_mapping, to);

	spin_lock(&inode->i_lock);
	ci->i_truncate_pending--;
	if (ci->i_truncate_pending == 0)
		wake = 1;
	spin_unlock(&inode->i_lock);

	if (wrbuffer_refs == 0)
		ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
	if (wake)
		wake_up_all(&ci->i_cap_wq);
}


/*
 * symlinks
 */
static void *ceph_sym_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	struct ceph_inode_info *ci = ceph_inode(dentry->d_inode);
	nd_set_link(nd, ci->i_symlink);
	return NULL;
}

static const struct inode_operations ceph_symlink_iops = {
	.readlink = generic_readlink,
	.follow_link = ceph_sym_follow_link,
};

/*
 * setattr
 */
int ceph_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct inode *parent_inode;
	const unsigned int ia_valid = attr->ia_valid;
	struct ceph_mds_request *req;
	struct ceph_mds_client *mdsc = ceph_sb_to_client(dentry->d_sb)->mdsc;
	int issued;
	int release = 0, dirtied = 0;
	int mask = 0;
	int err = 0;
	int inode_dirty_flags = 0;

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

	__ceph_do_pending_vmtruncate(inode);

	err = inode_change_ok(inode, attr);
	if (err != 0)
		return err;

	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETATTR,
				       USE_AUTH_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);

	spin_lock(&inode->i_lock);
	issued = __ceph_caps_issued(ci, NULL);
	dout("setattr %p issued %s\n", inode, ceph_cap_string(issued));

	if (ia_valid & ATTR_UID) {
		dout("setattr %p uid %d -> %d\n", inode,
		     inode->i_uid, attr->ia_uid);
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_uid = attr->ia_uid;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   attr->ia_uid != inode->i_uid) {
			req->r_args.setattr.uid = cpu_to_le32(attr->ia_uid);
			mask |= CEPH_SETATTR_UID;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}
	if (ia_valid & ATTR_GID) {
		dout("setattr %p gid %d -> %d\n", inode,
		     inode->i_gid, attr->ia_gid);
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_gid = attr->ia_gid;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   attr->ia_gid != inode->i_gid) {
			req->r_args.setattr.gid = cpu_to_le32(attr->ia_gid);
			mask |= CEPH_SETATTR_GID;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}
	if (ia_valid & ATTR_MODE) {
		dout("setattr %p mode 0%o -> 0%o\n", inode, inode->i_mode,
		     attr->ia_mode);
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_mode = attr->ia_mode;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   attr->ia_mode != inode->i_mode) {
			req->r_args.setattr.mode = cpu_to_le32(attr->ia_mode);
			mask |= CEPH_SETATTR_MODE;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}

	if (ia_valid & ATTR_ATIME) {
		dout("setattr %p atime %ld.%ld -> %ld.%ld\n", inode,
		     inode->i_atime.tv_sec, inode->i_atime.tv_nsec,
		     attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec);
		if (issued & CEPH_CAP_FILE_EXCL) {
			ci->i_time_warp_seq++;
			inode->i_atime = attr->ia_atime;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_WR) &&
			   timespec_compare(&inode->i_atime,
					    &attr->ia_atime) < 0) {
			inode->i_atime = attr->ia_atime;
			dirtied |= CEPH_CAP_FILE_WR;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   !timespec_equal(&inode->i_atime, &attr->ia_atime)) {
			ceph_encode_timespec(&req->r_args.setattr.atime,
					     &attr->ia_atime);
			mask |= CEPH_SETATTR_ATIME;
			release |= CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}
	if (ia_valid & ATTR_MTIME) {
		dout("setattr %p mtime %ld.%ld -> %ld.%ld\n", inode,
		     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
		     attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec);
		if (issued & CEPH_CAP_FILE_EXCL) {
			ci->i_time_warp_seq++;
			inode->i_mtime = attr->ia_mtime;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_WR) &&
			   timespec_compare(&inode->i_mtime,
					    &attr->ia_mtime) < 0) {
			inode->i_mtime = attr->ia_mtime;
			dirtied |= CEPH_CAP_FILE_WR;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   !timespec_equal(&inode->i_mtime, &attr->ia_mtime)) {
			ceph_encode_timespec(&req->r_args.setattr.mtime,
					     &attr->ia_mtime);
			mask |= CEPH_SETATTR_MTIME;
			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}
	if (ia_valid & ATTR_SIZE) {
		dout("setattr %p size %lld -> %lld\n", inode,
		     inode->i_size, attr->ia_size);
		if (attr->ia_size > inode->i_sb->s_maxbytes) {
			err = -EINVAL;
			goto out;
		}
		if ((issued & CEPH_CAP_FILE_EXCL) &&
		    attr->ia_size > inode->i_size) {
			inode->i_size = attr->ia_size;
			inode->i_blocks =
				(attr->ia_size + (1 << 9) - 1) >> 9;
			inode->i_ctime = attr->ia_ctime;
			ci->i_reported_size = attr->ia_size;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   attr->ia_size != inode->i_size) {
			req->r_args.setattr.size = cpu_to_le64(attr->ia_size);
			req->r_args.setattr.old_size =
				cpu_to_le64(inode->i_size);
			mask |= CEPH_SETATTR_SIZE;
			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}

	/* these do nothing */
	if (ia_valid & ATTR_CTIME) {
		bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME|
					 ATTR_MODE|ATTR_UID|ATTR_GID)) == 0;
		dout("setattr %p ctime %ld.%ld -> %ld.%ld (%s)\n", inode,
		     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
		     attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
		     only ? "ctime only" : "ignored");
		inode->i_ctime = attr->ia_ctime;
		if (only) {
			/*
			 * if the kernel wants to dirty ctime but nothing
			 * else, we need to choose a cap to dirty under, or
			 * do an almost-no-op setattr
			 */
			if (issued & CEPH_CAP_AUTH_EXCL)
				dirtied |= CEPH_CAP_AUTH_EXCL;
			else if (issued & CEPH_CAP_FILE_EXCL)
				dirtied |= CEPH_CAP_FILE_EXCL;
			else if (issued & CEPH_CAP_XATTR_EXCL)
				dirtied |= CEPH_CAP_XATTR_EXCL;
			else
				mask |= CEPH_SETATTR_CTIME;
		}
	}
	if (ia_valid & ATTR_FILE)
		dout("setattr %p ATTR_FILE ... hrm!\n", inode);

	if (dirtied) {
		inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied);
		inode->i_ctime = CURRENT_TIME;
	}

	release &= issued;
	spin_unlock(&inode->i_lock);

	if (inode_dirty_flags)
		__mark_inode_dirty(inode, inode_dirty_flags);

	if (mask) {
		req->r_inode = inode;
		ihold(inode);
		req->r_inode_drop = release;
		req->r_args.setattr.mask = cpu_to_le32(mask);
		req->r_num_caps = 1;
		parent_inode = ceph_get_dentry_parent_inode(dentry);
		err = ceph_mdsc_do_request(mdsc, parent_inode, req);
		iput(parent_inode);
	}
	dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err,
	     ceph_cap_string(dirtied), mask);

	ceph_mdsc_put_request(req);
	__ceph_do_pending_vmtruncate(inode);
	return err;
out:
	spin_unlock(&inode->i_lock);
	ceph_mdsc_put_request(req);
	return err;
}

/*
 * Verify that we have a lease on the given mask.  If not,
 * do a getattr against an mds.
 */
int ceph_do_getattr(struct inode *inode, int mask)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(inode) == CEPH_SNAPDIR) {
		dout("do_getattr inode %p SNAPDIR\n", inode);
		return 0;
	}

	dout("do_getattr inode %p mask %s mode 0%o\n",
	     inode, ceph_cap_string(mask), inode->i_mode);
	if (ceph_caps_issued_mask(ceph_inode(inode), mask, 1))
		return 0;

	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->r_inode = inode;
	ihold(inode);
	req->r_num_caps = 1;
	req->r_args.getattr.mask = cpu_to_le32(mask);
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	ceph_mdsc_put_request(req);
	dout("do_getattr result=%d\n", err);
	return err;
}


/*
 * Check inode permissions.  We verify we have a valid value for
 * the AUTH cap, then call the generic handler.
 */
int ceph_permission(struct inode *inode, int mask, unsigned int flags)
{
	int err;

	if (flags & IPERM_FLAG_RCU)
		return -ECHILD;

	err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED);

	if (!err)
		err = generic_permission(inode, mask, flags, NULL);
	return err;
}

/*
 * Get all attributes.  Hopefully someday we'll have a statlite()
 * and can limit the fields we require to be accurate.
 */
int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry,
		 struct kstat *stat)
{
	struct inode *inode = dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int err;

	err = ceph_do_getattr(inode, CEPH_STAT_CAP_INODE_ALL);
	if (!err) {
		generic_fillattr(inode, stat);
		stat->ino = ceph_translate_ino(inode->i_sb, inode->i_ino);
		if (ceph_snap(inode) != CEPH_NOSNAP)
			stat->dev = ceph_snap(inode);
		else
			stat->dev = 0;
		if (S_ISDIR(inode->i_mode)) {
			if (ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb),
						RBYTES))
				stat->size = ci->i_rbytes;
			else
				stat->size = ci->i_files + ci->i_subdirs;
			stat->blocks = 0;
			stat->blksize = 65536;
		}
	}
	return err;
}