// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>
#include <linux/ceph/striper.h>

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/writeback.h>
#include <linux/falloc.h>
#include <linux/iversion.h>
#include <linux/ktime.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
#include "io.h"
#include "metric.h"

static __le32 ceph_flags_sys2wire(u32 flags)
{
        u32 wire_flags = 0;

        switch (flags & O_ACCMODE) {
        case O_RDONLY:
                wire_flags |= CEPH_O_RDONLY;
                break;
        case O_WRONLY:
                wire_flags |= CEPH_O_WRONLY;
                break;
        case O_RDWR:
                wire_flags |= CEPH_O_RDWR;
                break;
        }

        flags &= ~O_ACCMODE;

#define ceph_sys2wire(a) if (flags & a) { wire_flags |= CEPH_##a; flags &= ~a; }

        ceph_sys2wire(O_CREAT);
        ceph_sys2wire(O_EXCL);
        ceph_sys2wire(O_TRUNC);
        ceph_sys2wire(O_DIRECTORY);
        ceph_sys2wire(O_NOFOLLOW);

#undef ceph_sys2wire

        if (flags)
                dout("unused open flags: %x\n", flags);

        return cpu_to_le32(wire_flags);
}
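
/*
 * For example (a sketch; the CEPH_O_* values are defined by the wire
 * protocol): open(..., O_WRONLY|O_CREAT|O_TRUNC) is encoded as
 * CEPH_O_WRONLY|CEPH_O_CREAT|CEPH_O_TRUNC, while flags with no wire
 * encoding (e.g. O_NONBLOCK) are dropped and reported via dout().
 */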

/*
 * Ceph file operations
 *
 * Implement basic open/close functionality, and implement
 * read/write.
 *
 * We implement three modes of file I/O:
 *  - buffered uses the generic_file_aio_{read,write} helpers
 *
 *  - synchronous is used when there is multi-client read/write
 *    sharing, avoids the page cache, and synchronously waits for an
 *    ack from the OSD.
 *
 *  - direct io takes the variant of the sync path that references
 *    user pages directly.
 *
 * fsync() flushes and waits on dirty pages, but just queues metadata
 * for writeback: since the MDS can recover size and mtime there is no
 * need to wait for MDS acknowledgement.
 */
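
/*
 * Within this file, the synchronous path is implemented by
 * ceph_sync_read() and ceph_sync_write(), and the direct io variant
 * by ceph_direct_read_write().
 */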

/*
 * How many pages to get in one call to iov_iter_get_pages2().  This
 * determines the size of the on-stack array used as a buffer.
 */
#define ITER_GET_BVECS_PAGES    64

static ssize_t __iter_get_bvecs(struct iov_iter *iter, size_t maxsize,
                                struct bio_vec *bvecs)
{
        size_t size = 0;
        int bvec_idx = 0;

        if (maxsize > iov_iter_count(iter))
                maxsize = iov_iter_count(iter);

        while (size < maxsize) {
                struct page *pages[ITER_GET_BVECS_PAGES];
                ssize_t bytes;
                size_t start;
                int idx = 0;

                bytes = iov_iter_get_pages2(iter, pages, maxsize - size,
                                           ITER_GET_BVECS_PAGES, &start);
                if (bytes < 0)
                        return size ?: bytes;

                size += bytes;

                for ( ; bytes; idx++, bvec_idx++) {
                        struct bio_vec bv = {
                                .bv_page = pages[idx],
                                .bv_len = min_t(int, bytes, PAGE_SIZE - start),
                                .bv_offset = start,
                        };

                        bvecs[bvec_idx] = bv;
                        bytes -= bv.bv_len;
                        start = 0;
                }
        }

        return size;
}

/*
 * iov_iter_get_pages2() only considers one iov_iter segment, no matter
 * what maxsize or maxpages are given.  For ITER_BVEC that is a single
 * page.
 *
 * Attempt to get up to @maxsize bytes worth of pages from @iter.
 * Return the number of bytes in the created bio_vec array, or an error.
 */
static ssize_t iter_get_bvecs_alloc(struct iov_iter *iter, size_t maxsize,
                                    struct bio_vec **bvecs, int *num_bvecs)
{
        struct bio_vec *bv;
        size_t orig_count = iov_iter_count(iter);
        ssize_t bytes;
        int npages;

        iov_iter_truncate(iter, maxsize);
        npages = iov_iter_npages(iter, INT_MAX);
        iov_iter_reexpand(iter, orig_count);

        /*
         * __iter_get_bvecs() may populate only part of the array -- zero it
         * out.
         */
        bv = kvmalloc_array(npages, sizeof(*bv), GFP_KERNEL | __GFP_ZERO);
        if (!bv)
                return -ENOMEM;

        bytes = __iter_get_bvecs(iter, maxsize, bv);
        if (bytes < 0) {
                /*
                 * No pages were pinned -- just free the array.
                 */
                kvfree(bv);
                return bytes;
        }

        *bvecs = bv;
        *num_bvecs = npages;
        return bytes;
}

static void put_bvecs(struct bio_vec *bvecs, int num_bvecs, bool should_dirty)
{
        int i;

        for (i = 0; i < num_bvecs; i++) {
                if (bvecs[i].bv_page) {
                        if (should_dirty)
                                set_page_dirty_lock(bvecs[i].bv_page);
                        put_page(bvecs[i].bv_page);
                }
        }
        kvfree(bvecs);
}
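
/*
 * Typical usage, as in ceph_direct_read_write() below (a sketch):
 *
 *      len = iter_get_bvecs_alloc(iter, size, &bvecs, &num_pages);
 *      ...build an OSD request over bvecs and wait for it...
 *      put_bvecs(bvecs, num_pages, should_dirty);
 *
 * Every successful iter_get_bvecs_alloc() must be paired with a
 * put_bvecs() to unpin (and, for reads into user memory, dirty) the
 * pages.
 */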

/*
 * Prepare an open request.  Preallocate ceph_cap to avoid an
 * inopportune ENOMEM later.
 */
static struct ceph_mds_request *
prepare_open_request(struct super_block *sb, int flags, int create_mode)
{
        struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(sb);
        struct ceph_mds_request *req;
        int want_auth = USE_ANY_MDS;
        int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;

        if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
                want_auth = USE_AUTH_MDS;

        req = ceph_mdsc_create_request(mdsc, op, want_auth);
        if (IS_ERR(req))
                goto out;
        req->r_fmode = ceph_flags_to_mode(flags);
        req->r_args.open.flags = ceph_flags_sys2wire(flags);
        req->r_args.open.mode = cpu_to_le32(create_mode);
out:
        return req;
}
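
/*
 * prepare_open_request() is shared by ceph_open(), ceph_renew_caps()
 * and ceph_atomic_open() below; the caller owns the returned request
 * and must drop it with ceph_mdsc_put_request().
 */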

static int ceph_init_file_info(struct inode *inode, struct file *file,
                                        int fmode, bool isdir)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_mount_options *opt =
                ceph_inode_to_client(&ci->netfs.inode)->mount_options;
        struct ceph_file_info *fi;
        int ret;

        dout("%s %p %p 0%o (%s)\n", __func__, inode, file,
                        inode->i_mode, isdir ? "dir" : "regular");
        BUG_ON(inode->i_fop->release != ceph_release);

        if (isdir) {
                struct ceph_dir_file_info *dfi =
                        kmem_cache_zalloc(ceph_dir_file_cachep, GFP_KERNEL);
                if (!dfi)
                        return -ENOMEM;

                file->private_data = dfi;
                fi = &dfi->file_info;
                dfi->next_offset = 2;
                dfi->readdir_cache_idx = -1;
        } else {
                fi = kmem_cache_zalloc(ceph_file_cachep, GFP_KERNEL);
                if (!fi)
                        return -ENOMEM;

                if (opt->flags & CEPH_MOUNT_OPT_NOPAGECACHE)
                        fi->flags |= CEPH_F_SYNC;

                file->private_data = fi;
        }

        ceph_get_fmode(ci, fmode, 1);
        fi->fmode = fmode;

        spin_lock_init(&fi->rw_contexts_lock);
        INIT_LIST_HEAD(&fi->rw_contexts);
        fi->filp_gen = READ_ONCE(ceph_inode_to_client(inode)->filp_gen);

        if ((file->f_mode & FMODE_WRITE) && ceph_has_inline_data(ci)) {
                ret = ceph_uninline_data(file);
                if (ret < 0)
                        goto error;
        }

        return 0;

error:
        ceph_fscache_unuse_cookie(inode, file->f_mode & FMODE_WRITE);
        ceph_put_fmode(ci, fi->fmode, 1);
        kmem_cache_free(ceph_file_cachep, fi);
        /* wake up anyone waiting for caps on this inode */
        wake_up_all(&ci->i_cap_wq);
        return ret;
}

/*
 * initialize private struct file data.
 * if we fail, clean up by dropping fmode reference on the ceph_inode
 */
static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
{
        int ret = 0;

        switch (inode->i_mode & S_IFMT) {
        case S_IFREG:
                ceph_fscache_use_cookie(inode, file->f_mode & FMODE_WRITE);
                fallthrough;
        case S_IFDIR:
                ret = ceph_init_file_info(inode, file, fmode,
                                                S_ISDIR(inode->i_mode));
                break;

        case S_IFLNK:
                dout("init_file %p %p 0%o (symlink)\n", inode, file,
                     inode->i_mode);
                break;

        default:
                dout("init_file %p %p 0%o (special)\n", inode, file,
                     inode->i_mode);
                /*
                 * we need to drop the open ref now, since we don't
                 * have .release set to ceph_release.
                 */
                BUG_ON(inode->i_fop->release == ceph_release);

                /* call the proper open fop */
                ret = inode->i_fop->open(inode, file);
        }
        return ret;
}

/*
 * Try to renew caps after the session gets killed.
 */
int ceph_renew_caps(struct inode *inode, int fmode)
{
        struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_mds_request *req;
        int err, flags, wanted;

        spin_lock(&ci->i_ceph_lock);
        __ceph_touch_fmode(ci, mdsc, fmode);
        wanted = __ceph_caps_file_wanted(ci);
        if (__ceph_is_any_real_caps(ci) &&
            (!(wanted & CEPH_CAP_ANY_WR) || ci->i_auth_cap)) {
                int issued = __ceph_caps_issued(ci, NULL);
                spin_unlock(&ci->i_ceph_lock);
                dout("renew caps %p want %s issued %s updating mds_wanted\n",
                     inode, ceph_cap_string(wanted), ceph_cap_string(issued));
                ceph_check_caps(ci, 0);
                return 0;
        }
        spin_unlock(&ci->i_ceph_lock);

        flags = 0;
        if ((wanted & CEPH_CAP_FILE_RD) && (wanted & CEPH_CAP_FILE_WR))
                flags = O_RDWR;
        else if (wanted & CEPH_CAP_FILE_RD)
                flags = O_RDONLY;
        else if (wanted & CEPH_CAP_FILE_WR)
                flags = O_WRONLY;
#ifdef O_LAZY
        if (wanted & CEPH_CAP_FILE_LAZYIO)
                flags |= O_LAZY;
#endif

        req = prepare_open_request(inode->i_sb, flags, 0);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                goto out;
        }

        req->r_inode = inode;
        ihold(inode);
        req->r_num_caps = 1;

        err = ceph_mdsc_do_request(mdsc, NULL, req);
        ceph_mdsc_put_request(req);
out:
        dout("renew caps %p open result=%d\n", inode, err);
        return err < 0 ? err : 0;
}

/*
 * If we already have the requisite capabilities, we can satisfy
 * the open request locally (no need to request new caps from the
 * MDS).  We do, however, need to inform the MDS (asynchronously)
 * if our wanted caps set expands.
 */
int ceph_open(struct inode *inode, struct file *file)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        struct ceph_file_info *fi = file->private_data;
        int err;
        int flags, fmode, wanted;

        if (fi) {
                dout("open file %p is already opened\n", file);
                return 0;
        }

        /* filter out O_CREAT|O_EXCL; vfs did that already.  yuck. */
        flags = file->f_flags & ~(O_CREAT|O_EXCL);
        if (S_ISDIR(inode->i_mode))
                flags = O_DIRECTORY;  /* mds likes to know */

        dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
             ceph_vinop(inode), file, flags, file->f_flags);
        fmode = ceph_flags_to_mode(flags);
        wanted = ceph_caps_for_mode(fmode);

        /* snapped files are read-only */
        if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
                return -EROFS;

        /* trivially open snapdir */
        if (ceph_snap(inode) == CEPH_SNAPDIR) {
                return ceph_init_file(inode, file, fmode);
        }

        /*
         * No need to block if we have caps on the auth MDS (for
         * write) or any MDS (for read).  Update wanted set
         * asynchronously.
         */
        spin_lock(&ci->i_ceph_lock);
        if (__ceph_is_any_real_caps(ci) &&
            (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
                int mds_wanted = __ceph_caps_mds_wanted(ci, true);
                int issued = __ceph_caps_issued(ci, NULL);

                dout("open %p fmode %d want %s issued %s using existing\n",
                     inode, fmode, ceph_cap_string(wanted),
                     ceph_cap_string(issued));
                __ceph_touch_fmode(ci, mdsc, fmode);
                spin_unlock(&ci->i_ceph_lock);

                /* adjust wanted? */
                if ((issued & wanted) != wanted &&
                    (mds_wanted & wanted) != wanted &&
                    ceph_snap(inode) != CEPH_SNAPDIR)
                        ceph_check_caps(ci, 0);

                return ceph_init_file(inode, file, fmode);
        } else if (ceph_snap(inode) != CEPH_NOSNAP &&
                   (ci->i_snap_caps & wanted) == wanted) {
                __ceph_touch_fmode(ci, mdsc, fmode);
                spin_unlock(&ci->i_ceph_lock);
                return ceph_init_file(inode, file, fmode);
        }

        spin_unlock(&ci->i_ceph_lock);

        dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
        req = prepare_open_request(inode->i_sb, flags, 0);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                goto out;
        }
        req->r_inode = inode;
        ihold(inode);

        req->r_num_caps = 1;
        err = ceph_mdsc_do_request(mdsc, NULL, req);
        if (!err)
                err = ceph_init_file(inode, file, req->r_fmode);
        ceph_mdsc_put_request(req);
        dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
out:
        return err;
}

/* Clone the layout from a synchronous create, if the dir now has Dc caps */
static void
cache_file_layout(struct inode *dst, struct inode *src)
{
        struct ceph_inode_info *cdst = ceph_inode(dst);
        struct ceph_inode_info *csrc = ceph_inode(src);

        spin_lock(&cdst->i_ceph_lock);
        if ((__ceph_caps_issued(cdst, NULL) & CEPH_CAP_DIR_CREATE) &&
            !ceph_file_layout_is_valid(&cdst->i_cached_layout)) {
                memcpy(&cdst->i_cached_layout, &csrc->i_layout,
                        sizeof(cdst->i_cached_layout));
                rcu_assign_pointer(cdst->i_cached_layout.pool_ns,
                                   ceph_try_get_string(csrc->i_layout.pool_ns));
        }
        spin_unlock(&cdst->i_ceph_lock);
}

/*
 * Try to set up an async create.  We need caps, a file layout, an inode
 * number, and either a lease on the dentry or complete dir info.  If any
 * of those criteria are not satisfied, return 0 and the caller can fall
 * back to a synchronous create.
 */
static int try_prep_async_create(struct inode *dir, struct dentry *dentry,
                                 struct ceph_file_layout *lo, u64 *pino)
{
        struct ceph_inode_info *ci = ceph_inode(dir);
        struct ceph_dentry_info *di = ceph_dentry(dentry);
        int got = 0, want = CEPH_CAP_FILE_EXCL | CEPH_CAP_DIR_CREATE;
        u64 ino;

        spin_lock(&ci->i_ceph_lock);
        /* No auth cap means no chance for Dc caps */
        if (!ci->i_auth_cap)
                goto no_async;

        /* Any delegated inos? */
        if (xa_empty(&ci->i_auth_cap->session->s_delegated_inos))
                goto no_async;

        if (!ceph_file_layout_is_valid(&ci->i_cached_layout))
                goto no_async;

        if ((__ceph_caps_issued(ci, NULL) & want) != want)
                goto no_async;

        if (d_in_lookup(dentry)) {
                if (!__ceph_dir_is_complete(ci))
                        goto no_async;
                spin_lock(&dentry->d_lock);
                di->lease_shared_gen = atomic_read(&ci->i_shared_gen);
                spin_unlock(&dentry->d_lock);
        } else if (atomic_read(&ci->i_shared_gen) !=
                   READ_ONCE(di->lease_shared_gen)) {
                goto no_async;
        }

        ino = ceph_get_deleg_ino(ci->i_auth_cap->session);
        if (!ino)
                goto no_async;

        *pino = ino;
        ceph_take_cap_refs(ci, want, false);
        memcpy(lo, &ci->i_cached_layout, sizeof(*lo));
        rcu_assign_pointer(lo->pool_ns,
                           ceph_try_get_string(ci->i_cached_layout.pool_ns));
        got = want;
no_async:
        spin_unlock(&ci->i_ceph_lock);
        return got;
}

static void restore_deleg_ino(struct inode *dir, u64 ino)
{
        struct ceph_inode_info *ci = ceph_inode(dir);
        struct ceph_mds_session *s = NULL;

        spin_lock(&ci->i_ceph_lock);
        if (ci->i_auth_cap)
                s = ceph_get_mds_session(ci->i_auth_cap->session);
        spin_unlock(&ci->i_ceph_lock);
        if (s) {
                int err = ceph_restore_deleg_ino(s, ino);
                if (err)
                        pr_warn("ceph: unable to restore delegated ino 0x%llx to session: %d\n",
                                ino, err);
                ceph_put_mds_session(s);
        }
}

static void wake_async_create_waiters(struct inode *inode,
                                      struct ceph_mds_session *session)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        bool check_cap = false;

        spin_lock(&ci->i_ceph_lock);
        if (ci->i_ceph_flags & CEPH_I_ASYNC_CREATE) {
                ci->i_ceph_flags &= ~CEPH_I_ASYNC_CREATE;
                wake_up_bit(&ci->i_ceph_flags, CEPH_ASYNC_CREATE_BIT);

                if (ci->i_ceph_flags & CEPH_I_ASYNC_CHECK_CAPS) {
                        ci->i_ceph_flags &= ~CEPH_I_ASYNC_CHECK_CAPS;
                        check_cap = true;
                }
        }
        ceph_kick_flushing_inode_caps(session, ci);
        spin_unlock(&ci->i_ceph_lock);

        if (check_cap)
                ceph_check_caps(ci, CHECK_CAPS_FLUSH);
}

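/*
 * Callback that runs when the reply to an async create arrives.  On
 * failure the new inode's mapping is poisoned and any waiters on the
 * create are woken; -EJUKEBOX means the MDS changed and the caller
 * must resubmit.
 */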
static void ceph_async_create_cb(struct ceph_mds_client *mdsc,
                                 struct ceph_mds_request *req)
{
        struct dentry *dentry = req->r_dentry;
        struct inode *dinode = d_inode(dentry);
        struct inode *tinode = req->r_target_inode;
        int result = req->r_err ? req->r_err :
                        le32_to_cpu(req->r_reply_info.head->result);

        WARN_ON_ONCE(dinode && tinode && dinode != tinode);

        /* MDS changed -- caller must resubmit */
        if (result == -EJUKEBOX)
                goto out;

        mapping_set_error(req->r_parent->i_mapping, result);

        if (result) {
                int pathlen = 0;
                u64 base = 0;
                char *path = ceph_mdsc_build_path(req->r_dentry, &pathlen,
                                                  &base, 0);

                pr_warn("async create failure path=(%llx)%s result=%d!\n",
                        base, IS_ERR(path) ? "<<bad>>" : path, result);
                ceph_mdsc_free_path(path, pathlen);

                ceph_dir_clear_complete(req->r_parent);
                if (!d_unhashed(dentry))
                        d_drop(dentry);

                if (dinode) {
                        mapping_set_error(dinode->i_mapping, result);
                        ceph_inode_shutdown(dinode);
                        wake_async_create_waiters(dinode, req->r_session);
                }
        }

        if (tinode) {
                u64 ino = ceph_vino(tinode).ino;

                if (req->r_deleg_ino != ino)
                        pr_warn("%s: inode number mismatch! err=%d deleg_ino=0x%llx target=0x%llx\n",
                                __func__, req->r_err, req->r_deleg_ino, ino);

                mapping_set_error(tinode->i_mapping, result);
                wake_async_create_waiters(tinode, req->r_session);
        } else if (!result) {
                pr_warn("%s: no req->r_target_inode for 0x%llx\n", __func__,
                        req->r_deleg_ino);
        }
out:
        ceph_mdsc_release_dir_caps(req);
}

static int ceph_finish_async_create(struct inode *dir, struct dentry *dentry,
                                    struct file *file, umode_t mode,
                                    struct ceph_mds_request *req,
                                    struct ceph_acl_sec_ctx *as_ctx,
                                    struct ceph_file_layout *lo)
{
        int ret;
        char xattr_buf[4];
        struct ceph_mds_reply_inode in = { };
        struct ceph_mds_reply_info_in iinfo = { .in = &in };
        struct ceph_inode_info *ci = ceph_inode(dir);
        struct ceph_dentry_info *di = ceph_dentry(dentry);
        struct inode *inode;
        struct timespec64 now;
        struct ceph_string *pool_ns;
        struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb);
        struct ceph_vino vino = { .ino = req->r_deleg_ino,
                                  .snap = CEPH_NOSNAP };

        ktime_get_real_ts64(&now);

        inode = ceph_get_inode(dentry->d_sb, vino);
        if (IS_ERR(inode))
                return PTR_ERR(inode);

        iinfo.inline_version = CEPH_INLINE_NONE;
        iinfo.change_attr = 1;
        ceph_encode_timespec64(&iinfo.btime, &now);

        if (req->r_pagelist) {
                iinfo.xattr_len = req->r_pagelist->length;
                iinfo.xattr_data = req->r_pagelist->mapped_tail;
        } else {
                /* fake it */
                iinfo.xattr_len = ARRAY_SIZE(xattr_buf);
                iinfo.xattr_data = xattr_buf;
                memset(iinfo.xattr_data, 0, iinfo.xattr_len);
        }

        in.ino = cpu_to_le64(vino.ino);
        in.snapid = cpu_to_le64(CEPH_NOSNAP);
        in.version = cpu_to_le64(1);    // ???
        in.cap.caps = in.cap.wanted = cpu_to_le32(CEPH_CAP_ALL_FILE);
        in.cap.cap_id = cpu_to_le64(1);
        in.cap.realm = cpu_to_le64(ci->i_snap_realm->ino);
        in.cap.flags = CEPH_CAP_FLAG_AUTH;
        in.ctime = in.mtime = in.atime = iinfo.btime;
        in.truncate_seq = cpu_to_le32(1);
        in.truncate_size = cpu_to_le64(-1ULL);
        in.xattr_version = cpu_to_le64(1);
        in.uid = cpu_to_le32(from_kuid(&init_user_ns, current_fsuid()));
        if (dir->i_mode & S_ISGID) {
                in.gid = cpu_to_le32(from_kgid(&init_user_ns, dir->i_gid));

                /* Directories always inherit the setgid bit. */
                if (S_ISDIR(mode))
                        mode |= S_ISGID;
        } else {
                in.gid = cpu_to_le32(from_kgid(&init_user_ns, current_fsgid()));
        }
        in.mode = cpu_to_le32((u32)mode);

        in.nlink = cpu_to_le32(1);
        in.max_size = cpu_to_le64(lo->stripe_unit);

        ceph_file_layout_to_legacy(lo, &in.layout);
        /* lo is private, so pool_ns can't change */
        pool_ns = rcu_dereference_raw(lo->pool_ns);
        if (pool_ns) {
                iinfo.pool_ns_len = pool_ns->len;
                iinfo.pool_ns_data = pool_ns->str;
        }

        down_read(&mdsc->snap_rwsem);
        ret = ceph_fill_inode(inode, NULL, &iinfo, NULL, req->r_session,
                              req->r_fmode, NULL);
        up_read(&mdsc->snap_rwsem);
        if (ret) {
                dout("%s failed to fill inode: %d\n", __func__, ret);
                ceph_dir_clear_complete(dir);
                if (!d_unhashed(dentry))
                        d_drop(dentry);
                if (inode->i_state & I_NEW)
                        discard_new_inode(inode);
        } else {
                struct dentry *dn;

                dout("%s d_adding new inode 0x%llx to 0x%llx/%s\n", __func__,
                        vino.ino, ceph_ino(dir), dentry->d_name.name);
                ceph_dir_clear_ordered(dir);
                ceph_init_inode_acls(inode, as_ctx);
                if (inode->i_state & I_NEW) {
                        /*
                         * If it's not I_NEW, then someone created this before
                         * we got here. Assume the server is aware of it at
                         * that point and don't worry about setting
                         * CEPH_I_ASYNC_CREATE.
                         */
                        ceph_inode(inode)->i_ceph_flags = CEPH_I_ASYNC_CREATE;
                        unlock_new_inode(inode);
                }
                if (d_in_lookup(dentry) || d_really_is_negative(dentry)) {
                        if (!d_unhashed(dentry))
                                d_drop(dentry);
                        dn = d_splice_alias(inode, dentry);
                        WARN_ON_ONCE(dn && dn != dentry);
                }
                file->f_mode |= FMODE_CREATED;
                ret = finish_open(file, dentry, ceph_open);
        }

        spin_lock(&dentry->d_lock);
        di->flags &= ~CEPH_DENTRY_ASYNC_CREATE;
        wake_up_bit(&di->flags, CEPH_DENTRY_ASYNC_CREATE_BIT);
        spin_unlock(&dentry->d_lock);

        return ret;
}

/*
 * Do a lookup + open with a single request.  If we get a non-existent
 * file or symlink, return 1 so the VFS can retry.
 */
int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
                     struct file *file, unsigned flags, umode_t mode)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        struct dentry *dn;
        struct ceph_acl_sec_ctx as_ctx = {};
        bool try_async = ceph_test_mount_opt(fsc, ASYNC_DIROPS);
        int mask;
        int err;

        dout("atomic_open %p dentry %p '%pd' %s flags %d mode 0%o\n",
             dir, dentry, dentry,
             d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode);

        if (dentry->d_name.len > NAME_MAX)
                return -ENAMETOOLONG;

        err = ceph_wait_on_conflict_unlink(dentry);
        if (err)
                return err;
        /*
         * Do not truncate the file, since atomic_open is called before the
         * permission check. The caller will do the truncation afterward.
         */
        flags &= ~O_TRUNC;

        if (flags & O_CREAT) {
                if (ceph_quota_is_max_files_exceeded(dir))
                        return -EDQUOT;
                err = ceph_pre_init_acls(dir, &mode, &as_ctx);
                if (err < 0)
                        return err;
                err = ceph_security_init_secctx(dentry, mode, &as_ctx);
                if (err < 0)
                        goto out_ctx;
                /* Async create can't handle more than a page of xattrs */
                if (as_ctx.pagelist &&
                    !list_is_singular(&as_ctx.pagelist->head))
                        try_async = false;
        } else if (!d_in_lookup(dentry)) {
                /* If it's not being looked up, it's negative */
                return -ENOENT;
        }
retry:
        /* do the open */
        req = prepare_open_request(dir->i_sb, flags, mode);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                goto out_ctx;
        }
        req->r_dentry = dget(dentry);
        req->r_num_caps = 2;
        mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
        if (ceph_security_xattr_wanted(dir))
                mask |= CEPH_CAP_XATTR_SHARED;
        req->r_args.open.mask = cpu_to_le32(mask);
        req->r_parent = dir;
        ihold(dir);

        if (flags & O_CREAT) {
                struct ceph_file_layout lo;

                req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL;
                req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
                if (as_ctx.pagelist) {
                        req->r_pagelist = as_ctx.pagelist;
                        as_ctx.pagelist = NULL;
                }
                if (try_async &&
                    (req->r_dir_caps =
                      try_prep_async_create(dir, dentry, &lo,
                                            &req->r_deleg_ino))) {
                        struct ceph_dentry_info *di = ceph_dentry(dentry);

                        set_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags);
                        req->r_args.open.flags |= cpu_to_le32(CEPH_O_EXCL);
                        req->r_callback = ceph_async_create_cb;

                        spin_lock(&dentry->d_lock);
                        di->flags |= CEPH_DENTRY_ASYNC_CREATE;
                        spin_unlock(&dentry->d_lock);

                        err = ceph_mdsc_submit_request(mdsc, dir, req);
                        if (!err) {
                                err = ceph_finish_async_create(dir, dentry,
                                                        file, mode, req,
                                                        &as_ctx, &lo);
                        } else if (err == -EJUKEBOX) {
                                restore_deleg_ino(dir, req->r_deleg_ino);
                                ceph_mdsc_put_request(req);
                                try_async = false;
                                ceph_put_string(rcu_dereference_raw(lo.pool_ns));
                                goto retry;
                        }
                        ceph_put_string(rcu_dereference_raw(lo.pool_ns));
                        goto out_req;
                }
        }

        set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
        err = ceph_mdsc_do_request(mdsc, (flags & O_CREAT) ? dir : NULL, req);
        if (err == -ENOENT) {
                dentry = ceph_handle_snapdir(req, dentry);
                if (IS_ERR(dentry)) {
                        err = PTR_ERR(dentry);
                        goto out_req;
                }
                err = 0;
        }

        if (!err && (flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
                err = ceph_handle_notrace_create(dir, dentry);

        if (d_in_lookup(dentry)) {
                dn = ceph_finish_lookup(req, dentry, err);
                if (IS_ERR(dn))
                        err = PTR_ERR(dn);
        } else {
                /* we were given a hashed negative dentry */
                dn = NULL;
        }
        if (err)
                goto out_req;
        if (dn || d_really_is_negative(dentry) || d_is_symlink(dentry)) {
                /* make vfs retry on splice, ENOENT, or symlink */
                dout("atomic_open finish_no_open on dn %p\n", dn);
                err = finish_no_open(file, dn);
        } else {
                dout("atomic_open finish_open on dn %p\n", dn);
                if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) {
                        struct inode *newino = d_inode(dentry);

                        cache_file_layout(dir, newino);
                        ceph_init_inode_acls(newino, &as_ctx);
                        file->f_mode |= FMODE_CREATED;
                }
                err = finish_open(file, dentry, ceph_open);
        }
out_req:
        ceph_mdsc_put_request(req);
out_ctx:
        ceph_release_acl_sec_ctx(&as_ctx);
        dout("atomic_open result=%d\n", err);
        return err;
}

int ceph_release(struct inode *inode, struct file *file)
{
        struct ceph_inode_info *ci = ceph_inode(inode);

        if (S_ISDIR(inode->i_mode)) {
                struct ceph_dir_file_info *dfi = file->private_data;
                dout("release inode %p dir file %p\n", inode, file);
                WARN_ON(!list_empty(&dfi->file_info.rw_contexts));

                ceph_put_fmode(ci, dfi->file_info.fmode, 1);

                if (dfi->last_readdir)
                        ceph_mdsc_put_request(dfi->last_readdir);
                kfree(dfi->last_name);
                kfree(dfi->dir_info);
                kmem_cache_free(ceph_dir_file_cachep, dfi);
        } else {
                struct ceph_file_info *fi = file->private_data;
                dout("release inode %p regular file %p\n", inode, file);
                WARN_ON(!list_empty(&fi->rw_contexts));

                ceph_fscache_unuse_cookie(inode, file->f_mode & FMODE_WRITE);
                ceph_put_fmode(ci, fi->fmode, 1);

                kmem_cache_free(ceph_file_cachep, fi);
        }

        /* wake up anyone waiting for caps on this inode */
        wake_up_all(&ci->i_cap_wq);
        return 0;
}

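/*
 * Out-parameter values for ceph_sync_read()'s *retry_op: they tell
 * the caller how to retry a short read, e.g. CHECK_EOF is set below
 * when the read stopped at i_size.
 */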
enum {
        HAVE_RETRIED = 1,
        CHECK_EOF =    2,
        READ_INLINE =  3,
};

/*
 * Completely synchronous read and write methods.  Direct from __user
 * buffer to osd, or directly to user pages (if O_DIRECT).
 *
 * If the read spans object boundary, just do multiple reads.  (That's not
 * atomic, but good enough for now.)
 *
 * If we get a short result from the OSD, check against i_size; we need to
 * only return a short read to the caller if we hit EOF.
 */
static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
                              int *retry_op)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
        struct ceph_osd_client *osdc = &fsc->client->osdc;
        ssize_t ret;
        u64 off = iocb->ki_pos;
        u64 len = iov_iter_count(to);
        u64 i_size = i_size_read(inode);

        dout("sync_read on file %p %llu~%u %s\n", file, off, (unsigned)len,
             (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");

        if (!len)
                return 0;
        /*
         * flush any page cache pages in this range.  this
         * will make concurrent normal and sync io slow,
         * but it will at least behave sensibly when they are
         * in sequence.
         */
        ret = filemap_write_and_wait_range(inode->i_mapping,
                                           off, off + len - 1);
        if (ret < 0)
                return ret;

        ret = 0;
        while ((len = iov_iter_count(to)) > 0) {
                struct ceph_osd_request *req;
                struct page **pages;
                int num_pages;
                size_t page_off;
                bool more;
                int idx;
                size_t left;

                req = ceph_osdc_new_request(osdc, &ci->i_layout,
                                        ci->i_vino, off, &len, 0, 1,
                                        CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
                                        NULL, ci->i_truncate_seq,
                                        ci->i_truncate_size, false);
                if (IS_ERR(req)) {
                        ret = PTR_ERR(req);
                        break;
                }

                more = len < iov_iter_count(to);

                num_pages = calc_pages_for(off, len);
                page_off = off & ~PAGE_MASK;
                pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
                if (IS_ERR(pages)) {
                        ceph_osdc_put_request(req);
                        ret = PTR_ERR(pages);
                        break;
                }

                osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_off,
                                                 false, false);
                ceph_osdc_start_request(osdc, req);
                ret = ceph_osdc_wait_request(osdc, req);

                ceph_update_read_metrics(&fsc->mdsc->metric,
                                         req->r_start_latency,
                                         req->r_end_latency,
                                         len, ret);

                ceph_osdc_put_request(req);

                i_size = i_size_read(inode);
                dout("sync_read %llu~%llu got %zd i_size %llu%s\n",
                     off, len, ret, i_size, (more ? " MORE" : ""));

                if (ret == -ENOENT)
                        ret = 0;
                if (ret >= 0 && ret < len && (off + ret < i_size)) {
                        int zlen = min(len - ret, i_size - off - ret);
                        int zoff = page_off + ret;
                        dout("sync_read zero gap %llu~%llu\n",
                             off + ret, off + ret + zlen);
                        ceph_zero_page_vector_range(zoff, zlen, pages);
                        ret += zlen;
                }

                idx = 0;
                left = ret > 0 ? ret : 0;
                while (left > 0) {
                        size_t len, copied;
                        page_off = off & ~PAGE_MASK;
                        len = min_t(size_t, left, PAGE_SIZE - page_off);
                        SetPageUptodate(pages[idx]);
                        copied = copy_page_to_iter(pages[idx++],
                                                   page_off, len, to);
                        off += copied;
                        left -= copied;
                        if (copied < len) {
                                ret = -EFAULT;
                                break;
                        }
                }
                ceph_release_page_vector(pages, num_pages);

                if (ret < 0) {
                        if (ret == -EBLOCKLISTED)
                                fsc->blocklisted = true;
                        break;
                }

                if (off >= i_size || !more)
                        break;
        }

        if (off > iocb->ki_pos) {
                if (off >= i_size) {
                        *retry_op = CHECK_EOF;
                        ret = i_size - iocb->ki_pos;
                        iocb->ki_pos = i_size;
                } else {
                        ret = off - iocb->ki_pos;
                        iocb->ki_pos = off;
                }
        }

        dout("sync_read result %zd retry_op %d\n", ret, *retry_op);
        return ret;
}

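/*
 * State for an O_DIRECT AIO that may be split across multiple OSD
 * requests.  pending_reqs counts the OSD requests still in flight;
 * the last one to complete calls ceph_aio_complete(), which reports
 * the final result to the kiocb.
 */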
struct ceph_aio_request {
        struct kiocb *iocb;
        size_t total_len;
        bool write;
        bool should_dirty;
        int error;
        struct list_head osd_reqs;
        unsigned num_reqs;
        atomic_t pending_reqs;
        struct timespec64 mtime;
        struct ceph_cap_flush *prealloc_cf;
};

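/*
 * Deferred resubmission of a single OSD write that raced with a
 * snapshot (-EOLDSNAPC); see ceph_aio_retry_work().
 */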
struct ceph_aio_work {
        struct work_struct work;
        struct ceph_osd_request *req;
};

static void ceph_aio_retry_work(struct work_struct *work);

static void ceph_aio_complete(struct inode *inode,
                              struct ceph_aio_request *aio_req)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        int ret;

        if (!atomic_dec_and_test(&aio_req->pending_reqs))
                return;

        if (aio_req->iocb->ki_flags & IOCB_DIRECT)
                inode_dio_end(inode);

        ret = aio_req->error;
        if (!ret)
                ret = aio_req->total_len;

        dout("ceph_aio_complete %p rc %d\n", inode, ret);

        if (ret >= 0 && aio_req->write) {
                int dirty;

                loff_t endoff = aio_req->iocb->ki_pos + aio_req->total_len;
                if (endoff > i_size_read(inode)) {
                        if (ceph_inode_set_size(inode, endoff))
                                ceph_check_caps(ci, CHECK_CAPS_AUTHONLY);
                }

                spin_lock(&ci->i_ceph_lock);
                dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
                                               &aio_req->prealloc_cf);
                spin_unlock(&ci->i_ceph_lock);
                if (dirty)
                        __mark_inode_dirty(inode, dirty);

        }

        ceph_put_cap_refs(ci, (aio_req->write ? CEPH_CAP_FILE_WR :
                                                CEPH_CAP_FILE_RD));

        aio_req->iocb->ki_complete(aio_req->iocb, ret);

        ceph_free_cap_flush(aio_req->prealloc_cf);
        kfree(aio_req);
}

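/*
 * Completion callback for one OSD request within a direct I/O: on a
 * short read, zero-fill the remainder; record latency metrics; unpin
 * the pages; then let ceph_aio_complete() run once all requests are
 * done.  A write that raced with a snapshot (-EOLDSNAPC) is requeued
 * via ceph_aio_retry_work() instead.
 */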
static void ceph_aio_complete_req(struct ceph_osd_request *req)
{
        int rc = req->r_result;
        struct inode *inode = req->r_inode;
        struct ceph_aio_request *aio_req = req->r_priv;
        struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
        struct ceph_client_metric *metric = &ceph_sb_to_mdsc(inode->i_sb)->metric;
        unsigned int len = osd_data->bvec_pos.iter.bi_size;

        BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_BVECS);
        BUG_ON(!osd_data->num_bvecs);

        dout("ceph_aio_complete_req %p rc %d bytes %u\n", inode, rc, len);

        if (rc == -EOLDSNAPC) {
                struct ceph_aio_work *aio_work;
                BUG_ON(!aio_req->write);

                aio_work = kmalloc(sizeof(*aio_work), GFP_NOFS);
                if (aio_work) {
                        INIT_WORK(&aio_work->work, ceph_aio_retry_work);
                        aio_work->req = req;
                        queue_work(ceph_inode_to_client(inode)->inode_wq,
                                   &aio_work->work);
                        return;
                }
                rc = -ENOMEM;
        } else if (!aio_req->write) {
                if (rc == -ENOENT)
                        rc = 0;
                if (rc >= 0 && len > rc) {
                        struct iov_iter i;
                        int zlen = len - rc;

                        /*
                         * If the read is satisfied by a single OSD
                         * request, it can extend past EOF.  Otherwise
                         * the read is within i_size.
                         */
                        if (aio_req->num_reqs == 1) {
                                loff_t i_size = i_size_read(inode);
                                loff_t endoff = aio_req->iocb->ki_pos + rc;
                                if (endoff < i_size)
                                        zlen = min_t(size_t, zlen,
                                                     i_size - endoff);
                                aio_req->total_len = rc + zlen;
                        }

                        iov_iter_bvec(&i, ITER_DEST, osd_data->bvec_pos.bvecs,
                                      osd_data->num_bvecs, len);
                        iov_iter_advance(&i, rc);
                        iov_iter_zero(zlen, &i);
                }
        }

        /* r_start_latency == 0 means the request was not submitted */
        if (req->r_start_latency) {
                if (aio_req->write)
                        ceph_update_write_metrics(metric, req->r_start_latency,
                                                  req->r_end_latency, len, rc);
                else
                        ceph_update_read_metrics(metric, req->r_start_latency,
                                                 req->r_end_latency, len, rc);
        }

        put_bvecs(osd_data->bvec_pos.bvecs, osd_data->num_bvecs,
                  aio_req->should_dirty);
        ceph_osdc_put_request(req);

        if (rc < 0)
                cmpxchg(&aio_req->error, 0, rc);

        ceph_aio_complete(inode, aio_req);
        return;
}

static void ceph_aio_retry_work(struct work_struct *work)
{
        struct ceph_aio_work *aio_work =
                container_of(work, struct ceph_aio_work, work);
        struct ceph_osd_request *orig_req = aio_work->req;
        struct ceph_aio_request *aio_req = orig_req->r_priv;
        struct inode *inode = orig_req->r_inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_snap_context *snapc;
        struct ceph_osd_request *req;
        int ret;

        spin_lock(&ci->i_ceph_lock);
        if (__ceph_have_pending_cap_snap(ci)) {
                struct ceph_cap_snap *capsnap =
                        list_last_entry(&ci->i_cap_snaps,
                                        struct ceph_cap_snap,
                                        ci_item);
                snapc = ceph_get_snap_context(capsnap->context);
        } else {
                BUG_ON(!ci->i_head_snapc);
                snapc = ceph_get_snap_context(ci->i_head_snapc);
        }
        spin_unlock(&ci->i_ceph_lock);

        req = ceph_osdc_alloc_request(orig_req->r_osdc, snapc, 1,
                        false, GFP_NOFS);
        if (!req) {
                ret = -ENOMEM;
                req = orig_req;
                goto out;
        }

        req->r_flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
        ceph_oloc_copy(&req->r_base_oloc, &orig_req->r_base_oloc);
        ceph_oid_copy(&req->r_base_oid, &orig_req->r_base_oid);

        req->r_ops[0] = orig_req->r_ops[0];

        req->r_mtime = aio_req->mtime;
        req->r_data_offset = req->r_ops[0].extent.offset;

        ret = ceph_osdc_alloc_messages(req, GFP_NOFS);
        if (ret) {
                ceph_osdc_put_request(req);
                req = orig_req;
                goto out;
        }

        ceph_osdc_put_request(orig_req);

        req->r_callback = ceph_aio_complete_req;
        req->r_inode = inode;
        req->r_priv = aio_req;

        ceph_osdc_start_request(req->r_osdc, req);
out:
        if (ret < 0) {
                req->r_result = ret;
                ceph_aio_complete_req(req);
        }

        ceph_put_snap_context(snapc);
        kfree(aio_work);
}

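/*
 * Issue direct I/O: pin the user pages into bio_vecs and send one OSD
 * request per object extent.  Synchronous callers wait for each
 * request in turn; AIO callers get -EIOCBQUEUED back and completion
 * is driven by ceph_aio_complete_req() above.
 */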
static ssize_t
ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
                       struct ceph_snap_context *snapc,
                       struct ceph_cap_flush **pcf)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
        struct ceph_client_metric *metric = &fsc->mdsc->metric;
        struct ceph_vino vino;
        struct ceph_osd_request *req;
        struct bio_vec *bvecs;
        struct ceph_aio_request *aio_req = NULL;
        int num_pages = 0;
        int flags;
        int ret = 0;
        struct timespec64 mtime = current_time(inode);
        size_t count = iov_iter_count(iter);
        loff_t pos = iocb->ki_pos;
        bool write = iov_iter_rw(iter) == WRITE;
        bool should_dirty = !write && user_backed_iter(iter);

        if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP)
                return -EROFS;

        dout("sync_direct_%s on file %p %lld~%u snapc %p seq %lld\n",
             (write ? "write" : "read"), file, pos, (unsigned)count,
             snapc, snapc ? snapc->seq : 0);

        if (write) {
                int ret2;

                ceph_fscache_invalidate(inode, true);

                ret2 = invalidate_inode_pages2_range(inode->i_mapping,
                                        pos >> PAGE_SHIFT,
                                        (pos + count - 1) >> PAGE_SHIFT);
                if (ret2 < 0)
                        dout("invalidate_inode_pages2_range returned %d\n", ret2);

                flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
        } else {
                flags = CEPH_OSD_FLAG_READ;
        }

        while (iov_iter_count(iter) > 0) {
                u64 size = iov_iter_count(iter);
                ssize_t len;

                if (write)
                        size = min_t(u64, size, fsc->mount_options->wsize);
                else
                        size = min_t(u64, size, fsc->mount_options->rsize);

                vino = ceph_vino(inode);
                req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
                                            vino, pos, &size, 0,
                                            1,
                                            write ? CEPH_OSD_OP_WRITE :
                                                    CEPH_OSD_OP_READ,
                                            flags, snapc,
                                            ci->i_truncate_seq,
                                            ci->i_truncate_size,
                                            false);
                if (IS_ERR(req)) {
                        ret = PTR_ERR(req);
                        break;
                }

                len = iter_get_bvecs_alloc(iter, size, &bvecs, &num_pages);
                if (len < 0) {
                        ceph_osdc_put_request(req);
                        ret = len;
                        break;
                }
                if (len != size)
                        osd_req_op_extent_update(req, 0, len);

                /*
                 * To simplify error handling, allow AIO when the I/O is
                 * within i_size or can be satisfied by a single OSD
                 * request.
                 */
1350                 if (pos == iocb->ki_pos && !is_sync_kiocb(iocb) &&
1351                     (len == count || pos + count <= i_size_read(inode))) {
1352                         aio_req = kzalloc(sizeof(*aio_req), GFP_KERNEL);
1353                         if (aio_req) {
1354                                 aio_req->iocb = iocb;
1355                                 aio_req->write = write;
1356                                 aio_req->should_dirty = should_dirty;
1357                                 INIT_LIST_HEAD(&aio_req->osd_reqs);
1358                                 if (write) {
1359                                         aio_req->mtime = mtime;
1360                                         swap(aio_req->prealloc_cf, *pcf);
1361                                 }
1362                         }
1363                         /* ignore error */
1364                 }
1365
1366                 if (write) {
1367                         /*
1368                          * throw out any page cache pages in this range. this
1369                          * may block.
1370                          */
1371                         truncate_inode_pages_range(inode->i_mapping, pos,
1372                                                    PAGE_ALIGN(pos + len) - 1);
1373
1374                         req->r_mtime = mtime;
1375                 }
1376
1377                 osd_req_op_extent_osd_data_bvecs(req, 0, bvecs, num_pages, len);
1378
1379                 if (aio_req) {
1380                         aio_req->total_len += len;
1381                         aio_req->num_reqs++;
1382                         atomic_inc(&aio_req->pending_reqs);
1383
1384                         req->r_callback = ceph_aio_complete_req;
1385                         req->r_inode = inode;
1386                         req->r_priv = aio_req;
1387                         list_add_tail(&req->r_private_item, &aio_req->osd_reqs);
1388
1389                         pos += len;
1390                         continue;
1391                 }
1392
1393                 ceph_osdc_start_request(req->r_osdc, req);
1394                 ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
1395
1396                 if (write)
1397                         ceph_update_write_metrics(metric, req->r_start_latency,
1398                                                   req->r_end_latency, len, ret);
1399                 else
1400                         ceph_update_read_metrics(metric, req->r_start_latency,
1401                                                  req->r_end_latency, len, ret);
1402
1403                 size = i_size_read(inode);
1404                 if (!write) {
1405                         if (ret == -ENOENT)
1406                                 ret = 0;
1407                         if (ret >= 0 && ret < len && pos + ret < size) {
1408                                 struct iov_iter i;
1409                                 int zlen = min_t(size_t, len - ret,
1410                                                  size - pos - ret);
1411
1412                                 iov_iter_bvec(&i, ITER_DEST, bvecs, num_pages, len);
1413                                 iov_iter_advance(&i, ret);
1414                                 iov_iter_zero(zlen, &i);
1415                                 ret += zlen;
1416                         }
1417                         if (ret >= 0)
1418                                 len = ret;
1419                 }
1420
1421                 put_bvecs(bvecs, num_pages, should_dirty);
1422                 ceph_osdc_put_request(req);
1423                 if (ret < 0)
1424                         break;
1425
1426                 pos += len;
1427                 if (!write && pos >= size)
1428                         break;
1429
1430                 if (write && pos > size) {
1431                         if (ceph_inode_set_size(inode, pos))
1432                                 ceph_check_caps(ceph_inode(inode),
1433                                                 CHECK_CAPS_AUTHONLY);
1434                 }
1435         }
1436
1437         if (aio_req) {
1438                 LIST_HEAD(osd_reqs);
1439
1440                 if (aio_req->num_reqs == 0) {
1441                         kfree(aio_req);
1442                         return ret;
1443                 }
1444
1445                 ceph_get_cap_refs(ci, write ? CEPH_CAP_FILE_WR :
1446                                               CEPH_CAP_FILE_RD);
1447
1448                 list_splice(&aio_req->osd_reqs, &osd_reqs);
1449                 inode_dio_begin(inode);
1450                 while (!list_empty(&osd_reqs)) {
1451                         req = list_first_entry(&osd_reqs,
1452                                                struct ceph_osd_request,
1453                                                r_private_item);
1454                         list_del_init(&req->r_private_item);
1455                         if (ret >= 0)
1456                                 ceph_osdc_start_request(req->r_osdc, req);
1457                         if (ret < 0) {
1458                                 req->r_result = ret;
1459                                 ceph_aio_complete_req(req);
1460                         }
1461                 }
1462                 return -EIOCBQUEUED;
1463         }
1464
1465         if (ret != -EOLDSNAPC && pos > iocb->ki_pos) {
1466                 ret = pos - iocb->ki_pos;
1467                 iocb->ki_pos = pos;
1468         }
1469         return ret;
1470 }
1471
1472 /*
1473  * Synchronous write, straight from __user pointer or user pages.
1474  *
1475  * If the write spans an object boundary, just do multiple writes.  (For
1476  * a correct atomic write, we would e.g. have to take write locks on all
1477  * objects, roll back on failure, etc.)
1478  */
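     /*
      * For example, assuming the common 4 MB object size: a 6 MB write at
      * offset 2 MB becomes two OSD requests, 2M~2M into the first object
      * and 0~4M into the next; ceph_osdc_new_request() trims 'len' to the
      * object boundary on each pass of the loop below.
      */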
1479 static ssize_t
1480 ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
1481                 struct ceph_snap_context *snapc)
1482 {
1483         struct file *file = iocb->ki_filp;
1484         struct inode *inode = file_inode(file);
1485         struct ceph_inode_info *ci = ceph_inode(inode);
1486         struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1487         struct ceph_vino vino;
1488         struct ceph_osd_request *req;
1489         struct page **pages;
1490         u64 len;
1491         int num_pages;
1492         int written = 0;
1493         int flags;
1494         int ret;
1495         bool check_caps = false;
1496         struct timespec64 mtime = current_time(inode);
1497         size_t count = iov_iter_count(from);
1498
1499         if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
1500                 return -EROFS;
1501
1502         dout("sync_write on file %p %lld~%u snapc %p seq %lld\n",
1503              file, pos, (unsigned)count, snapc, snapc->seq);
1504
1505         ret = filemap_write_and_wait_range(inode->i_mapping,
1506                                            pos, pos + count - 1);
1507         if (ret < 0)
1508                 return ret;
1509
1510         ceph_fscache_invalidate(inode, false);
1511         ret = invalidate_inode_pages2_range(inode->i_mapping,
1512                                             pos >> PAGE_SHIFT,
1513                                             (pos + count - 1) >> PAGE_SHIFT);
1514         if (ret < 0)
1515                 dout("invalidate_inode_pages2_range returned %d\n", ret);
1516
1517         flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
1518
1519         while ((len = iov_iter_count(from)) > 0) {
1520                 size_t left;
1521                 int n;
1522
1523                 vino = ceph_vino(inode);
1524                 req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
1525                                             vino, pos, &len, 0, 1,
1526                                             CEPH_OSD_OP_WRITE, flags, snapc,
1527                                             ci->i_truncate_seq,
1528                                             ci->i_truncate_size,
1529                                             false);
1530                 if (IS_ERR(req)) {
1531                         ret = PTR_ERR(req);
1532                         break;
1533                 }
1534
1535                 /*
1536                  * Write from the beginning of the first page,
1537                  * regardless of io alignment.
1538                  */
1539                 num_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1540
1541                 pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
1542                 if (IS_ERR(pages)) {
1543                         ret = PTR_ERR(pages);
1544                         goto out;
1545                 }
1546
1547                 left = len;
1548                 for (n = 0; n < num_pages; n++) {
1549                         size_t plen = min_t(size_t, left, PAGE_SIZE);
1550                         ret = copy_page_from_iter(pages[n], 0, plen, from);
1551                         if (ret != plen) {
1552                                 ret = -EFAULT;
1553                                 break;
1554                         }
1555                         left -= ret;
1556                 }
1557
1558                 if (ret < 0) {
1559                         ceph_release_page_vector(pages, num_pages);
1560                         goto out;
1561                 }
1562
1563                 req->r_inode = inode;
1564
1565                 osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
1566                                                 false, true);
1567
1568                 req->r_mtime = mtime;
1569                 ceph_osdc_start_request(&fsc->client->osdc, req);
1570                 ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
1571
1572                 ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
1573                                           req->r_end_latency, len, ret);
1574 out:
1575                 ceph_osdc_put_request(req);
1576                 if (ret != 0) {
1577                         ceph_set_error_write(ci);
1578                         break;
1579                 }
1580
1581                 ceph_clear_error_write(ci);
1582                 pos += len;
1583                 written += len;
1584                 if (pos > i_size_read(inode)) {
1585                         check_caps = ceph_inode_set_size(inode, pos);
1586                         if (check_caps)
1587                                 ceph_check_caps(ceph_inode(inode),
1588                                                 CHECK_CAPS_AUTHONLY);
1589                 }
1590
1591         }
1592
1593         if (ret != -EOLDSNAPC && written > 0) {
1594                 ret = written;
1595                 iocb->ki_pos = pos;
1596         }
1597         return ret;
1598 }
1599
1600 /*
1601  * Wrap generic_file_aio_read with checks for cap bits on the inode.
1602  * Atomically grab references, so that those bits are not released
1603  * back to the MDS mid-read.
1604  *
1605  * Hmm, the sync read case isn't actually async... should it be?
1606  */
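     /*
      * Path selection, in short: holding Fc (FILE_CACHE) or LAZYIO caps and
      * with neither O_DIRECT nor CEPH_F_SYNC set, we read through the page
      * cache via generic_file_read_iter(); otherwise we drop down to
      * ceph_sync_read() or ceph_direct_read_write().  Inline data forces a
      * getattr first and is served out of a single page (READ_INLINE).
      */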
1607 static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
1608 {
1609         struct file *filp = iocb->ki_filp;
1610         struct ceph_file_info *fi = filp->private_data;
1611         size_t len = iov_iter_count(to);
1612         struct inode *inode = file_inode(filp);
1613         struct ceph_inode_info *ci = ceph_inode(inode);
1614         bool direct_lock = iocb->ki_flags & IOCB_DIRECT;
1615         ssize_t ret;
1616         int want = 0, got = 0;
1617         int retry_op = 0, read = 0;
1618
1619 again:
1620         dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
1621              inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);
1622
1623         if (ceph_inode_is_shutdown(inode))
1624                 return -ESTALE;
1625
1626         if (direct_lock)
1627                 ceph_start_io_direct(inode);
1628         else
1629                 ceph_start_io_read(inode);
1630
1631         if (!(fi->flags & CEPH_F_SYNC) && !direct_lock)
1632                 want |= CEPH_CAP_FILE_CACHE;
1633         if (fi->fmode & CEPH_FILE_MODE_LAZY)
1634                 want |= CEPH_CAP_FILE_LAZYIO;
1635
1636         ret = ceph_get_caps(filp, CEPH_CAP_FILE_RD, want, -1, &got);
1637         if (ret < 0) {
1638                 if (direct_lock)
1639                         ceph_end_io_direct(inode);
1640                 else
1641                         ceph_end_io_read(inode);
1642                 return ret;
1643         }
1644
1645         if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
1646             (iocb->ki_flags & IOCB_DIRECT) ||
1647             (fi->flags & CEPH_F_SYNC)) {
1648
1649                 dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n",
1650                      inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
1651                      ceph_cap_string(got));
1652
1653                 if (!ceph_has_inline_data(ci)) {
1654                         if (!retry_op && (iocb->ki_flags & IOCB_DIRECT)) {
1655                                 ret = ceph_direct_read_write(iocb, to,
1656                                                              NULL, NULL);
1657                                 if (ret >= 0 && ret < len)
1658                                         retry_op = CHECK_EOF;
1659                         } else {
1660                                 ret = ceph_sync_read(iocb, to, &retry_op);
1661                         }
1662                 } else {
1663                         retry_op = READ_INLINE;
1664                 }
1665         } else {
1666                 CEPH_DEFINE_RW_CONTEXT(rw_ctx, got);
1667                 dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
1668                      inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
1669                      ceph_cap_string(got));
1670                 ceph_add_rw_context(fi, &rw_ctx);
1671                 ret = generic_file_read_iter(iocb, to);
1672                 ceph_del_rw_context(fi, &rw_ctx);
1673         }
1674
1675         dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
1676              inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
1677         ceph_put_cap_refs(ci, got);
1678
1679         if (direct_lock)
1680                 ceph_end_io_direct(inode);
1681         else
1682                 ceph_end_io_read(inode);
1683
1684         if (retry_op > HAVE_RETRIED && ret >= 0) {
1685                 int statret;
1686                 struct page *page = NULL;
1687                 loff_t i_size;
1688                 if (retry_op == READ_INLINE) {
1689                         page = __page_cache_alloc(GFP_KERNEL);
1690                         if (!page)
1691                                 return -ENOMEM;
1692                 }
1693
1694                 statret = __ceph_do_getattr(inode, page,
1695                                             CEPH_STAT_CAP_INLINE_DATA, !!page);
1696                 if (statret < 0) {
1697                         if (page)
1698                                 __free_page(page);
1699                         if (statret == -ENODATA) {
1700                                 BUG_ON(retry_op != READ_INLINE);
1701                                 goto again;
1702                         }
1703                         return statret;
1704                 }
1705
1706                 i_size = i_size_read(inode);
1707                 if (retry_op == READ_INLINE) {
1708                         BUG_ON(ret > 0 || read > 0);
1709                         if (iocb->ki_pos < i_size &&
1710                             iocb->ki_pos < PAGE_SIZE) {
1711                                 loff_t end = min_t(loff_t, i_size,
1712                                                    iocb->ki_pos + len);
1713                                 end = min_t(loff_t, end, PAGE_SIZE);
1714                                 if (statret < end)
1715                                         zero_user_segment(page, statret, end);
1716                                 ret = copy_page_to_iter(page,
1717                                                 iocb->ki_pos & ~PAGE_MASK,
1718                                                 end - iocb->ki_pos, to);
1719                                 iocb->ki_pos += ret;
1720                                 read += ret;
1721                         }
1722                         if (iocb->ki_pos < i_size && read < len) {
1723                                 size_t zlen = min_t(size_t, len - read,
1724                                                     i_size - iocb->ki_pos);
1725                                 ret = iov_iter_zero(zlen, to);
1726                                 iocb->ki_pos += ret;
1727                                 read += ret;
1728                         }
1729                         __free_pages(page, 0);
1730                         return read;
1731                 }
1732
1733                 /* hit EOF or hole? */
1734                 if (retry_op == CHECK_EOF && iocb->ki_pos < i_size &&
1735                     ret < len) {
1736                         dout("sync_read hit hole, ppos %lld < size %lld"
1737                              ", reading more\n", iocb->ki_pos, i_size);
1738
1739                         read += ret;
1740                         len -= ret;
1741                         retry_op = HAVE_RETRIED;
1742                         goto again;
1743                 }
1744         }
1745
1746         if (ret >= 0)
1747                 ret += read;
1748
1749         return ret;
1750 }
1751
1752 /*
1753  * Take cap references to avoid releasing caps to MDS mid-write.
1754  *
1755  * If we are synchronous, and write with an old snap context, the OSD
1756  * may return EOLDSNAPC.  In that case, retry the write.. _after_
1757  * dropping our cap refs and allowing the pending snap to logically
1758  * complete _before_ this write occurs.
1759  *
1760  * If we are near ENOSPC, write synchronously.
1761  */
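     /*
      * In short: holding Fb (FILE_BUFFER) caps and with neither O_DIRECT
      * nor CEPH_F_SYNC set, we write through the page cache via
      * generic_perform_write(); otherwise the write goes through
      * ceph_sync_write() or ceph_direct_read_write() under the snap context
      * sampled below.  A near-full OSD map or pool additionally forces a
      * flush by setting IOCB_DSYNC before generic_write_sync().
      */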
1762 static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
1763 {
1764         struct file *file = iocb->ki_filp;
1765         struct ceph_file_info *fi = file->private_data;
1766         struct inode *inode = file_inode(file);
1767         struct ceph_inode_info *ci = ceph_inode(inode);
1768         struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1769         struct ceph_osd_client *osdc = &fsc->client->osdc;
1770         struct ceph_cap_flush *prealloc_cf;
1771         ssize_t count, written = 0;
1772         int err, want = 0, got;
1773         bool direct_lock = false;
1774         u32 map_flags;
1775         u64 pool_flags;
1776         loff_t pos;
1777         loff_t limit = max(i_size_read(inode), fsc->max_file_size);
1778
1779         if (ceph_inode_is_shutdown(inode))
1780                 return -ESTALE;
1781
1782         if (ceph_snap(inode) != CEPH_NOSNAP)
1783                 return -EROFS;
1784
1785         prealloc_cf = ceph_alloc_cap_flush();
1786         if (!prealloc_cf)
1787                 return -ENOMEM;
1788
1789         if ((iocb->ki_flags & (IOCB_DIRECT | IOCB_APPEND)) == IOCB_DIRECT)
1790                 direct_lock = true;
1791
1792 retry_snap:
1793         if (direct_lock)
1794                 ceph_start_io_direct(inode);
1795         else
1796                 ceph_start_io_write(inode);
1797
1798         /* We can write back this queue in page reclaim */
1799         current->backing_dev_info = inode_to_bdi(inode);
1800
1801         if (iocb->ki_flags & IOCB_APPEND) {
1802                 err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
1803                 if (err < 0)
1804                         goto out;
1805         }
1806
1807         err = generic_write_checks(iocb, from);
1808         if (err <= 0)
1809                 goto out;
1810
1811         pos = iocb->ki_pos;
1812         if (unlikely(pos >= limit)) {
1813                 err = -EFBIG;
1814                 goto out;
1815         } else {
1816                 iov_iter_truncate(from, limit - pos);
1817         }
1818
1819         count = iov_iter_count(from);
1820         if (ceph_quota_is_max_bytes_exceeded(inode, pos + count)) {
1821                 err = -EDQUOT;
1822                 goto out;
1823         }
1824
1825         down_read(&osdc->lock);
1826         map_flags = osdc->osdmap->flags;
1827         pool_flags = ceph_pg_pool_flags(osdc->osdmap, ci->i_layout.pool_id);
1828         up_read(&osdc->lock);
1829         if ((map_flags & CEPH_OSDMAP_FULL) ||
1830             (pool_flags & CEPH_POOL_FLAG_FULL)) {
1831                 err = -ENOSPC;
1832                 goto out;
1833         }
1834
1835         err = file_remove_privs(file);
1836         if (err)
1837                 goto out;
1838
1839         dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n",
1840              inode, ceph_vinop(inode), pos, count, i_size_read(inode));
1841         if (!(fi->flags & CEPH_F_SYNC) && !direct_lock)
1842                 want |= CEPH_CAP_FILE_BUFFER;
1843         if (fi->fmode & CEPH_FILE_MODE_LAZY)
1844                 want |= CEPH_CAP_FILE_LAZYIO;
1845         got = 0;
1846         err = ceph_get_caps(file, CEPH_CAP_FILE_WR, want, pos + count, &got);
1847         if (err < 0)
1848                 goto out;
1849
1850         err = file_update_time(file);
1851         if (err)
1852                 goto out_caps;
1853
1854         inode_inc_iversion_raw(inode);
1855
1856         dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n",
1857              inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));
1858
1859         if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
1860             (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC) ||
1861             (ci->i_ceph_flags & CEPH_I_ERROR_WRITE)) {
1862                 struct ceph_snap_context *snapc;
1863                 struct iov_iter data;
1864
1865                 spin_lock(&ci->i_ceph_lock);
1866                 if (__ceph_have_pending_cap_snap(ci)) {
1867                         struct ceph_cap_snap *capsnap =
1868                                         list_last_entry(&ci->i_cap_snaps,
1869                                                         struct ceph_cap_snap,
1870                                                         ci_item);
1871                         snapc = ceph_get_snap_context(capsnap->context);
1872                 } else {
1873                         BUG_ON(!ci->i_head_snapc);
1874                         snapc = ceph_get_snap_context(ci->i_head_snapc);
1875                 }
1876                 spin_unlock(&ci->i_ceph_lock);
1877
1878                 /* we might need to revert to that point */
1879                 data = *from;
1880                 if (iocb->ki_flags & IOCB_DIRECT)
1881                         written = ceph_direct_read_write(iocb, &data, snapc,
1882                                                          &prealloc_cf);
1883                 else
1884                         written = ceph_sync_write(iocb, &data, pos, snapc);
1885                 if (direct_lock)
1886                         ceph_end_io_direct(inode);
1887                 else
1888                         ceph_end_io_write(inode);
1889                 if (written > 0)
1890                         iov_iter_advance(from, written);
1891                 ceph_put_snap_context(snapc);
1892         } else {
1893                 /*
1894                  * No need to acquire the i_truncate_mutex, because the
1895                  * MDS revokes Fwb caps before sending a truncate message
1896                  * to us.  We can't get the Fwb cap while there is a
1897                  * pending vmtruncate, so write and vmtruncate cannot run
1898                  * at the same time.
1899                  */
1900                 written = generic_perform_write(iocb, from);
1901                 if (likely(written >= 0))
1902                         iocb->ki_pos = pos + written;
1903                 ceph_end_io_write(inode);
1904         }
1905
1906         if (written >= 0) {
1907                 int dirty;
1908
1909                 spin_lock(&ci->i_ceph_lock);
1910                 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
1911                                                &prealloc_cf);
1912                 spin_unlock(&ci->i_ceph_lock);
1913                 if (dirty)
1914                         __mark_inode_dirty(inode, dirty);
1915                 if (ceph_quota_is_max_bytes_approaching(inode, iocb->ki_pos))
1916                         ceph_check_caps(ci, CHECK_CAPS_FLUSH);
1917         }
1918
1919         dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n",
1920              inode, ceph_vinop(inode), pos, (unsigned)count,
1921              ceph_cap_string(got));
1922         ceph_put_cap_refs(ci, got);
1923
1924         if (written == -EOLDSNAPC) {
1925                 dout("aio_write %p %llx.%llx %llu~%u got EOLDSNAPC, retrying\n",
1926                      inode, ceph_vinop(inode), pos, (unsigned)count);
1927                 goto retry_snap;
1928         }
1929
1930         if (written >= 0) {
1931                 if ((map_flags & CEPH_OSDMAP_NEARFULL) ||
1932                     (pool_flags & CEPH_POOL_FLAG_NEARFULL))
1933                         iocb->ki_flags |= IOCB_DSYNC;
1934                 written = generic_write_sync(iocb, written);
1935         }
1936
1937         goto out_unlocked;
1938 out_caps:
1939         ceph_put_cap_refs(ci, got);
1940 out:
1941         if (direct_lock)
1942                 ceph_end_io_direct(inode);
1943         else
1944                 ceph_end_io_write(inode);
1945 out_unlocked:
1946         ceph_free_cap_flush(prealloc_cf);
1947         current->backing_dev_info = NULL;
1948         return written ? written : err;
1949 }
1950
1951 /*
1952  * llseek.  Be sure to verify the file size on SEEK_END, SEEK_DATA and SEEK_HOLE.
1953  */
1954 static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
1955 {
1956         if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) {
1957                 struct inode *inode = file_inode(file);
1958                 int ret;
1959
1960                 ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
1961                 if (ret < 0)
1962                         return ret;
1963         }
1964         return generic_file_llseek(file, offset, whence);
1965 }
1966
1967 static inline void ceph_zero_partial_page(
1968         struct inode *inode, loff_t offset, unsigned size)
1969 {
1970         struct page *page;
1971         pgoff_t index = offset >> PAGE_SHIFT;
1972
1973         page = find_lock_page(inode->i_mapping, index);
1974         if (page) {
1975                 wait_on_page_writeback(page);
1976                 zero_user(page, offset & (PAGE_SIZE - 1), size);
1977                 unlock_page(page);
1978                 put_page(page);
1979         }
1980 }
1981
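     /*
      * Zero the page-cache view of [offset, offset+length): a partial head
      * page is zeroed in place, whole pages in the middle are truncated,
      * and a partial tail page is again zeroed in place.  E.g. with 4 KiB
      * pages, offset=1K and length=11K: the last 3 KiB of page 0 are
      * zeroed, pages 1-2 are dropped, and nothing is left over.
      */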
1982 static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
1983                                       loff_t length)
1984 {
1985         loff_t nearly = round_up(offset, PAGE_SIZE);
1986         if (offset < nearly) {
1987                 loff_t size = nearly - offset;
1988                 if (length < size)
1989                         size = length;
1990                 ceph_zero_partial_page(inode, offset, size);
1991                 offset += size;
1992                 length -= size;
1993         }
1994         if (length >= PAGE_SIZE) {
1995                 loff_t size = round_down(length, PAGE_SIZE);
1996                 truncate_pagecache_range(inode, offset, offset + size - 1);
1997                 offset += size;
1998                 length -= size;
1999         }
2000         if (length)
2001                 ceph_zero_partial_page(inode, offset, length);
2002 }
2003
2004 static int ceph_zero_partial_object(struct inode *inode,
2005                                     loff_t offset, loff_t *length)
2006 {
2007         struct ceph_inode_info *ci = ceph_inode(inode);
2008         struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
2009         struct ceph_osd_request *req;
2010         int ret = 0;
2011         loff_t zero = 0;
2012         int op;
2013
2014         if (ceph_inode_is_shutdown(inode))
2015                 return -EIO;
2016
2017         if (!length) {
2018                 op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE;
2019                 length = &zero;
2020         } else {
2021                 op = CEPH_OSD_OP_ZERO;
2022         }
2023
2024         req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
2025                                         ceph_vino(inode),
2026                                         offset, length,
2027                                         0, 1, op,
2028                                         CEPH_OSD_FLAG_WRITE,
2029                                         NULL, 0, 0, false);
2030         if (IS_ERR(req)) {
2031                 ret = PTR_ERR(req);
2032                 goto out;
2033         }
2034
2035         req->r_mtime = inode->i_mtime;
2036         ceph_osdc_start_request(&fsc->client->osdc, req);
2037         ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
2038         if (ret == -ENOENT)
2039                 ret = 0;
2040         ceph_osdc_put_request(req);
2041
2042 out:
2043         return ret;
2044 }
2045
2046 static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length)
2047 {
2048         int ret = 0;
2049         struct ceph_inode_info *ci = ceph_inode(inode);
2050         s32 stripe_unit = ci->i_layout.stripe_unit;
2051         s32 stripe_count = ci->i_layout.stripe_count;
2052         s32 object_size = ci->i_layout.object_size;
2053         u64 object_set_size = (u64) object_size * stripe_count;
2054         u64 nearly, t;
2055
2056         /* round offset up to next period boundary */
2057         nearly = offset + object_set_size - 1;
2058         t = nearly;
2059         nearly -= do_div(t, object_set_size);
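             /*
              * e.g. with object_size 4M and stripe_count 2, object_set_size
              * is 8M; offset 5M rounds 'nearly' up to 8M, so the first loop
              * below zeroes 5M~3M object by object before whole periods are
              * handled.
              */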
2060
2061         while (length && offset < nearly) {
2062                 loff_t size = length;
2063                 ret = ceph_zero_partial_object(inode, offset, &size);
2064                 if (ret < 0)
2065                         return ret;
2066                 offset += size;
2067                 length -= size;
2068         }
2069         while (length >= object_set_size) {
2070                 int i;
2071                 loff_t pos = offset;
2072                 for (i = 0; i < stripe_count; ++i) {
2073                         ret = ceph_zero_partial_object(inode, pos, NULL);
2074                         if (ret < 0)
2075                                 return ret;
2076                         pos += stripe_unit;
2077                 }
2078                 offset += object_set_size;
2079                 length -= object_set_size;
2080         }
2081         while (length) {
2082                 loff_t size = length;
2083                 ret = ceph_zero_partial_object(inode, offset, &size);
2084                 if (ret < 0)
2085                         return ret;
2086                 offset += size;
2087                 length -= size;
2088         }
2089         return ret;
2090 }
2091
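     /*
      * fallocate: only hole punching is supported, and only with KEEP_SIZE.
      * The sole accepted call from userspace is thus, sketched:
      *
      *     fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
      *               offset, length);
      *
      * Everything else gets -EOPNOTSUPP.
      */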
2092 static long ceph_fallocate(struct file *file, int mode,
2093                                 loff_t offset, loff_t length)
2094 {
2095         struct ceph_file_info *fi = file->private_data;
2096         struct inode *inode = file_inode(file);
2097         struct ceph_inode_info *ci = ceph_inode(inode);
2098         struct ceph_cap_flush *prealloc_cf;
2099         int want, got = 0;
2100         int dirty;
2101         int ret = 0;
2102         loff_t endoff = 0;
2103         loff_t size;
2104
2105         if (mode != (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
2106                 return -EOPNOTSUPP;
2107
2108         if (!S_ISREG(inode->i_mode))
2109                 return -EOPNOTSUPP;
2110
2111         prealloc_cf = ceph_alloc_cap_flush();
2112         if (!prealloc_cf)
2113                 return -ENOMEM;
2114
2115         inode_lock(inode);
2116
2117         if (ceph_snap(inode) != CEPH_NOSNAP) {
2118                 ret = -EROFS;
2119                 goto unlock;
2120         }
2121
2122         size = i_size_read(inode);
2123
2124         /* Are we punching a hole beyond EOF? */
2125         if (offset >= size)
2126                 goto unlock;
2127         if ((offset + length) > size)
2128                 length = size - offset;
2129
2130         if (fi->fmode & CEPH_FILE_MODE_LAZY)
2131                 want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
2132         else
2133                 want = CEPH_CAP_FILE_BUFFER;
2134
2135         ret = ceph_get_caps(file, CEPH_CAP_FILE_WR, want, endoff, &got);
2136         if (ret < 0)
2137                 goto unlock;
2138
2139         filemap_invalidate_lock(inode->i_mapping);
2140         ceph_fscache_invalidate(inode, false);
2141         ceph_zero_pagecache_range(inode, offset, length);
2142         ret = ceph_zero_objects(inode, offset, length);
2143
2144         if (!ret) {
2145                 spin_lock(&ci->i_ceph_lock);
2146                 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
2147                                                &prealloc_cf);
2148                 spin_unlock(&ci->i_ceph_lock);
2149                 if (dirty)
2150                         __mark_inode_dirty(inode, dirty);
2151         }
2152         filemap_invalidate_unlock(inode->i_mapping);
2153
2154         ceph_put_cap_refs(ci, got);
2155 unlock:
2156         inode_unlock(inode);
2157         ceph_free_cap_flush(prealloc_cf);
2158         return ret;
2159 }
2160
2161 /*
2162  * This function tries to get FILE_WR capabilities for dst_ci and FILE_RD for
2163  * src_ci.  Two attempts are made to obtain both caps, and an error is
2164  * returned if this fails; zero is returned on success.
2165  */
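     /*
      * The retry dance, roughly: take Fw on the destination, then *try* to
      * get Fr on the source without blocking.  If that fails, drop the dst
      * caps, block for the src caps, drop those too, and retry once from
      * the top; blocking on src while still holding dst caps could deadlock
      * against another client copying in the opposite direction.
      */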
2166 static int get_rd_wr_caps(struct file *src_filp, int *src_got,
2167                           struct file *dst_filp,
2168                           loff_t dst_endoff, int *dst_got)
2169 {
2170         int ret = 0;
2171         bool retrying = false;
2172
2173 retry_caps:
2174         ret = ceph_get_caps(dst_filp, CEPH_CAP_FILE_WR, CEPH_CAP_FILE_BUFFER,
2175                             dst_endoff, dst_got);
2176         if (ret < 0)
2177                 return ret;
2178
2179         /*
2180          * Since we're already holding the FILE_WR capability for the dst file,
2181          * we would risk a deadlock by using ceph_get_caps.  Thus, we'll do some
2182          * retry dance instead to try to get both capabilities.
2183          */
2184         ret = ceph_try_get_caps(file_inode(src_filp),
2185                                 CEPH_CAP_FILE_RD, CEPH_CAP_FILE_SHARED,
2186                                 false, src_got);
2187         if (ret <= 0) {
2188                 /* Start by dropping dst_ci caps and getting src_ci caps */
2189                 ceph_put_cap_refs(ceph_inode(file_inode(dst_filp)), *dst_got);
2190                 if (retrying) {
2191                         if (!ret)
2192                                 /* ceph_try_get_caps masks EAGAIN */
2193                                 ret = -EAGAIN;
2194                         return ret;
2195                 }
2196                 ret = ceph_get_caps(src_filp, CEPH_CAP_FILE_RD,
2197                                     CEPH_CAP_FILE_SHARED, -1, src_got);
2198                 if (ret < 0)
2199                         return ret;
2200                 /* ... drop src_ci caps too, and retry */
2201                 ceph_put_cap_refs(ceph_inode(file_inode(src_filp)), *src_got);
2202                 retrying = true;
2203                 goto retry_caps;
2204         }
2205         return ret;
2206 }
2207
2208 static void put_rd_wr_caps(struct ceph_inode_info *src_ci, int src_got,
2209                            struct ceph_inode_info *dst_ci, int dst_got)
2210 {
2211         ceph_put_cap_refs(src_ci, src_got);
2212         ceph_put_cap_refs(dst_ci, dst_got);
2213 }
2214
2215 /*
2216  * This function does several size-related checks, returning an error if:
2217  *  - source file is smaller than off+len
2218  *  - destination file size is not OK (inode_newsize_ok())
2219  *  - the max bytes quota is exceeded
2220  */
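     /*
      * Note that -EOPNOTSUPP is deliberate: it makes the caller fall back
      * to the generic VFS copy_file_range implementation instead of failing
      * the syscall outright (see ceph_copy_file_range() below).
      */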
2221 static int is_file_size_ok(struct inode *src_inode, struct inode *dst_inode,
2222                            loff_t src_off, loff_t dst_off, size_t len)
2223 {
2224         loff_t size, endoff;
2225
2226         size = i_size_read(src_inode);
2227         /*
2228          * Don't copy beyond source file EOF.  Instead of simply setting length
2229          * to (size - src_off), just drop to VFS default implementation, as the
2230          * local i_size may be stale due to other clients writing to the source
2231          * inode.
2232          */
2233         if (src_off + len > size) {
2234                 dout("Copy beyond EOF (%llu + %zu > %llu)\n",
2235                      src_off, len, size);
2236                 return -EOPNOTSUPP;
2237         }
2238         size = i_size_read(dst_inode);
2239
2240         endoff = dst_off + len;
2241         if (inode_newsize_ok(dst_inode, endoff))
2242                 return -EOPNOTSUPP;
2243
2244         if (ceph_quota_is_max_bytes_exceeded(dst_inode, endoff))
2245                 return -EDQUOT;
2246
2247         return 0;
2248 }
2249
2250 static struct ceph_osd_request *
2251 ceph_alloc_copyfrom_request(struct ceph_osd_client *osdc,
2252                             u64 src_snapid,
2253                             struct ceph_object_id *src_oid,
2254                             struct ceph_object_locator *src_oloc,
2255                             struct ceph_object_id *dst_oid,
2256                             struct ceph_object_locator *dst_oloc,
2257                             u32 truncate_seq, u64 truncate_size)
2258 {
2259         struct ceph_osd_request *req;
2260         int ret;
2261         u32 src_fadvise_flags =
2262                 CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
2263                 CEPH_OSD_OP_FLAG_FADVISE_NOCACHE;
2264         u32 dst_fadvise_flags =
2265                 CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
2266                 CEPH_OSD_OP_FLAG_FADVISE_DONTNEED;
2267
2268         req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL);
2269         if (!req)
2270                 return ERR_PTR(-ENOMEM);
2271
2272         req->r_flags = CEPH_OSD_FLAG_WRITE;
2273
2274         ceph_oloc_copy(&req->r_t.base_oloc, dst_oloc);
2275         ceph_oid_copy(&req->r_t.base_oid, dst_oid);
2276
2277         ret = osd_req_op_copy_from_init(req, src_snapid, 0,
2278                                         src_oid, src_oloc,
2279                                         src_fadvise_flags,
2280                                         dst_fadvise_flags,
2281                                         truncate_seq,
2282                                         truncate_size,
2283                                         CEPH_OSD_COPY_FROM_FLAG_TRUNCATE_SEQ);
2284         if (ret)
2285                 goto out;
2286
2287         ret = ceph_osdc_alloc_messages(req, GFP_KERNEL);
2288         if (ret)
2289                 goto out;
2290
2291         return req;
2292
2293 out:
2294         ceph_osdc_put_request(req);
2295         return ERR_PTR(ret);
2296 }
2297
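     /*
      * Copy as many whole objects as fit in 'len' from src to dst using OSD
      * copy-from2 requests, one object at a time.  Object names follow the
      * usual "%llx.%08llx" (ino.objno) scheme, e.g. object 3 of inode
      * 0x1000000a2 is "1000000a2.00000003".  Returns the number of bytes
      * copied, or a negative error if nothing was copied at all.
      */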
2298 static ssize_t ceph_do_objects_copy(struct ceph_inode_info *src_ci, u64 *src_off,
2299                                     struct ceph_inode_info *dst_ci, u64 *dst_off,
2300                                     struct ceph_fs_client *fsc,
2301                                     size_t len, unsigned int flags)
2302 {
2303         struct ceph_object_locator src_oloc, dst_oloc;
2304         struct ceph_object_id src_oid, dst_oid;
2305         struct ceph_osd_client *osdc;
2306         struct ceph_osd_request *req;
2307         size_t bytes = 0;
2308         u64 src_objnum, src_objoff, dst_objnum, dst_objoff;
2309         u32 src_objlen, dst_objlen;
2310         u32 object_size = src_ci->i_layout.object_size;
2311         int ret;
2312
2313         src_oloc.pool = src_ci->i_layout.pool_id;
2314         src_oloc.pool_ns = ceph_try_get_string(src_ci->i_layout.pool_ns);
2315         dst_oloc.pool = dst_ci->i_layout.pool_id;
2316         dst_oloc.pool_ns = ceph_try_get_string(dst_ci->i_layout.pool_ns);
2317         osdc = &fsc->client->osdc;
2318
2319         while (len >= object_size) {
2320                 ceph_calc_file_object_mapping(&src_ci->i_layout, *src_off,
2321                                               object_size, &src_objnum,
2322                                               &src_objoff, &src_objlen);
2323                 ceph_calc_file_object_mapping(&dst_ci->i_layout, *dst_off,
2324                                               object_size, &dst_objnum,
2325                                               &dst_objoff, &dst_objlen);
2326                 ceph_oid_init(&src_oid);
2327                 ceph_oid_printf(&src_oid, "%llx.%08llx",
2328                                 src_ci->i_vino.ino, src_objnum);
2329                 ceph_oid_init(&dst_oid);
2330                 ceph_oid_printf(&dst_oid, "%llx.%08llx",
2331                                 dst_ci->i_vino.ino, dst_objnum);
2332                 /* Do a remote object copy */
2333                 req = ceph_alloc_copyfrom_request(osdc, src_ci->i_vino.snap,
2334                                                   &src_oid, &src_oloc,
2335                                                   &dst_oid, &dst_oloc,
2336                                                   dst_ci->i_truncate_seq,
2337                                                   dst_ci->i_truncate_size);
2338                 if (IS_ERR(req))
2339                         ret = PTR_ERR(req);
2340                 else {
2341                         ceph_osdc_start_request(osdc, req);
2342                         ret = ceph_osdc_wait_request(osdc, req);
2343                         ceph_update_copyfrom_metrics(&fsc->mdsc->metric,
2344                                                      req->r_start_latency,
2345                                                      req->r_end_latency,
2346                                                      object_size, ret);
2347                         ceph_osdc_put_request(req);
2348                 }
2349                 if (ret) {
2350                         if (ret == -EOPNOTSUPP) {
2351                                 fsc->have_copy_from2 = false;
2352                                 pr_notice("OSDs don't support copy-from2; disabling copy offload\n");
2353                         }
2354                         dout("ceph_osdc_copy_from returned %d\n", ret);
2355                         if (!bytes)
2356                                 bytes = ret;
2357                         goto out;
2358                 }
2359                 len -= object_size;
2360                 bytes += object_size;
2361                 *src_off += object_size;
2362                 *dst_off += object_size;
2363         }
2364
2365 out:
2366         ceph_oloc_destroy(&src_oloc);
2367         ceph_oloc_destroy(&dst_oloc);
2368         return bytes;
2369 }
2370
2371 static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
2372                                       struct file *dst_file, loff_t dst_off,
2373                                       size_t len, unsigned int flags)
2374 {
2375         struct inode *src_inode = file_inode(src_file);
2376         struct inode *dst_inode = file_inode(dst_file);
2377         struct ceph_inode_info *src_ci = ceph_inode(src_inode);
2378         struct ceph_inode_info *dst_ci = ceph_inode(dst_inode);
2379         struct ceph_cap_flush *prealloc_cf;
2380         struct ceph_fs_client *src_fsc = ceph_inode_to_client(src_inode);
2381         loff_t size;
2382         ssize_t ret = -EIO, bytes;
2383         u64 src_objnum, dst_objnum, src_objoff, dst_objoff;
2384         u32 src_objlen, dst_objlen;
2385         int src_got = 0, dst_got = 0, err, dirty;
2386
2387         if (src_inode->i_sb != dst_inode->i_sb) {
2388                 struct ceph_fs_client *dst_fsc = ceph_inode_to_client(dst_inode);
2389
2390                 if (ceph_fsid_compare(&src_fsc->client->fsid,
2391                                       &dst_fsc->client->fsid)) {
2392                         dout("Copying files across clusters: src: %pU dst: %pU\n",
2393                              &src_fsc->client->fsid, &dst_fsc->client->fsid);
2394                         return -EXDEV;
2395                 }
2396         }
2397         if (ceph_snap(dst_inode) != CEPH_NOSNAP)
2398                 return -EROFS;
2399
2400         /*
2401          * Some of the checks below will return -EOPNOTSUPP, which will force a
2402          * fallback to the default VFS copy_file_range implementation.  This is
2403          * desirable in several cases (for example, when 'len' is smaller
2404          * than the object size, or in cases where a local copy would be
2405          * more efficient).
2406          */
2407
2408         if (ceph_test_mount_opt(src_fsc, NOCOPYFROM))
2409                 return -EOPNOTSUPP;
2410
2411         if (!src_fsc->have_copy_from2)
2412                 return -EOPNOTSUPP;
2413
2414         /*
2415          * Striped file layouts require that we copy partial objects, but the
2416          * OSD copy-from operation only supports full-object copies.  Limit
2417          * this to non-striped file layouts for now.
2418          */
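             /*
              * In other words, only the simple case passes: stripe_count == 1
              * on both sides and matching stripe_unit/object_size, so object
              * boundaries line up 1:1 between source and destination.
              */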
2419         if ((src_ci->i_layout.stripe_unit != dst_ci->i_layout.stripe_unit) ||
2420             (src_ci->i_layout.stripe_count != 1) ||
2421             (dst_ci->i_layout.stripe_count != 1) ||
2422             (src_ci->i_layout.object_size != dst_ci->i_layout.object_size)) {
2423                 dout("Invalid src/dst files layout\n");
2424                 return -EOPNOTSUPP;
2425         }
2426
2427         if (len < src_ci->i_layout.object_size)
2428                 return -EOPNOTSUPP; /* no remote copy will be done */
2429
2430         prealloc_cf = ceph_alloc_cap_flush();
2431         if (!prealloc_cf)
2432                 return -ENOMEM;
2433
2434         /* Start by sync'ing the source and destination files */
2435         ret = file_write_and_wait_range(src_file, src_off, (src_off + len));
2436         if (ret < 0) {
2437                 dout("failed to write src file (%zd)\n", ret);
2438                 goto out;
2439         }
2440         ret = file_write_and_wait_range(dst_file, dst_off, (dst_off + len));
2441         if (ret < 0) {
2442                 dout("failed to write dst file (%zd)\n", ret);
2443                 goto out;
2444         }
2445
2446         /*
2447          * We need FILE_WR caps for dst_ci and FILE_RD for src_ci as other
2448          * clients may have dirty data in their caches.  And OSDs know nothing
2449          * about caps, so they can't safely do the remote object copies.
2450          */
2451         err = get_rd_wr_caps(src_file, &src_got,
2452                              dst_file, (dst_off + len), &dst_got);
2453         if (err < 0) {
2454                 dout("get_rd_wr_caps returned %d\n", err);
2455                 ret = -EOPNOTSUPP;
2456                 goto out;
2457         }
2458
2459         ret = is_file_size_ok(src_inode, dst_inode, src_off, dst_off, len);
2460         if (ret < 0)
2461                 goto out_caps;
2462
2463         /* Drop dst file cached pages */
2464         ceph_fscache_invalidate(dst_inode, false);
2465         ret = invalidate_inode_pages2_range(dst_inode->i_mapping,
2466                                             dst_off >> PAGE_SHIFT,
2467                                             (dst_off + len) >> PAGE_SHIFT);
2468         if (ret < 0) {
2469                 dout("Failed to invalidate inode pages (%zd)\n", ret);
2470                 ret = 0; /* XXX */
2471         }
2472         ceph_calc_file_object_mapping(&src_ci->i_layout, src_off,
2473                                       src_ci->i_layout.object_size,
2474                                       &src_objnum, &src_objoff, &src_objlen);
2475         ceph_calc_file_object_mapping(&dst_ci->i_layout, dst_off,
2476                                       dst_ci->i_layout.object_size,
2477                                       &dst_objnum, &dst_objoff, &dst_objlen);
2478         /* object-level offsets need to be the same */
2479         if (src_objoff != dst_objoff) {
2480                 ret = -EOPNOTSUPP;
2481                 goto out_caps;
2482         }
2483
2484         /*
2485          * Do a manual copy if the object offset isn't object aligned.
2486          * 'src_objlen' contains the bytes left until the end of the object,
2487          * starting at src_off.
2488          */
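             /*
              * e.g. with 4M objects, src_off = 1M gives src_objoff = 1M and
              * src_objlen = 3M: those 3M are spliced through the page cache
              * first, so the remote copies below start on a 4M boundary.
              */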
2489         if (src_objoff) {
2490                 dout("Initial partial copy of %u bytes\n", src_objlen);
2491
2492                 /*
2493                  * we need to temporarily drop all caps as we'll be calling
2494                  * {read,write}_iter, which will get caps again.
2495                  */
2496                 put_rd_wr_caps(src_ci, src_got, dst_ci, dst_got);
2497                 ret = do_splice_direct(src_file, &src_off, dst_file,
2498                                        &dst_off, src_objlen, flags);
2499                 /* Abort on short copies or on error */
2500                 if (ret < src_objlen) {
2501                         dout("Failed partial copy (%zd)\n", ret);
2502                         goto out;
2503                 }
2504                 len -= ret;
2505                 err = get_rd_wr_caps(src_file, &src_got,
2506                                      dst_file, (dst_off + len), &dst_got);
2507                 if (err < 0)
2508                         goto out;
2509                 err = is_file_size_ok(src_inode, dst_inode,
2510                                       src_off, dst_off, len);
2511                 if (err < 0)
2512                         goto out_caps;
2513         }
2514
2515         size = i_size_read(dst_inode);
2516         bytes = ceph_do_objects_copy(src_ci, &src_off, dst_ci, &dst_off,
2517                                      src_fsc, len, flags);
2518         if (bytes <= 0) {
2519                 if (!ret)
2520                         ret = bytes;
2521                 goto out_caps;
2522         }
2523         dout("Copied %zu bytes out of %zu\n", bytes, len);
2524         len -= bytes;
2525         ret += bytes;
2526
2527         file_update_time(dst_file);
2528         inode_inc_iversion_raw(dst_inode);
2529
2530         if (dst_off > size) {
2531                 /* Let the MDS know about dst file size change */
2532                 if (ceph_inode_set_size(dst_inode, dst_off) ||
2533                     ceph_quota_is_max_bytes_approaching(dst_inode, dst_off))
2534                         ceph_check_caps(dst_ci, CHECK_CAPS_AUTHONLY | CHECK_CAPS_FLUSH);
2535         }
2536         /* Mark Fw dirty */
2537         spin_lock(&dst_ci->i_ceph_lock);
2538         dirty = __ceph_mark_dirty_caps(dst_ci, CEPH_CAP_FILE_WR, &prealloc_cf);
2539         spin_unlock(&dst_ci->i_ceph_lock);
2540         if (dirty)
2541                 __mark_inode_dirty(dst_inode, dirty);
2542
2543 out_caps:
2544         put_rd_wr_caps(src_ci, src_got, dst_ci, dst_got);
2545
2546         /*
2547          * Do the final manual copy if we still have some bytes left, unless
2548          * the remote object copies failed (in which case len >= object_size).
2549          */
2550         if (len && (len < src_ci->i_layout.object_size)) {
2551                 dout("Final partial copy of %zu bytes\n", len);
2552                 bytes = do_splice_direct(src_file, &src_off, dst_file,
2553                                          &dst_off, len, flags);
2554                 if (bytes > 0)
2555                         ret += bytes;
2556                 else
2557                         dout("Failed partial copy (%zd)\n", bytes);
2558         }
2559
2560 out:
2561         ceph_free_cap_flush(prealloc_cf);
2562
2563         return ret;
2564 }
2565
2566 static ssize_t ceph_copy_file_range(struct file *src_file, loff_t src_off,
2567                                     struct file *dst_file, loff_t dst_off,
2568                                     size_t len, unsigned int flags)
2569 {
2570         ssize_t ret;
2571
2572         ret = __ceph_copy_file_range(src_file, src_off, dst_file, dst_off,
2573                                      len, flags);
2574
2575         if (ret == -EOPNOTSUPP || ret == -EXDEV)
2576                 ret = generic_copy_file_range(src_file, src_off, dst_file,
2577                                               dst_off, len, flags);
2578         return ret;
2579 }
2580
2581 const struct file_operations ceph_file_fops = {
2582         .open = ceph_open,
2583         .release = ceph_release,
2584         .llseek = ceph_llseek,
2585         .read_iter = ceph_read_iter,
2586         .write_iter = ceph_write_iter,
2587         .mmap = ceph_mmap,
2588         .fsync = ceph_fsync,
2589         .lock = ceph_lock,
2590         .setlease = simple_nosetlease,
2591         .flock = ceph_flock,
2592         .splice_read = generic_file_splice_read,
2593         .splice_write = iter_file_splice_write,
2594         .unlocked_ioctl = ceph_ioctl,
2595         .compat_ioctl = compat_ptr_ioctl,
2596         .fallocate      = ceph_fallocate,
2597         .copy_file_range = ceph_copy_file_range,
2598 };