// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/writeback.h>
#include <linux/falloc.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
static __le32 ceph_flags_sys2wire(u32 flags)
{
	u32 wire_flags = 0;

	switch (flags & O_ACCMODE) {
	case O_RDONLY:
		wire_flags |= CEPH_O_RDONLY;
		break;
	case O_WRONLY:
		wire_flags |= CEPH_O_WRONLY;
		break;
	case O_RDWR:
		wire_flags |= CEPH_O_RDWR;
		break;
	}

	flags &= ~O_ACCMODE;

#define ceph_sys2wire(a) if (flags & a) { wire_flags |= CEPH_##a; flags &= ~a; }

	ceph_sys2wire(O_CREAT);
	ceph_sys2wire(O_EXCL);
	ceph_sys2wire(O_TRUNC);
	ceph_sys2wire(O_DIRECTORY);
	ceph_sys2wire(O_NOFOLLOW);

#undef ceph_sys2wire

	if (flags)
		dout("unused open flags: %x\n", flags);

	return cpu_to_le32(wire_flags);
}
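
/*
 * For example, open(..., O_WRONLY|O_CREAT|O_TRUNC) is encoded as
 * CEPH_O_WRONLY|CEPH_O_CREAT|CEPH_O_TRUNC on the wire; any flag the
 * wire format has no equivalent for is only reported via dout().
 */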
/*
 * Ceph file operations
 *
 * Implement basic open/close functionality, and implement
 * read/write.
 *
 * We implement three modes of file I/O:
 *  - buffered uses the generic_file_aio_{read,write} helpers
 *
 *  - synchronous is used when there is multi-client read/write
 *    sharing, avoids the page cache, and synchronously waits for an
 *    ack from the OSD.
 *
 *  - direct io takes the variant of the sync path that references
 *    user pages directly.
 *
 * fsync() flushes and waits on dirty pages, but just queues metadata
 * for writeback: since the MDS can recover size and mtime there is no
 * need to wait for MDS acknowledgement.
 */
/*
 * Calculate the length sum of direct io vectors that can
 * be combined into one page vector.
 */
static size_t dio_get_pagev_size(const struct iov_iter *it)
{
	const struct iovec *iov = it->iov;
	const struct iovec *iovend = iov + it->nr_segs;
	size_t size;

	size = iov->iov_len - it->iov_offset;
	/*
	 * An iov can be page vectored when both the current tail
	 * and the next base are page aligned.
	 */
	while (PAGE_ALIGNED((iov->iov_base + iov->iov_len)) &&
	       (++iov < iovend && PAGE_ALIGNED((iov->iov_base)))) {
		size += iov->iov_len;
	}
	dout("dio_get_pagevlen len = %zu\n", size);
	return size;
}
/*
 * Allocate a page vector based on (@it, @nbytes).
 * The return value is the tuple describing a page vector,
 * that is (@pages, @page_align, @num_pages).
 */
static struct page **
dio_get_pages_alloc(const struct iov_iter *it, size_t nbytes,
		    size_t *page_align, int *num_pages)
{
	struct iov_iter tmp_it = *it;
	size_t align;
	struct page **pages;
	int ret = 0, idx, npages;

	align = (unsigned long)(it->iov->iov_base + it->iov_offset) &
		(PAGE_SIZE - 1);
	npages = calc_pages_for(align, nbytes);
	pages = kvmalloc(sizeof(*pages) * npages, GFP_KERNEL);
	if (!pages)
		return ERR_PTR(-ENOMEM);

	for (idx = 0; idx < npages; ) {
		size_t start;
		ret = iov_iter_get_pages(&tmp_it, pages + idx, nbytes,
					 npages - idx, &start);
		if (ret < 0)
			goto fail;
		iov_iter_advance(&tmp_it, ret);
		nbytes -= ret;
		idx += (ret + start + PAGE_SIZE - 1) / PAGE_SIZE;
	}

	BUG_ON(nbytes != 0);
	*num_pages = npages;
	*page_align = align;
	dout("dio_get_pages_alloc: got %d pages align %zu\n", npages, align);
	return pages;
fail:
	ceph_put_page_vector(pages, idx, false);
	return ERR_PTR(ret);
}
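
/*
 * Worked example: nbytes = 10000 with the buffer starting 100 bytes
 * into a page gives align = 100, and calc_pages_for(100, 10000) =
 * DIV_ROUND_UP(100 + 10000, PAGE_SIZE) = 3 pages with 4K pages.
 */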
/*
 * Prepare an open request.  Preallocate ceph_cap to avoid an
 * inopportune ENOMEM later.
 */
static struct ceph_mds_request *
prepare_open_request(struct super_block *sb, int flags, int create_mode)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int want_auth = USE_ANY_MDS;
	int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;

	if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
		want_auth = USE_AUTH_MDS;

	req = ceph_mdsc_create_request(mdsc, op, want_auth);
	if (IS_ERR(req))
		goto out;
	req->r_fmode = ceph_flags_to_mode(flags);
	req->r_args.open.flags = ceph_flags_sys2wire(flags);
	req->r_args.open.mode = cpu_to_le32(create_mode);
out:
	return req;
}
static int ceph_init_file_info(struct inode *inode, struct file *file,
					int fmode, bool isdir)
{
	struct ceph_file_info *fi;

	dout("%s %p %p 0%o (%s)\n", __func__, inode, file,
			inode->i_mode, isdir ? "dir" : "regular");
	BUG_ON(inode->i_fop->release != ceph_release);

	if (isdir) {
		struct ceph_dir_file_info *dfi =
			kmem_cache_zalloc(ceph_dir_file_cachep, GFP_KERNEL);
		if (!dfi) {
			ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
			return -ENOMEM;
		}

		file->private_data = dfi;
		fi = &dfi->file_info;
		dfi->next_offset = 2;
		dfi->readdir_cache_idx = -1;
	} else {
		fi = kmem_cache_zalloc(ceph_file_cachep, GFP_KERNEL);
		if (!fi) {
			ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
			return -ENOMEM;
		}

		file->private_data = fi;
	}

	fi->fmode = fmode;
	spin_lock_init(&fi->rw_contexts_lock);
	INIT_LIST_HEAD(&fi->rw_contexts);

	return 0;
}
/*
 * initialize private struct file data.
 * if we fail, clean up by dropping fmode reference on the ceph_inode
 */
static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
{
	int ret = 0;

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		ceph_fscache_register_inode_cookie(inode);
		ceph_fscache_file_set_cookie(inode, file);
		/* fall through */
	case S_IFDIR:
		ret = ceph_init_file_info(inode, file, fmode,
						S_ISDIR(inode->i_mode));
		if (ret)
			return ret;
		break;

	case S_IFLNK:
		dout("init_file %p %p 0%o (symlink)\n", inode, file,
		     inode->i_mode);
		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
		break;

	default:
		dout("init_file %p %p 0%o (special)\n", inode, file,
		     inode->i_mode);
		/*
		 * we need to drop the open ref now, since we don't
		 * have .release set to ceph_release.
		 */
		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
		BUG_ON(inode->i_fop->release == ceph_release);

		/* call the proper open fop */
		ret = inode->i_fop->open(inode, file);
	}
	return ret;
}
/*
 * try renew caps after session gets killed.
 */
int ceph_renew_caps(struct inode *inode)
{
	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_mds_request *req;
	int err, flags, wanted;

	spin_lock(&ci->i_ceph_lock);
	wanted = __ceph_caps_file_wanted(ci);
	if (__ceph_is_any_real_caps(ci) &&
	    (!(wanted & CEPH_CAP_ANY_WR) || ci->i_auth_cap)) {
		int issued = __ceph_caps_issued(ci, NULL);
		spin_unlock(&ci->i_ceph_lock);
		dout("renew caps %p want %s issued %s updating mds_wanted\n",
		     inode, ceph_cap_string(wanted), ceph_cap_string(issued));
		ceph_check_caps(ci, 0, NULL);
		return 0;
	}
	spin_unlock(&ci->i_ceph_lock);

	flags = 0;
	if ((wanted & CEPH_CAP_FILE_RD) && (wanted & CEPH_CAP_FILE_WR))
		flags = O_RDWR;
	else if (wanted & CEPH_CAP_FILE_RD)
		flags = O_RDONLY;
	else if (wanted & CEPH_CAP_FILE_WR)
		flags = O_WRONLY;
#ifdef O_LAZY
	if (wanted & CEPH_CAP_FILE_LAZYIO)
		flags |= O_LAZY;
#endif

	req = prepare_open_request(inode->i_sb, flags, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	req->r_inode = inode;
	ihold(inode);
	req->r_num_caps = 1;
	req->r_fmode = -1;

	err = ceph_mdsc_do_request(mdsc, NULL, req);
	ceph_mdsc_put_request(req);
out:
	dout("renew caps %p open result=%d\n", inode, err);
	return err < 0 ? err : 0;
}
/*
 * If we already have the requisite capabilities, we can satisfy
 * the open request locally (no need to request new caps from the
 * MDS).  We do, however, need to inform the MDS (asynchronously)
 * if our wanted caps set expands.
 */
int ceph_open(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct ceph_file_info *fi = file->private_data;
	int err;
	int flags, fmode, wanted;

	if (fi) {
		dout("open file %p is already opened\n", file);
		return 0;
	}

	/* filter out O_CREAT|O_EXCL; vfs did that already.  yuck. */
	flags = file->f_flags & ~(O_CREAT|O_EXCL);
	if (S_ISDIR(inode->i_mode))
		flags = O_DIRECTORY;  /* mds likes to know */

	dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
	     ceph_vinop(inode), file, flags, file->f_flags);
	fmode = ceph_flags_to_mode(flags);
	wanted = ceph_caps_for_mode(fmode);

	/* snapped files are read-only */
	if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
		return -EROFS;

	/* trivially open snapdir */
	if (ceph_snap(inode) == CEPH_SNAPDIR) {
		spin_lock(&ci->i_ceph_lock);
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&ci->i_ceph_lock);
		return ceph_init_file(inode, file, fmode);
	}

	/*
	 * No need to block if we have caps on the auth MDS (for
	 * write) or any MDS (for read).  Update wanted set
	 * asynchronously.
	 */
	spin_lock(&ci->i_ceph_lock);
	if (__ceph_is_any_real_caps(ci) &&
	    (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
		int mds_wanted = __ceph_caps_mds_wanted(ci, true);
		int issued = __ceph_caps_issued(ci, NULL);

		dout("open %p fmode %d want %s issued %s using existing\n",
		     inode, fmode, ceph_cap_string(wanted),
		     ceph_cap_string(issued));
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&ci->i_ceph_lock);

		/* adjust wanted? */
		if ((issued & wanted) != wanted &&
		    (mds_wanted & wanted) != wanted &&
		    ceph_snap(inode) != CEPH_SNAPDIR)
			ceph_check_caps(ci, 0, NULL);

		return ceph_init_file(inode, file, fmode);
	} else if (ceph_snap(inode) != CEPH_NOSNAP &&
		   (ci->i_snap_caps & wanted) == wanted) {
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&ci->i_ceph_lock);
		return ceph_init_file(inode, file, fmode);
	}

	spin_unlock(&ci->i_ceph_lock);

	dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
	req = prepare_open_request(inode->i_sb, flags, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_inode = inode;
	ihold(inode);

	req->r_num_caps = 1;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	if (!err)
		err = ceph_init_file(inode, file, req->r_fmode);
	ceph_mdsc_put_request(req);
	dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
out:
	return err;
}
/*
 * Do a lookup + open with a single request.  If we get a non-existent
 * file or symlink, return 1 so the VFS can retry.
 */
int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
		     struct file *file, unsigned flags, umode_t mode,
		     int *opened)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct dentry *dn;
	struct ceph_acls_info acls = {};
	int mask;
	int err;

	dout("atomic_open %p dentry %p '%pd' %s flags %d mode 0%o\n",
	     dir, dentry, dentry,
	     d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode);

	if (dentry->d_name.len > NAME_MAX)
		return -ENAMETOOLONG;

	if (flags & O_CREAT) {
		if (ceph_quota_is_max_files_exceeded(dir))
			return -EDQUOT;
		err = ceph_pre_init_acls(dir, &mode, &acls);
		if (err < 0)
			return err;
	}

	/* do the open */
	req = prepare_open_request(dir->i_sb, flags, mode);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out_acl;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	if (flags & O_CREAT) {
		req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL;
		req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
		if (acls.pagelist) {
			req->r_pagelist = acls.pagelist;
			acls.pagelist = NULL;
		}
	}

	mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
	if (ceph_security_xattr_wanted(dir))
		mask |= CEPH_CAP_XATTR_SHARED;
	req->r_args.open.mask = cpu_to_le32(mask);

	req->r_parent = dir;
	set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
	err = ceph_mdsc_do_request(mdsc,
				   (flags & (O_CREAT|O_TRUNC)) ? dir : NULL,
				   req);
	err = ceph_handle_snapdir(req, dentry, err);
	if (err)
		goto out_req;

	if ((flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);

	if (d_in_lookup(dentry)) {
		dn = ceph_finish_lookup(req, dentry, err);
		if (IS_ERR(dn))
			err = PTR_ERR(dn);
	} else {
		/* we were given a hashed negative dentry */
		dn = NULL;
	}
	if (err)
		goto out_req;
	if (dn || d_really_is_negative(dentry) || d_is_symlink(dentry)) {
		/* make vfs retry on splice, ENOENT, or symlink */
		dout("atomic_open finish_no_open on dn %p\n", dn);
		err = finish_no_open(file, dn);
	} else {
		dout("atomic_open finish_open on dn %p\n", dn);
		if (req->r_op == CEPH_MDS_OP_CREATE &&
		    req->r_reply_info.has_create_ino) {
			ceph_init_inode_acls(d_inode(dentry), &acls);
			*opened |= FILE_CREATED;
		}
		err = finish_open(file, dentry, ceph_open, opened);
	}
out_req:
	if (!req->r_err && req->r_target_inode)
		ceph_put_fmode(ceph_inode(req->r_target_inode), req->r_fmode);
	ceph_mdsc_put_request(req);
out_acl:
	ceph_release_acls_info(&acls);
	dout("atomic_open result=%d\n", err);
	return err;
}
int ceph_release(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	if (S_ISDIR(inode->i_mode)) {
		struct ceph_dir_file_info *dfi = file->private_data;
		dout("release inode %p dir file %p\n", inode, file);
		WARN_ON(!list_empty(&dfi->file_info.rw_contexts));

		ceph_put_fmode(ci, dfi->file_info.fmode);

		if (dfi->last_readdir)
			ceph_mdsc_put_request(dfi->last_readdir);
		kfree(dfi->last_name);
		kfree(dfi->dir_info);
		kmem_cache_free(ceph_dir_file_cachep, dfi);
	} else {
		struct ceph_file_info *fi = file->private_data;
		dout("release inode %p regular file %p\n", inode, file);
		WARN_ON(!list_empty(&fi->rw_contexts));

		ceph_put_fmode(ci, fi->fmode);
		kmem_cache_free(ceph_file_cachep, fi);
	}

	/* wake up anyone waiting for caps on this inode */
	wake_up_all(&ci->i_cap_wq);
	return 0;
}

enum {
	HAVE_RETRIED = 1,
	CHECK_EOF =    2,
	READ_INLINE =  3,
};
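
/*
 * retry_op values used by ceph_read_iter() below: CHECK_EOF means a
 * short sync read must be re-checked against i_size, READ_INLINE
 * switches to fetching inline data from the MDS, and HAVE_RETRIED
 * marks that one retry has already happened.
 */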
/*
 * Read a range of bytes striped over one or more objects.  Iterate over
 * objects we stripe over.  (That's not atomic, but good enough for now.)
 *
 * If we get a short result from the OSD, check against i_size; we need to
 * only return a short read to the caller if we hit EOF.
 */
static int striped_read(struct inode *inode,
			u64 pos, u64 len,
			struct page **pages, int num_pages,
			int page_align, int *checkeof)
{
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 this_len;
	loff_t i_size;
	int page_idx;
	int ret, read = 0;
	bool hit_stripe, was_short;

	/*
	 * we may need to do multiple reads.  not atomic, unfortunately.
	 */
more:
	this_len = len;
	page_idx = (page_align + read) >> PAGE_SHIFT;
	ret = ceph_osdc_readpages(&fsc->client->osdc, ceph_vino(inode),
				  &ci->i_layout, pos, &this_len,
				  ci->i_truncate_seq, ci->i_truncate_size,
				  pages + page_idx, num_pages - page_idx,
				  ((page_align + read) & ~PAGE_MASK));
	if (ret == -ENOENT)
		ret = 0;
	hit_stripe = this_len < len;
	was_short = ret >= 0 && ret < this_len;
	dout("striped_read %llu~%llu (read %u) got %d%s%s\n", pos, len, read,
	     ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : "");

	i_size = i_size_read(inode);
	if (ret >= 0) {
		if (was_short && (pos + ret < i_size)) {
			int zlen = min(this_len - ret, i_size - pos - ret);
			int zoff = page_align + read + ret;
			dout(" zero gap %llu to %llu\n",
			     pos + ret, pos + ret + zlen);
			ceph_zero_page_vector_range(zoff, zlen, pages);
			ret += zlen;
		}

		read += ret;
		pos += ret;
		len -= ret;

		/* hit stripe and need continue */
		if (len && hit_stripe && pos < i_size)
			goto more;
	}

	if (read > 0) {
		ret = read;
		/* did we bounce off eof? */
		if (pos + len > i_size)
			*checkeof = CHECK_EOF;
	}

	dout("striped_read returns %d\n", ret);
	return ret;
}
/*
 * Completely synchronous read and write methods.  Direct from __user
 * buffer to osd, or directly to user pages (if O_DIRECT).
 *
 * If the read spans object boundary, just do multiple reads.
 */
static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
			      int *checkeof)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct page **pages;
	u64 off = iocb->ki_pos;
	int num_pages;
	ssize_t ret;
	size_t len = iov_iter_count(to);

	dout("sync_read on file %p %llu~%u %s\n", file, off, (unsigned)len,
	     (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");

	if (!len)
		return 0;
	/*
	 * flush any page cache pages in this range.  this
	 * will make concurrent normal and sync io slow,
	 * but it will at least behave sensibly when they are
	 * in sequence.
	 */
	ret = filemap_write_and_wait_range(inode->i_mapping, off,
						off + len);
	if (ret < 0)
		return ret;

	if (unlikely(to->type & ITER_PIPE)) {
		size_t page_off;
		ret = iov_iter_get_pages_alloc(to, &pages, len,
					       &page_off);
		if (ret <= 0)
			return -ENOMEM;
		num_pages = DIV_ROUND_UP(ret + page_off, PAGE_SIZE);

		ret = striped_read(inode, off, ret, pages, num_pages,
				   page_off, checkeof);
		if (ret > 0) {
			iov_iter_advance(to, ret);
			off += ret;
		} else {
			iov_iter_advance(to, 0);
		}
		ceph_put_page_vector(pages, num_pages, false);
	} else {
		num_pages = calc_pages_for(off, len);
		pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
		if (IS_ERR(pages))
			return PTR_ERR(pages);

		ret = striped_read(inode, off, len, pages, num_pages,
				   (off & ~PAGE_MASK), checkeof);
		if (ret > 0) {
			int l, k = 0;
			size_t left = ret;

			while (left) {
				size_t page_off = off & ~PAGE_MASK;
				size_t copy = min_t(size_t, left,
						    PAGE_SIZE - page_off);
				l = copy_page_to_iter(pages[k++], page_off,
						      copy, to);
				off += l;
				left -= l;
				if (l < copy)
					break;
			}
		}
		ceph_release_page_vector(pages, num_pages);
	}

	if (off > iocb->ki_pos) {
		ret = off - iocb->ki_pos;
		iocb->ki_pos = off;
	}

	dout("sync_read result %zd\n", ret);
	return ret;
}
struct ceph_aio_request {
	struct kiocb *iocb;
	size_t total_len;
	bool write;
	bool should_dirty;
	int error;
	struct list_head osd_reqs;
	unsigned num_reqs;
	atomic_t pending_reqs;
	struct timespec mtime;
	struct ceph_cap_flush *prealloc_cf;
};

struct ceph_aio_work {
	struct work_struct work;
	struct ceph_osd_request *req;
};
static void ceph_aio_retry_work(struct work_struct *work);

static void ceph_aio_complete(struct inode *inode,
			      struct ceph_aio_request *aio_req)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret;

	if (!atomic_dec_and_test(&aio_req->pending_reqs))
		return;

	ret = aio_req->error;
	if (!ret)
		ret = aio_req->total_len;

	dout("ceph_aio_complete %p rc %d\n", inode, ret);

	if (ret >= 0 && aio_req->write) {
		int dirty;

		loff_t endoff = aio_req->iocb->ki_pos + aio_req->total_len;
		if (endoff > i_size_read(inode)) {
			if (ceph_inode_set_size(inode, endoff))
				ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
		}

		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &aio_req->prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);

	}

	ceph_put_cap_refs(ci, (aio_req->write ? CEPH_CAP_FILE_WR :
						CEPH_CAP_FILE_RD));

	aio_req->iocb->ki_complete(aio_req->iocb, ret, 0);

	ceph_free_cap_flush(aio_req->prealloc_cf);
	kfree(aio_req);
}
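
/*
 * pending_reqs is incremented once per split OSD request while the
 * requests are still being queued on osd_reqs (see
 * ceph_direct_read_write() below); none are started until all have
 * been counted, so the atomic_dec_and_test above fires exactly once,
 * on the last callback.
 */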
static void ceph_aio_complete_req(struct ceph_osd_request *req)
{
	int rc = req->r_result;
	struct inode *inode = req->r_inode;
	struct ceph_aio_request *aio_req = req->r_priv;
	struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
	int num_pages = calc_pages_for((u64)osd_data->alignment,
				       osd_data->length);

	dout("ceph_aio_complete_req %p rc %d bytes %llu\n",
	     inode, rc, osd_data->length);

	if (rc == -EOLDSNAPC) {
		struct ceph_aio_work *aio_work;
		BUG_ON(!aio_req->write);

		aio_work = kmalloc(sizeof(*aio_work), GFP_NOFS);
		if (aio_work) {
			INIT_WORK(&aio_work->work, ceph_aio_retry_work);
			aio_work->req = req;
			queue_work(ceph_inode_to_client(inode)->wb_wq,
				   &aio_work->work);
			return;
		}
		rc = -ENOMEM;
	} else if (!aio_req->write) {
		if (rc == -ENOENT)
			rc = 0;
		if (rc >= 0 && osd_data->length > rc) {
			int zoff = osd_data->alignment + rc;
			int zlen = osd_data->length - rc;
			/*
			 * If read is satisfied by single OSD request,
			 * it can pass EOF. Otherwise read is within
			 * i_size.
			 */
			if (aio_req->num_reqs == 1) {
				loff_t i_size = i_size_read(inode);
				loff_t endoff = aio_req->iocb->ki_pos + rc;
				if (endoff < i_size)
					zlen = min_t(size_t, zlen,
						     i_size - endoff);
				aio_req->total_len = rc + zlen;
			}

			if (zlen > 0)
				ceph_zero_page_vector_range(zoff, zlen,
							    osd_data->pages);
		}
	}

	ceph_put_page_vector(osd_data->pages, num_pages, aio_req->should_dirty);
	ceph_osdc_put_request(req);

	if (rc < 0)
		cmpxchg(&aio_req->error, 0, rc);

	ceph_aio_complete(inode, aio_req);
	return;
}
static void ceph_aio_retry_work(struct work_struct *work)
{
	struct ceph_aio_work *aio_work =
		container_of(work, struct ceph_aio_work, work);
	struct ceph_osd_request *orig_req = aio_work->req;
	struct ceph_aio_request *aio_req = orig_req->r_priv;
	struct inode *inode = orig_req->r_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc;
	struct ceph_osd_request *req;
	int ret;

	spin_lock(&ci->i_ceph_lock);
	if (__ceph_have_pending_cap_snap(ci)) {
		struct ceph_cap_snap *capsnap =
			list_last_entry(&ci->i_cap_snaps,
					struct ceph_cap_snap,
					ci_item);
		snapc = ceph_get_snap_context(capsnap->context);
	} else {
		BUG_ON(!ci->i_head_snapc);
		snapc = ceph_get_snap_context(ci->i_head_snapc);
	}
	spin_unlock(&ci->i_ceph_lock);

	req = ceph_osdc_alloc_request(orig_req->r_osdc, snapc, 2,
			false, GFP_NOFS);
	if (!req) {
		ret = -ENOMEM;
		req = orig_req;
		goto out;
	}

	req->r_flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
	ceph_oloc_copy(&req->r_base_oloc, &orig_req->r_base_oloc);
	ceph_oid_copy(&req->r_base_oid, &orig_req->r_base_oid);

	ret = ceph_osdc_alloc_messages(req, GFP_NOFS);
	if (ret) {
		ceph_osdc_put_request(req);
		req = orig_req;
		goto out;
	}

	req->r_ops[0] = orig_req->r_ops[0];

	req->r_mtime = aio_req->mtime;
	req->r_data_offset = req->r_ops[0].extent.offset;

	ceph_osdc_put_request(orig_req);

	req->r_callback = ceph_aio_complete_req;
	req->r_inode = inode;
	req->r_priv = aio_req;
	req->r_abort_on_full = true;

	ret = ceph_osdc_start_request(req->r_osdc, req, false);
out:
	if (ret < 0) {
		req->r_result = ret;
		ceph_aio_complete_req(req);
	}

	ceph_put_snap_context(snapc);
	kfree(aio_work);
}
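
/*
 * -EOLDSNAPC means the write raced with a snapshot: the OSD rejected
 * the op because its snap context was older than the one the OSD had
 * already seen.  The retry above simply clones the op with the
 * inode's current snap context and resubmits it.
 */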
static ssize_t
ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
		       struct ceph_snap_context *snapc,
		       struct ceph_cap_flush **pcf)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_vino vino;
	struct ceph_osd_request *req;
	struct page **pages;
	struct ceph_aio_request *aio_req = NULL;
	int num_pages = 0;
	int flags;
	int ret;
	struct timespec mtime = current_time(inode);
	size_t count = iov_iter_count(iter);
	loff_t pos = iocb->ki_pos;
	bool write = iov_iter_rw(iter) == WRITE;
	bool should_dirty = !write && iter_is_iovec(iter);

	if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP)
		return -EROFS;

	dout("sync_direct_%s on file %p %lld~%u snapc %p seq %lld\n",
	     (write ? "write" : "read"), file, pos, (unsigned)count,
	     snapc, snapc ? snapc->seq : 0);

	ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count);
	if (ret < 0)
		return ret;

	if (write) {
		int ret2 = invalidate_inode_pages2_range(inode->i_mapping,
					pos >> PAGE_SHIFT,
					(pos + count) >> PAGE_SHIFT);
		if (ret2 < 0)
			dout("invalidate_inode_pages2_range returned %d\n", ret2);

		flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
	} else {
		flags = CEPH_OSD_FLAG_READ;
	}

	while (iov_iter_count(iter) > 0) {
		u64 size = dio_get_pagev_size(iter);
		size_t start = 0;
		ssize_t len;

		vino = ceph_vino(inode);
		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
					    vino, pos, &size, 0,
					    1,
					    write ? CEPH_OSD_OP_WRITE :
						    CEPH_OSD_OP_READ,
					    flags, snapc,
					    ci->i_truncate_seq,
					    ci->i_truncate_size,
					    false);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		if (write)
			size = min_t(u64, size, fsc->mount_options->wsize);
		else
			size = min_t(u64, size, fsc->mount_options->rsize);

		len = size;
		pages = dio_get_pages_alloc(iter, len, &start, &num_pages);
		if (IS_ERR(pages)) {
			ceph_osdc_put_request(req);
			ret = PTR_ERR(pages);
			break;
		}

		/*
		 * To simplify error handling, allow AIO when IO within i_size
		 * or IO can be satisfied by single OSD request.
		 */
		if (pos == iocb->ki_pos && !is_sync_kiocb(iocb) &&
		    (len == count || pos + count <= i_size_read(inode))) {
			aio_req = kzalloc(sizeof(*aio_req), GFP_KERNEL);
			if (aio_req) {
				aio_req->iocb = iocb;
				aio_req->write = write;
				aio_req->should_dirty = should_dirty;
				INIT_LIST_HEAD(&aio_req->osd_reqs);
				if (write) {
					aio_req->mtime = mtime;
					swap(aio_req->prealloc_cf, *pcf);
				}
			}
			/* ignore error */
		}

		if (write) {
			/*
			 * throw out any page cache pages in this range.  this
			 * may block.
			 */
			truncate_inode_pages_range(inode->i_mapping, pos,
					(pos+len) | (PAGE_SIZE - 1));

			req->r_mtime = mtime;
		}

		osd_req_op_extent_osd_data_pages(req, 0, pages, len, start,
						 false, false);

		if (aio_req) {
			aio_req->total_len += len;
			aio_req->num_reqs++;
			atomic_inc(&aio_req->pending_reqs);

			req->r_callback = ceph_aio_complete_req;
			req->r_inode = inode;
			req->r_priv = aio_req;
			list_add_tail(&req->r_unsafe_item, &aio_req->osd_reqs);

			pos += len;
			iov_iter_advance(iter, len);
			continue;
		}

		ret = ceph_osdc_start_request(req->r_osdc, req, false);
		if (!ret)
			ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

		size = i_size_read(inode);
		if (!write) {
			if (ret == -ENOENT)
				ret = 0;
			if (ret >= 0 && ret < len && pos + ret < size) {
				int zlen = min_t(size_t, len - ret,
						 size - pos - ret);
				ceph_zero_page_vector_range(start + ret, zlen,
							    pages);
				ret += zlen;
			}
			if (ret >= 0)
				len = ret;
		}

		ceph_put_page_vector(pages, num_pages, should_dirty);

		ceph_osdc_put_request(req);
		if (ret < 0)
			break;

		pos += len;
		iov_iter_advance(iter, len);

		if (!write && pos >= size)
			break;

		if (write && pos > size) {
			if (ceph_inode_set_size(inode, pos))
				ceph_check_caps(ceph_inode(inode),
						CHECK_CAPS_AUTHONLY,
						NULL);
		}
	}

	if (aio_req) {
		LIST_HEAD(osd_reqs);

		if (aio_req->num_reqs == 0) {
			kfree(aio_req);
			return ret;
		}

		ceph_get_cap_refs(ci, write ? CEPH_CAP_FILE_WR :
					      CEPH_CAP_FILE_RD);

		list_splice(&aio_req->osd_reqs, &osd_reqs);
		while (!list_empty(&osd_reqs)) {
			req = list_first_entry(&osd_reqs,
					       struct ceph_osd_request,
					       r_unsafe_item);
			list_del_init(&req->r_unsafe_item);
			if (ret >= 0)
				ret = ceph_osdc_start_request(req->r_osdc,
							      req, false);
			if (ret < 0) {
				req->r_result = ret;
				ceph_aio_complete_req(req);
			}
		}
		return -EIOCBQUEUED;
	}

	if (ret != -EOLDSNAPC && pos > iocb->ki_pos) {
		ret = pos - iocb->ki_pos;
		iocb->ki_pos = pos;
	}
	return ret;
}
/*
 * Synchronous write, straight from __user pointer or user pages.
 *
 * If write spans object boundary, just do multiple writes.  (For a
 * correct atomic write, we should e.g. take write locks on all
 * objects, rollback on failure, etc.)
 */
static ssize_t
ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
		struct ceph_snap_context *snapc)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_vino vino;
	struct ceph_osd_request *req;
	struct page **pages;
	u64 len;
	int num_pages;
	int written = 0;
	int flags;
	int ret;
	bool check_caps = false;
	struct timespec mtime = current_time(inode);
	size_t count = iov_iter_count(from);

	if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
		return -EROFS;

	dout("sync_write on file %p %lld~%u snapc %p seq %lld\n",
	     file, pos, (unsigned)count, snapc, snapc->seq);

	ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count);
	if (ret < 0)
		return ret;

	ret = invalidate_inode_pages2_range(inode->i_mapping,
					    pos >> PAGE_SHIFT,
					    (pos + count) >> PAGE_SHIFT);
	if (ret < 0)
		dout("invalidate_inode_pages2_range returned %d\n", ret);

	flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;

	while ((len = iov_iter_count(from)) > 0) {
		size_t left;
		int n;

		vino = ceph_vino(inode);
		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
					    vino, pos, &len, 0, 1,
					    CEPH_OSD_OP_WRITE, flags, snapc,
					    ci->i_truncate_seq,
					    ci->i_truncate_size,
					    false);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		/*
		 * write from beginning of first page,
		 * regardless of io alignment
		 */
		num_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;

		pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto out;
		}

		left = len;
		for (n = 0; n < num_pages; n++) {
			size_t plen = min_t(size_t, left, PAGE_SIZE);
			ret = copy_page_from_iter(pages[n], 0, plen, from);
			if (ret != plen) {
				ret = -EFAULT;
				break;
			}
			left -= ret;
		}

		if (ret < 0) {
			ceph_release_page_vector(pages, num_pages);
			goto out;
		}

		req->r_inode = inode;

		osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
						false, true);

		req->r_mtime = mtime;
		ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
		if (!ret)
			ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

out:
		ceph_osdc_put_request(req);
		if (ret != 0) {
			ceph_set_error_write(ci);
			break;
		}

		ceph_clear_error_write(ci);
		pos += len;
		written += len;
		if (pos > i_size_read(inode)) {
			check_caps = ceph_inode_set_size(inode, pos);
			if (check_caps)
				ceph_check_caps(ceph_inode(inode),
						CHECK_CAPS_AUTHONLY,
						NULL);
		}
	}

	if (ret != -EOLDSNAPC && written > 0) {
		ret = written;
		iocb->ki_pos = pos;
	}
	return ret;
}
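
/*
 * Unlike ceph_direct_read_write(), the data is first copied into
 * freshly allocated pages starting at offset 0, so each OSD write op
 * is page-aligned regardless of how the user buffer was aligned.
 */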
/*
 * Wrap generic_file_aio_read with checks for cap bits on the inode.
 * Atomically grab references, so that those bits are not released
 * back to the MDS mid-read.
 *
 * Hmm, the sync read case isn't actually async... should it be?
 */
static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *filp = iocb->ki_filp;
	struct ceph_file_info *fi = filp->private_data;
	size_t len = iov_iter_count(to);
	struct inode *inode = file_inode(filp);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct page *pinned_page = NULL;
	ssize_t ret;
	int want, got = 0;
	int retry_op = 0, read = 0;

again:
	dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
	     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);

	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_CACHE;
	ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, -1, &got, &pinned_page);
	if (ret < 0)
		return ret;

	if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
	    (iocb->ki_flags & IOCB_DIRECT) ||
	    (fi->flags & CEPH_F_SYNC)) {

		dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n",
		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
		     ceph_cap_string(got));

		if (ci->i_inline_version == CEPH_INLINE_NONE) {
			if (!retry_op && (iocb->ki_flags & IOCB_DIRECT)) {
				ret = ceph_direct_read_write(iocb, to,
							     NULL, NULL);
				if (ret >= 0 && ret < len)
					retry_op = CHECK_EOF;
			} else {
				ret = ceph_sync_read(iocb, to, &retry_op);
			}
		} else {
			retry_op = READ_INLINE;
		}
	} else {
		CEPH_DEFINE_RW_CONTEXT(rw_ctx, got);
		dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
		     ceph_cap_string(got));
		ceph_add_rw_context(fi, &rw_ctx);
		ret = generic_file_read_iter(iocb, to);
		ceph_del_rw_context(fi, &rw_ctx);
	}
	dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
	     inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
	if (pinned_page) {
		put_page(pinned_page);
		pinned_page = NULL;
	}
	ceph_put_cap_refs(ci, got);
	if (retry_op > HAVE_RETRIED && ret >= 0) {
		int statret;
		struct page *page = NULL;
		loff_t i_size;
		if (retry_op == READ_INLINE) {
			page = __page_cache_alloc(GFP_KERNEL);
			if (!page)
				return -ENOMEM;
		}

		statret = __ceph_do_getattr(inode, page,
					    CEPH_STAT_CAP_INLINE_DATA, !!page);
		if (statret < 0) {
			if (page)
				__free_page(page);
			if (statret == -ENODATA) {
				BUG_ON(retry_op != READ_INLINE);
				goto again;
			}
			return statret;
		}

		i_size = i_size_read(inode);
		if (retry_op == READ_INLINE) {
			BUG_ON(ret > 0 || read > 0);
			if (iocb->ki_pos < i_size &&
			    iocb->ki_pos < PAGE_SIZE) {
				loff_t end = min_t(loff_t, i_size,
						   iocb->ki_pos + len);
				end = min_t(loff_t, end, PAGE_SIZE);
				if (statret < end)
					zero_user_segment(page, statret, end);
				ret = copy_page_to_iter(page,
						iocb->ki_pos & ~PAGE_MASK,
						end - iocb->ki_pos, to);
				iocb->ki_pos += ret;
				read += ret;
			}
			if (iocb->ki_pos < i_size && read < len) {
				size_t zlen = min_t(size_t, len - read,
						    i_size - iocb->ki_pos);
				ret = iov_iter_zero(zlen, to);
				iocb->ki_pos += ret;
				read += ret;
			}
			__free_pages(page, 0);
			return read;
		}

		/* hit EOF or hole? */
		if (retry_op == CHECK_EOF && iocb->ki_pos < i_size &&
		    ret < len) {
			dout("sync_read hit hole, ppos %lld < size %lld"
			     ", reading more\n", iocb->ki_pos, i_size);

			read += ret;
			len -= ret;
			retry_op = HAVE_RETRIED;
			goto again;
		}
	}

	if (ret >= 0)
		ret += read;

	return ret;
}
/*
 * Take cap references to avoid releasing caps to MDS mid-write.
 *
 * If we are synchronous, and write with an old snap context, the OSD
 * may return EOLDSNAPC.  In that case, retry the write.. _after_
 * dropping our cap refs and allowing the pending snap to logically
 * complete _before_ this write occurs.
 *
 * If we are near ENOSPC, write synchronously.
 */
static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_client *osdc =
		&ceph_sb_to_client(inode->i_sb)->client->osdc;
	struct ceph_cap_flush *prealloc_cf;
	ssize_t count, written = 0;
	int err, want, got;
	loff_t pos;

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

retry_snap:
	inode_lock(inode);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);

	if (iocb->ki_flags & IOCB_APPEND) {
		err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
		if (err < 0)
			goto out;
	}

	err = generic_write_checks(iocb, from);
	if (err <= 0)
		goto out;

	pos = iocb->ki_pos;
	count = iov_iter_count(from);
	if (ceph_quota_is_max_bytes_exceeded(inode, pos + count)) {
		err = -EDQUOT;
		goto out;
	}

	err = file_remove_privs(file);
	if (err)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out;

	if (ci->i_inline_version != CEPH_INLINE_NONE) {
		err = ceph_uninline_data(file, NULL);
		if (err < 0)
			goto out;
	}

	/* FIXME: not complete since it doesn't account for being at quota */
	if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL)) {
		err = -ENOSPC;
		goto out;
	}

	dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n",
	     inode, ceph_vinop(inode), pos, count, i_size_read(inode));
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;
	got = 0;
	err = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, pos + count,
			    &got, NULL);
	if (err < 0)
		goto out;

	dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));

	if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
	    (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC) ||
	    (ci->i_ceph_flags & CEPH_I_ERROR_WRITE)) {
		struct ceph_snap_context *snapc;
		struct iov_iter data;
		inode_unlock(inode);

		spin_lock(&ci->i_ceph_lock);
		if (__ceph_have_pending_cap_snap(ci)) {
			struct ceph_cap_snap *capsnap =
					list_last_entry(&ci->i_cap_snaps,
							struct ceph_cap_snap,
							ci_item);
			snapc = ceph_get_snap_context(capsnap->context);
		} else {
			BUG_ON(!ci->i_head_snapc);
			snapc = ceph_get_snap_context(ci->i_head_snapc);
		}
		spin_unlock(&ci->i_ceph_lock);

		/* we might need to revert back to that point */
		data = *from;
		if (iocb->ki_flags & IOCB_DIRECT)
			written = ceph_direct_read_write(iocb, &data, snapc,
							 &prealloc_cf);
		else
			written = ceph_sync_write(iocb, &data, pos, snapc);
		if (written > 0)
			iov_iter_advance(from, written);
		ceph_put_snap_context(snapc);
	} else {
		/*
		 * No need to acquire the i_truncate_mutex.  Because
		 * the MDS revokes Fwb caps before sending truncate
		 * message to us.  We can't get Fwb cap while there
		 * are pending vmtruncate.  So write and vmtruncate
		 * can not run at the same time
		 */
		written = generic_perform_write(file, from, pos);
		if (likely(written >= 0))
			iocb->ki_pos = pos + written;
		inode_unlock(inode);
	}

	if (written >= 0) {
		int dirty;

		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
		if (ceph_quota_is_max_bytes_approaching(inode, iocb->ki_pos))
			ceph_check_caps(ci, CHECK_CAPS_NODELAY, NULL);
	}

	dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, (unsigned)count,
	     ceph_cap_string(got));
	ceph_put_cap_refs(ci, got);

	if (written == -EOLDSNAPC) {
		dout("aio_write %p %llx.%llx %llu~%u got EOLDSNAPC, retrying\n",
		     inode, ceph_vinop(inode), pos, (unsigned)count);
		goto retry_snap;
	}

	if (written >= 0) {
		if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_NEARFULL))
			iocb->ki_flags |= IOCB_DSYNC;
		written = generic_write_sync(iocb, written);
	}

	goto out_unlocked;
out:
	inode_unlock(inode);
out_unlocked:
	ceph_free_cap_flush(prealloc_cf);
	current->backing_dev_info = NULL;
	return written ? written : err;
}
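
/*
 * The IOCB_DSYNC upgrade above means that once the cluster reports
 * NEARFULL, every write is followed by a flush, so the client does
 * not accumulate dirty data it might be unable to write back if the
 * cluster later becomes completely full.
 */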
/*
 * llseek.  be sure to verify file size on SEEK_END.
 */
static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t i_size;
	loff_t ret;

	inode_lock(inode);

	if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) {
		ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
		if (ret < 0)
			goto out;
	}

	i_size = i_size_read(inode);
	switch (whence) {
	case SEEK_END:
		offset += i_size;
		break;
	case SEEK_CUR:
		/*
		 * Here we special-case the lseek(fd, 0, SEEK_CUR)
		 * position-querying operation.  Avoid rewriting the "same"
		 * f_pos value back to the file because a concurrent read(),
		 * write() or lseek() might have altered it
		 */
		if (offset == 0) {
			ret = file->f_pos;
			goto out;
		}
		offset += file->f_pos;
		break;
	case SEEK_DATA:
		if (offset < 0 || offset >= i_size) {
			ret = -ENXIO;
			goto out;
		}
		break;
	case SEEK_HOLE:
		if (offset < 0 || offset >= i_size) {
			ret = -ENXIO;
			goto out;
		}
		offset = i_size;
		break;
	}

	ret = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);

out:
	inode_unlock(inode);
	return ret;
}
static inline void ceph_zero_partial_page(
	struct inode *inode, loff_t offset, unsigned size)
{
	struct page *page;
	pgoff_t index = offset >> PAGE_SHIFT;

	page = find_lock_page(inode->i_mapping, index);
	if (page) {
		wait_on_page_writeback(page);
		zero_user(page, offset & (PAGE_SIZE - 1), size);
		unlock_page(page);
		put_page(page);
	}
}

static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
				      loff_t length)
{
	loff_t nearly = round_up(offset, PAGE_SIZE);
	if (offset < nearly) {
		loff_t size = nearly - offset;
		if (length < size)
			size = length;
		ceph_zero_partial_page(inode, offset, size);
		offset += size;
		length -= size;
	}
	if (length >= PAGE_SIZE) {
		loff_t size = round_down(length, PAGE_SIZE);
		truncate_pagecache_range(inode, offset, offset + size - 1);
		offset += size;
		length -= size;
	}
	if (length)
		ceph_zero_partial_page(inode, offset, length);
}
static int ceph_zero_partial_object(struct inode *inode,
				    loff_t offset, loff_t *length)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_request *req;
	int ret = 0;
	loff_t zero = 0;
	int op;

	if (!length) {
		op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE;
		length = &zero;
	} else {
		op = CEPH_OSD_OP_ZERO;
	}

	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
					ceph_vino(inode),
					offset, length,
					0, 1, op,
					CEPH_OSD_FLAG_WRITE,
					NULL, 0, 0, false);
	if (IS_ERR(req)) {
		ret = PTR_ERR(req);
		goto out;
	}

	req->r_mtime = inode->i_mtime;
	ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
	if (!ret) {
		ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
		if (ret == -ENOENT)
			ret = 0;
	}
	ceph_osdc_put_request(req);

out:
	return ret;
}
static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length)
{
	int ret = 0;
	struct ceph_inode_info *ci = ceph_inode(inode);
	s32 stripe_unit = ci->i_layout.stripe_unit;
	s32 stripe_count = ci->i_layout.stripe_count;
	s32 object_size = ci->i_layout.object_size;
	u64 object_set_size = object_size * stripe_count;
	u64 nearly, t;

	/* round offset up to next period boundary */
	nearly = offset + object_set_size - 1;
	t = nearly;
	nearly -= do_div(t, object_set_size);

	while (length && offset < nearly) {
		loff_t size = length;
		ret = ceph_zero_partial_object(inode, offset, &size);
		if (ret < 0)
			return ret;
		offset += size;
		length -= size;
	}
	while (length >= object_set_size) {
		int i;
		loff_t pos = offset;
		for (i = 0; i < stripe_count; ++i) {
			ret = ceph_zero_partial_object(inode, pos, NULL);
			if (ret < 0)
				return ret;
			pos += stripe_unit;
		}
		offset += object_set_size;
		length -= object_set_size;
	}
	while (length) {
		loff_t size = length;
		ret = ceph_zero_partial_object(inode, offset, &size);
		if (ret < 0)
			return ret;
		offset += size;
		length -= size;
	}
	return ret;
}
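
/*
 * Layout arithmetic example: with stripe_unit = 1M, stripe_count = 4
 * and object_size = 4M, one period covers object_set_size = 4M * 4 =
 * 16M of file data.  A punched hole is thus handled as a partial span
 * up to the next 16M boundary, then whole 16M periods (truncating or
 * deleting each of the 4 objects), then the partial tail.
 */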
static long ceph_fallocate(struct file *file, int mode,
				loff_t offset, loff_t length)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_client *osdc =
		&ceph_inode_to_client(inode)->client->osdc;
	struct ceph_cap_flush *prealloc_cf;
	int want, got = 0;
	int dirty;
	int ret = 0;
	loff_t endoff = 0;
	loff_t size;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (!S_ISREG(inode->i_mode))
		return -EOPNOTSUPP;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

	inode_lock(inode);

	if (ceph_snap(inode) != CEPH_NOSNAP) {
		ret = -EROFS;
		goto unlock;
	}

	if (!(mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE)) &&
	    ceph_quota_is_max_bytes_exceeded(inode, offset + length)) {
		ret = -EDQUOT;
		goto unlock;
	}

	if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) &&
	    !(mode & FALLOC_FL_PUNCH_HOLE)) {
		ret = -ENOSPC;
		goto unlock;
	}

	if (ci->i_inline_version != CEPH_INLINE_NONE) {
		ret = ceph_uninline_data(file, NULL);
		if (ret < 0)
			goto unlock;
	}

	size = i_size_read(inode);
	if (!(mode & FALLOC_FL_KEEP_SIZE)) {
		endoff = offset + length;
		ret = inode_newsize_ok(inode, endoff);
		if (ret)
			goto unlock;
	}

	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;

	ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, endoff, &got, NULL);
	if (ret < 0)
		goto unlock;

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (offset < size)
			ceph_zero_pagecache_range(inode, offset, length);
		ret = ceph_zero_objects(inode, offset, length);
	} else if (endoff > size) {
		truncate_pagecache_range(inode, size, -1);
		if (ceph_inode_set_size(inode, endoff))
			ceph_check_caps(ceph_inode(inode),
				CHECK_CAPS_AUTHONLY, NULL);
	}

	if (!ret) {
		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
		if ((endoff > size) &&
		    ceph_quota_is_max_bytes_approaching(inode, endoff))
			ceph_check_caps(ci, CHECK_CAPS_NODELAY, NULL);
	}

	ceph_put_cap_refs(ci, got);
unlock:
	inode_unlock(inode);
	ceph_free_cap_flush(prealloc_cf);
	return ret;
}
const struct file_operations ceph_file_fops = {
	.open = ceph_open,
	.release = ceph_release,
	.llseek = ceph_llseek,
	.read_iter = ceph_read_iter,
	.write_iter = ceph_write_iter,
	.mmap = ceph_mmap,
	.fsync = ceph_fsync,
	.lock = ceph_lock,
	.flock = ceph_flock,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl = ceph_ioctl,
	.compat_ioctl = ceph_ioctl,
	.fallocate = ceph_fallocate,
};