// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/fs.h>
#include <linux/filelock.h>
#include <linux/gfs2_ondisk.h>
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/crc32.h>
#include <linux/writeback.h>
#include <linux/uaccess.h>
#include <linux/dlm.h>
#include <linux/dlm_plock.h>
#include <linux/delay.h>
#include <linux/backing-dev.h>
#include <linux/fileattr.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "aops.h"
#include "dir.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"
/**
 * gfs2_llseek - seek to a location in a file
 * @file: the file
 * @offset: the offset
 * @whence: Where to seek from (SEEK_SET, SEEK_CUR, or SEEK_END)
 *
 * SEEK_END requires the glock for the file because it references the
 * file's size.
 *
 * Returns: The new offset, or errno
 */

static loff_t gfs2_llseek(struct file *file, loff_t offset, int whence)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
	struct gfs2_holder i_gh;
	loff_t error;

	switch (whence) {
	case SEEK_END:
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (!error) {
			error = generic_file_llseek(file, offset, whence);
			gfs2_glock_dq_uninit(&i_gh);
		}
		break;

	case SEEK_DATA:
		error = gfs2_seek_data(file, offset);
		break;

	case SEEK_HOLE:
		error = gfs2_seek_hole(file, offset);
		break;

	case SEEK_CUR:
	case SEEK_SET:
		/*
		 * These don't reference inode->i_size and don't depend on the
		 * block mapping, so we don't need the glock.
		 */
		error = generic_file_llseek(file, offset, whence);
		break;

	default:
		error = -EINVAL;
	}

	return error;
}
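/*
 * Illustrative only (not part of the driver): from user space, the
 * SEEK_DATA/SEEK_HOLE cases above are what back calls such as
 *
 *	off_t data = lseek(fd, 0, SEEK_DATA);
 *	off_t hole = lseek(fd, data, SEEK_HOLE);
 *
 * which walk the allocated extents of a sparse file. Both cases go
 * through gfs2_seek_data()/gfs2_seek_hole(), which consult the block
 * mapping under the appropriate glock.
 */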
/**
 * gfs2_readdir - Iterator for a directory
 * @file: The directory to read from
 * @ctx: What to feed directory entries to
 *
 * Returns: errno
 */

static int gfs2_readdir(struct file *file, struct dir_context *ctx)
{
	struct inode *dir = file->f_mapping->host;
	struct gfs2_inode *dip = GFS2_I(dir);
	struct gfs2_holder d_gh;
	int error;

	error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
	if (error)
		return error;

	error = gfs2_dir_read(dir, ctx, &file->f_ra);

	gfs2_glock_dq_uninit(&d_gh);

	return error;
}
/*
 * struct fsflag_gfs2flag
 *
 * The FS_JOURNAL_DATA_FL flag maps to GFS2_DIF_INHERIT_JDATA for directories,
 * and to GFS2_DIF_JDATA for non-directories.
 */
static struct {
	u32 fsflag;
	u32 gfsflag;
} fsflag_gfs2flag[] = {
	{FS_SYNC_FL, GFS2_DIF_SYNC},
	{FS_IMMUTABLE_FL, GFS2_DIF_IMMUTABLE},
	{FS_APPEND_FL, GFS2_DIF_APPENDONLY},
	{FS_NOATIME_FL, GFS2_DIF_NOATIME},
	{FS_INDEX_FL, GFS2_DIF_EXHASH},
	{FS_TOPDIR_FL, GFS2_DIF_TOPDIR},
	{FS_JOURNAL_DATA_FL, GFS2_DIF_JDATA | GFS2_DIF_INHERIT_JDATA},
};
static inline u32 gfs2_gfsflags_to_fsflags(struct inode *inode, u32 gfsflags)
{
	int i;
	u32 fsflags = 0;

	if (S_ISDIR(inode->i_mode))
		gfsflags &= ~GFS2_DIF_JDATA;
	else
		gfsflags &= ~GFS2_DIF_INHERIT_JDATA;

	for (i = 0; i < ARRAY_SIZE(fsflag_gfs2flag); i++)
		if (gfsflags & fsflag_gfs2flag[i].gfsflag)
			fsflags |= fsflag_gfs2flag[i].fsflag;
	return fsflags;
}
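/*
 * Illustrative only: a directory with GFS2_DIF_INHERIT_JDATA set and a
 * regular file with GFS2_DIF_JDATA set both report FS_JOURNAL_DATA_FL
 * to user space (e.g. via lsattr), because the table above maps both
 * on-disk bits to the same fsflag, and the masking here keeps only the
 * bit that is meaningful for the inode type.
 */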
int gfs2_fileattr_get(struct dentry *dentry, struct fileattr *fa)
{
	struct inode *inode = d_inode(dentry);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int error;
	u32 fsflags;

	if (d_is_special(dentry))
		return -ENOTTY;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	error = gfs2_glock_nq(&gh);
	if (error)
		goto out_uninit;

	fsflags = gfs2_gfsflags_to_fsflags(inode, ip->i_diskflags);

	fileattr_fill_flags(fa, fsflags);

	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	return error;
}
void gfs2_set_inode_flags(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	unsigned int flags = inode->i_flags;

	flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_NOSEC);
	if ((ip->i_eattr == 0) && !is_sxid(inode->i_mode))
		flags |= S_NOSEC;
	if (ip->i_diskflags & GFS2_DIF_IMMUTABLE)
		flags |= S_IMMUTABLE;
	if (ip->i_diskflags & GFS2_DIF_APPENDONLY)
		flags |= S_APPEND;
	if (ip->i_diskflags & GFS2_DIF_NOATIME)
		flags |= S_NOATIME;
	if (ip->i_diskflags & GFS2_DIF_SYNC)
		flags |= S_SYNC;
	inode->i_flags = flags;
}
/* Flags that can be set by user space */
#define GFS2_FLAGS_USER_SET (GFS2_DIF_JDATA|			\
			     GFS2_DIF_IMMUTABLE|		\
			     GFS2_DIF_APPENDONLY|		\
			     GFS2_DIF_NOATIME|			\
			     GFS2_DIF_SYNC|			\
			     GFS2_DIF_TOPDIR|			\
			     GFS2_DIF_INHERIT_JDATA)
/**
 * do_gfs2_set_flags - set flags on an inode
 * @inode: The inode
 * @reqflags: The flags to set
 * @mask: Indicates which flags are valid
 *
 * Returns: errno
 */
static int do_gfs2_set_flags(struct inode *inode, u32 reqflags, u32 mask)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *bh;
	struct gfs2_holder gh;
	int error;
	u32 new_flags, flags;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	if (error)
		return error;

	error = 0;
	flags = ip->i_diskflags;
	new_flags = (flags & ~mask) | (reqflags & mask);
	if ((new_flags ^ flags) == 0)
		goto out;

	if (!IS_IMMUTABLE(inode)) {
		error = gfs2_permission(&nop_mnt_idmap, inode, MAY_WRITE);
		if (error)
			goto out;
	}
	if ((flags ^ new_flags) & GFS2_DIF_JDATA) {
		if (new_flags & GFS2_DIF_JDATA)
			gfs2_log_flush(sdp, ip->i_gl,
				       GFS2_LOG_HEAD_FLUSH_NORMAL |
				       GFS2_LFC_SET_FLAGS);
		error = filemap_fdatawrite(inode->i_mapping);
		if (error)
			goto out;
		error = filemap_fdatawait(inode->i_mapping);
		if (error)
			goto out;
		truncate_inode_pages(inode->i_mapping, 0);
		if (new_flags & GFS2_DIF_JDATA)
			gfs2_ordered_del_inode(ip);
	}
	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
	if (error)
		goto out;
	error = gfs2_meta_inode_buffer(ip, &bh);
	if (error)
		goto out_trans_end;
	inode_set_ctime_current(inode);
	gfs2_trans_add_meta(ip->i_gl, bh);
	ip->i_diskflags = new_flags;
	gfs2_dinode_out(ip, bh->b_data);
	brelse(bh);
	gfs2_set_inode_flags(inode);
	gfs2_set_aops(inode);
out_trans_end:
	gfs2_trans_end(sdp);
out:
	gfs2_glock_dq_uninit(&gh);
	return error;
}
int gfs2_fileattr_set(struct mnt_idmap *idmap,
		      struct dentry *dentry, struct fileattr *fa)
{
	struct inode *inode = d_inode(dentry);
	u32 fsflags = fa->flags, gfsflags = 0;
	u32 mask;
	int i;

	if (d_is_special(dentry))
		return -ENOTTY;

	if (fileattr_has_fsx(fa))
		return -EOPNOTSUPP;

	for (i = 0; i < ARRAY_SIZE(fsflag_gfs2flag); i++) {
		if (fsflags & fsflag_gfs2flag[i].fsflag) {
			fsflags &= ~fsflag_gfs2flag[i].fsflag;
			gfsflags |= fsflag_gfs2flag[i].gfsflag;
		}
	}
	if (fsflags || gfsflags & ~GFS2_FLAGS_USER_SET)
		return -EINVAL;

	mask = GFS2_FLAGS_USER_SET;
	if (S_ISDIR(inode->i_mode)) {
		mask &= ~GFS2_DIF_JDATA;
	} else {
		/* The GFS2_DIF_TOPDIR flag is only valid for directories. */
		if (gfsflags & GFS2_DIF_TOPDIR)
			return -EINVAL;
		mask &= ~(GFS2_DIF_TOPDIR | GFS2_DIF_INHERIT_JDATA);
	}

	return do_gfs2_set_flags(inode, gfsflags, mask);
}
static int gfs2_getlabel(struct file *filp, char __user *label)
{
	struct inode *inode = file_inode(filp);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (copy_to_user(label, sdp->sd_sb.sb_locktable, GFS2_LOCKNAME_LEN))
		return -EFAULT;

	return 0;
}
static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch(cmd) {
	case FITRIM:
		return gfs2_fitrim(filp, (void __user *)arg);
	case FS_IOC_GETFSLABEL:
		return gfs2_getlabel(filp, (char __user *)arg);
	}

	return -ENOTTY;
}
#ifdef CONFIG_COMPAT
static long gfs2_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch(cmd) {
	/* Keep this list in sync with gfs2_ioctl */
	case FITRIM:
	case FS_IOC_GETFSLABEL:
		break;
	default:
		return -ENOIOCTLCMD;
	}

	return gfs2_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
}
#else
#define gfs2_compat_ioctl NULL
#endif
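/*
 * Illustrative only: user space reaches these handlers through the
 * generic FS ioctl interface, e.g.
 *
 *	char label[GFS2_LOCKNAME_LEN];
 *	ioctl(fd, FS_IOC_GETFSLABEL, label);
 *
 * gfs2 reports the lock table name as the filesystem label.
 */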
/**
 * gfs2_size_hint - Give a hint to the size of a write request
 * @filep: The struct file
 * @offset: The file offset of the write
 * @size: The length of the write
 *
 * When we are about to do a write, this function records the total
 * write size in order to provide a suitable hint to the lower layers
 * about how many blocks will be required.
 *
 */

static void gfs2_size_hint(struct file *filep, loff_t offset, size_t size)
{
	struct inode *inode = file_inode(filep);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *ip = GFS2_I(inode);
	size_t blks = (size + sdp->sd_sb.sb_bsize - 1) >> sdp->sd_sb.sb_bsize_shift;
	int hint = min_t(size_t, INT_MAX, blks);

	if (hint > atomic_read(&ip->i_sizehint))
		atomic_set(&ip->i_sizehint, hint);
}
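/*
 * Worked example (illustrative only): with a 4 KiB block size
 * (sb_bsize_shift == 12), a 1 MiB write gives
 * blks = (1048576 + 4095) >> 12 = 256, so i_sizehint is raised to at
 * least 256 blocks. Note that this path only ever raises the hint; a
 * small write after a large one leaves the larger hint in place.
 */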
/**
 * gfs2_allocate_folio_backing - Allocate blocks for a write fault
 * @folio: The (locked) folio to allocate backing for
 * @length: Size of the allocation
 *
 * We try to allocate all the blocks required for the folio in one go.  This
 * might fail for various reasons, so we keep trying until all the blocks to
 * back this folio are allocated.  If some of the blocks are already allocated,
 * that is ok too.
 */
static int gfs2_allocate_folio_backing(struct folio *folio, size_t length)
{
	u64 pos = folio_pos(folio);

	do {
		struct iomap iomap = { };

		if (gfs2_iomap_alloc(folio->mapping->host, pos, length, &iomap))
			return -EIO;

		if (length < iomap.length)
			iomap.length = length;
		length -= iomap.length;
		pos += iomap.length;
	} while (length > 0);

	return 0;
}
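/*
 * Illustrative only: if a 16 KiB folio needs backing and
 * gfs2_iomap_alloc() can only map 8 KiB in the first pass, the loop
 * above runs twice; pos advances by iomap.length each iteration until
 * the whole folio is backed or an allocation fails.
 */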
/**
 * gfs2_page_mkwrite - Make a shared, mmap()ed, page writable
 * @vmf: The virtual memory fault containing the page to become writable
 *
 * When the page becomes writable, we need to ensure that we have
 * blocks allocated on disk to back that page.
 */

static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
{
	struct folio *folio = page_folio(vmf->page);
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_alloc_parms ap = {};
	u64 pos = folio_pos(folio);
	unsigned int data_blocks, ind_blocks, rblocks;
	vm_fault_t ret = VM_FAULT_LOCKED;
	struct gfs2_holder gh;
	size_t length;
	loff_t size;
	int err;

	sb_start_pagefault(inode->i_sb);

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	err = gfs2_glock_nq(&gh);
	if (err) {
		ret = vmf_fs_error(err);
		goto out_uninit;
	}

	/* Check folio index against inode size */
	size = i_size_read(inode);
	if (pos >= size) {
		ret = VM_FAULT_SIGBUS;
		goto out_unlock;
	}

	/* Update file times before taking folio lock */
	file_update_time(vmf->vma->vm_file);

	/* folio is wholly or partially inside EOF */
	if (size - pos < folio_size(folio))
		length = size - pos;
	else
		length = folio_size(folio);

	gfs2_size_hint(vmf->vma->vm_file, pos, length);

	set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
	set_bit(GIF_SW_PAGED, &ip->i_flags);

	/*
	 * iomap_writepage / iomap_writepages currently don't support inline
	 * files, so always unstuff here.
	 */

	if (!gfs2_is_stuffed(ip) &&
	    !gfs2_write_alloc_required(ip, pos, length)) {
		folio_lock(folio);
		if (!folio_test_uptodate(folio) ||
		    folio->mapping != inode->i_mapping) {
			ret = VM_FAULT_NOPAGE;
			folio_unlock(folio);
		}
		goto out_unlock;
	}

	err = gfs2_rindex_update(sdp);
	if (err) {
		ret = vmf_fs_error(err);
		goto out_unlock;
	}

	gfs2_write_calc_reserv(ip, length, &data_blocks, &ind_blocks);
	ap.target = data_blocks + ind_blocks;
	err = gfs2_quota_lock_check(ip, &ap);
	if (err) {
		ret = vmf_fs_error(err);
		goto out_unlock;
	}
	err = gfs2_inplace_reserve(ip, &ap);
	if (err) {
		ret = vmf_fs_error(err);
		goto out_quota_unlock;
	}

	rblocks = RES_DINODE + ind_blocks;
	if (gfs2_is_jdata(ip))
		rblocks += data_blocks ? data_blocks : 1;
	if (ind_blocks || data_blocks) {
		rblocks += RES_STATFS + RES_QUOTA;
		rblocks += gfs2_rg_blocks(ip, data_blocks + ind_blocks);
	}
	err = gfs2_trans_begin(sdp, rblocks, 0);
	if (err) {
		ret = vmf_fs_error(err);
		goto out_trans_fail;
	}

	/* Unstuff, if required, and allocate backing blocks for folio */
	if (gfs2_is_stuffed(ip)) {
		err = gfs2_unstuff_dinode(ip);
		if (err) {
			ret = vmf_fs_error(err);
			goto out_trans_end;
		}
	}

	folio_lock(folio);

	/* If truncated, we must retry the operation, we may have raced
	 * with the glock demotion code.
	 */
	if (!folio_test_uptodate(folio) || folio->mapping != inode->i_mapping) {
		ret = VM_FAULT_NOPAGE;
		goto out_page_locked;
	}

	err = gfs2_allocate_folio_backing(folio, length);
	if (err)
		ret = vmf_fs_error(err);

out_page_locked:
	if (ret != VM_FAULT_LOCKED)
		folio_unlock(folio);
out_trans_end:
	gfs2_trans_end(sdp);
out_trans_fail:
	gfs2_inplace_release(ip);
out_quota_unlock:
	gfs2_quota_unlock(ip);
out_unlock:
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	if (ret == VM_FAULT_LOCKED) {
		folio_mark_dirty(folio);
		folio_wait_stable(folio);
	}
	sb_end_pagefault(inode->i_sb);
	return ret;
}
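/*
 * Illustrative only: for a journaled-data inode needing one data block
 * and one indirect block, the reservation above works out to
 * rblocks = RES_DINODE + 1 (indirect) + 1 (jdata data block) +
 * RES_STATFS + RES_QUOTA + the rgrp blocks for a two-block allocation.
 * The worst case must be reserved before gfs2_trans_begin(), since
 * journal space cannot be extended mid-transaction.
 */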
static vm_fault_t gfs2_fault(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	vm_fault_t ret;
	int err;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	err = gfs2_glock_nq(&gh);
	if (err) {
		ret = vmf_fs_error(err);
		goto out_uninit;
	}
	ret = filemap_fault(vmf);
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	return ret;
}
static const struct vm_operations_struct gfs2_vm_ops = {
	.fault = gfs2_fault,
	.map_pages = filemap_map_pages,
	.page_mkwrite = gfs2_page_mkwrite,
};
/**
 * gfs2_mmap
 * @file: The file to map
 * @vma: The VMA which describes the mapping
 *
 * There is no need to get a lock here unless we should be updating
 * atime. We ignore any locking errors since the only consequence is
 * a missed atime update (which will just be deferred until later).
 *
 * Returns: 0
 */

static int gfs2_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);

	if (!(file->f_flags & O_NOATIME) &&
	    !IS_NOATIME(&ip->i_inode)) {
		struct gfs2_holder i_gh;
		int error;

		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (error)
			return error;
		/* grab lock to update inode */
		gfs2_glock_dq_uninit(&i_gh);
		file_accessed(file);
	}
	vma->vm_ops = &gfs2_vm_ops;

	return 0;
}
/**
 * gfs2_open_common - This is common to open and atomic_open
 * @inode: The inode being opened
 * @file: The file being opened
 *
 * This may be called under a glock or not depending upon how it has
 * been called. We must always be called under a glock for regular
 * files, however. For other file types, it does not matter whether
 * we hold the glock or not.
 *
 * Returns: Error code or 0 for success
 */

int gfs2_open_common(struct inode *inode, struct file *file)
{
	struct gfs2_file *fp;
	int ret;

	if (S_ISREG(inode->i_mode)) {
		ret = generic_file_open(inode, file);
		if (ret)
			return ret;

		if (!gfs2_is_jdata(GFS2_I(inode)))
			file->f_mode |= FMODE_CAN_ODIRECT;
	}

	fp = kzalloc(sizeof(struct gfs2_file), GFP_NOFS);
	if (!fp)
		return -ENOMEM;

	mutex_init(&fp->f_fl_mutex);

	gfs2_assert_warn(GFS2_SB(inode), !file->private_data);
	file->private_data = fp;
	if (file->f_mode & FMODE_WRITE) {
		ret = gfs2_qa_get(GFS2_I(inode));
		if (ret)
			goto fail;
	}
	return 0;

fail:
	kfree(file->private_data);
	file->private_data = NULL;
	return ret;
}
/**
 * gfs2_open - open a file
 * @inode: the inode to open
 * @file: the struct file for this opening
 *
 * After atomic_open, this function is only used for opening files
 * which are already cached. We must still get the glock for regular
 * files to ensure that we have the file size uptodate for the large
 * file check which is in the common code. That is only an issue for
 * regular files though.
 *
 * Returns: errno
 */

static int gfs2_open(struct inode *inode, struct file *file)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder i_gh;
	int error;
	bool need_unlock = false;

	if (S_ISREG(ip->i_inode.i_mode)) {
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (error)
			return error;
		need_unlock = true;
	}

	error = gfs2_open_common(inode, file);

	if (need_unlock)
		gfs2_glock_dq_uninit(&i_gh);

	return error;
}
/**
 * gfs2_release - called to close a struct file
 * @inode: the inode the struct file belongs to
 * @file: the struct file being closed
 *
 * Returns: errno
 */

static int gfs2_release(struct inode *inode, struct file *file)
{
	struct gfs2_inode *ip = GFS2_I(inode);

	kfree(file->private_data);
	file->private_data = NULL;

	if (file->f_mode & FMODE_WRITE) {
		if (gfs2_rs_active(&ip->i_res))
			gfs2_rs_delete(ip);
		gfs2_qa_put(ip);
	}
	return 0;
}
/**
 * gfs2_fsync - sync the dirty data for a file (across the cluster)
 * @file: the file that points to the dentry
 * @start: the start position in the file to sync
 * @end: the end position in the file to sync
 * @datasync: set if we can ignore timestamp changes
 *
 * We split the data flushing here so that we don't wait for the data
 * until after we've also sent the metadata to disk. Note that for
 * data=ordered, we will write & wait for the data at the log flush
 * stage anyway, so this is unlikely to make much of a difference
 * except in the data=writeback case.
 *
 * If the fdatawrite fails due to any reason except -EIO, we will
 * continue the remainder of the fsync, although we'll still report
 * the error at the end. This is to match filemap_write_and_wait_range()
 * behaviour.
 *
 * Returns: errno
 */

static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	int sync_state = inode->i_state & I_DIRTY;
	struct gfs2_inode *ip = GFS2_I(inode);
	int ret = 0, ret1 = 0;

	if (mapping->nrpages) {
		ret1 = filemap_fdatawrite_range(mapping, start, end);
		if (ret1 == -EIO)
			return ret1;
	}

	if (!gfs2_is_jdata(ip))
		sync_state &= ~I_DIRTY_PAGES;
	if (datasync)
		sync_state &= ~I_DIRTY_SYNC;

	if (sync_state) {
		ret = sync_inode_metadata(inode, 1);
		if (ret)
			return ret;
		if (gfs2_is_jdata(ip))
			ret = file_write_and_wait(file);
		if (ret)
			return ret;
		gfs2_ail_flush(ip->i_gl, 1);
	}

	if (mapping->nrpages)
		ret = file_fdatawait_range(file, start, end);

	return ret ? ret : ret1;
}
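/*
 * Illustrative only: fdatasync(fd) reaches this function with
 * datasync == 1, so a pure timestamp update (I_DIRTY_SYNC) does not
 * force a metadata sync; fsync(fd) arrives with datasync == 0 and
 * syncs the inode metadata as well.
 */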
static inline bool should_fault_in_pages(struct iov_iter *i,
					 struct kiocb *iocb,
					 size_t *prev_count,
					 size_t *window_size)
{
	size_t count = iov_iter_count(i);
	size_t size, offs;

	if (!count)
		return false;
	if (!user_backed_iter(i))
		return false;

	/*
	 * Try to fault in multiple pages initially.  When that doesn't result
	 * in any progress, fall back to a single page.
	 */
	size = PAGE_SIZE;
	offs = offset_in_page(iocb->ki_pos);
	if (*prev_count != count) {
		size_t nr_dirtied;

		nr_dirtied = max(current->nr_dirtied_pause -
				 current->nr_dirtied, 8);
		size = min_t(size_t, SZ_1M, nr_dirtied << PAGE_SHIFT);
	}

	*prev_count = count;
	*window_size = size - offs;
	return true;
}
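/*
 * Illustrative only: on the first pass (*prev_count != count), with at
 * least 8 pages of dirty budget and ki_pos page-aligned, the window is
 * up to 1 MiB. If a retry makes no progress (count unchanged), size
 * stays at PAGE_SIZE and the caller falls back to faulting in a single
 * page at a time.
 */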
static ssize_t gfs2_file_direct_read(struct kiocb *iocb, struct iov_iter *to,
				     struct gfs2_holder *gh)
{
	struct file *file = iocb->ki_filp;
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
	size_t prev_count = 0, window_size = 0;
	size_t read = 0;
	ssize_t ret;

	/*
	 * In this function, we disable page faults when we're holding the
	 * inode glock while doing I/O.  If a page fault occurs, we indicate
	 * that the inode glock should be dropped, fault in the pages manually,
	 * and retry.
	 *
	 * Unlike generic_file_read_iter, for reads, iomap_dio_rw can trigger
	 * physical as well as manual page faults, and we need to disable both
	 * kinds.
	 *
	 * For direct I/O, gfs2 takes the inode glock in deferred mode.  This
	 * locking mode is compatible with other deferred holders, so multiple
	 * processes and nodes can do direct I/O to a file at the same time.
	 * There's no guarantee that reads or writes will be atomic.  Any
	 * coordination among readers and writers needs to happen externally.
	 */

	if (!iov_iter_count(to))
		return 0; /* skip atime */

	gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, gh);
retry:
	ret = gfs2_glock_nq(gh);
	if (ret)
		goto out_uninit;

	pagefault_disable();
	to->nofault = true;
	ret = iomap_dio_rw(iocb, to, &gfs2_iomap_ops, NULL,
			   IOMAP_DIO_PARTIAL, NULL, read);
	to->nofault = false;
	pagefault_enable();
	if (ret <= 0 && ret != -EFAULT)
		goto out_unlock;
	/* No increment (+=) because iomap_dio_rw returns a cumulative value. */
	if (ret > 0)
		read = ret;

	if (should_fault_in_pages(to, iocb, &prev_count, &window_size)) {
		gfs2_glock_dq(gh);
		window_size -= fault_in_iov_iter_writeable(to, window_size);
		if (window_size)
			goto retry;
	}
out_unlock:
	if (gfs2_holder_queued(gh))
		gfs2_glock_dq(gh);
out_uninit:
	gfs2_holder_uninit(gh);
	/* User space doesn't expect partial success. */
	if (ret < 0)
		return ret;
	return read;
}
static ssize_t gfs2_file_direct_write(struct kiocb *iocb, struct iov_iter *from,
				      struct gfs2_holder *gh)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	size_t prev_count = 0, window_size = 0;
	size_t written = 0;
	bool enough_retries;
	ssize_t ret;

	/*
	 * In this function, we disable page faults when we're holding the
	 * inode glock while doing I/O.  If a page fault occurs, we indicate
	 * that the inode glock should be dropped, fault in the pages manually,
	 * and retry.
	 *
	 * For writes, iomap_dio_rw only triggers manual page faults, so we
	 * don't need to disable physical ones.
	 */

	/*
	 * Deferred lock, even if it's a write, since we do no allocation on
	 * this path. All we need to change is the atime, and this lock mode
	 * ensures that other nodes have flushed their buffered read caches
	 * (i.e. their page cache entries for this inode). We do not,
	 * unfortunately, have the option of only flushing a range like the
	 * VFS does.
	 */
	gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, gh);
retry:
	ret = gfs2_glock_nq(gh);
	if (ret)
		goto out_uninit;
	/* Silently fall back to buffered I/O when writing beyond EOF */
	if (iocb->ki_pos + iov_iter_count(from) > i_size_read(&ip->i_inode))
		goto out;

	from->nofault = true;
	ret = iomap_dio_rw(iocb, from, &gfs2_iomap_ops, NULL,
			   IOMAP_DIO_PARTIAL, NULL, written);
	from->nofault = false;
	if (ret <= 0) {
		if (ret == -ENOTBLK)
			ret = 0;
		if (ret != -EFAULT)
			goto out;
	}
	/* No increment (+=) because iomap_dio_rw returns a cumulative value. */
	if (ret > 0)
		written = ret;

	enough_retries = prev_count == iov_iter_count(from) &&
			 window_size <= PAGE_SIZE;
	if (should_fault_in_pages(from, iocb, &prev_count, &window_size)) {
		gfs2_glock_dq(gh);
		window_size -= fault_in_iov_iter_readable(from, window_size);
		if (window_size) {
			if (!enough_retries)
				goto retry;
			/* fall back to buffered I/O */
			ret = 0;
		}
	}
out:
	if (gfs2_holder_queued(gh))
		gfs2_glock_dq(gh);
out_uninit:
	gfs2_holder_uninit(gh);
	/* User space doesn't expect partial success. */
	if (ret < 0)
		return ret;
	return written;
}
static ssize_t gfs2_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct gfs2_inode *ip;
	struct gfs2_holder gh;
	size_t prev_count = 0, window_size = 0;
	size_t read = 0;
	ssize_t ret;

	/*
	 * In this function, we disable page faults when we're holding the
	 * inode glock while doing I/O.  If a page fault occurs, we indicate
	 * that the inode glock should be dropped, fault in the pages manually,
	 * and retry.
	 */

	if (iocb->ki_flags & IOCB_DIRECT)
		return gfs2_file_direct_read(iocb, to, &gh);

	pagefault_disable();
	iocb->ki_flags |= IOCB_NOIO;
	ret = generic_file_read_iter(iocb, to);
	iocb->ki_flags &= ~IOCB_NOIO;
	pagefault_enable();
	if (ret >= 0) {
		if (!iov_iter_count(to))
			return ret;
		read = ret;
	} else if (ret != -EFAULT) {
		if (ret != -EAGAIN)
			return ret;
		if (iocb->ki_flags & IOCB_NOWAIT)
			return ret;
	}
	ip = GFS2_I(iocb->ki_filp->f_mapping->host);
	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
retry:
	ret = gfs2_glock_nq(&gh);
	if (ret)
		goto out_uninit;
	pagefault_disable();
	ret = generic_file_read_iter(iocb, to);
	pagefault_enable();
	if (ret <= 0 && ret != -EFAULT)
		goto out_unlock;
	if (ret > 0)
		read += ret;

	if (should_fault_in_pages(to, iocb, &prev_count, &window_size)) {
		gfs2_glock_dq(&gh);
		window_size -= fault_in_iov_iter_writeable(to, window_size);
		if (window_size)
			goto retry;
	}
out_unlock:
	if (gfs2_holder_queued(&gh))
		gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	return read ? read : ret;
}
static ssize_t gfs2_file_buffered_write(struct kiocb *iocb,
					struct iov_iter *from,
					struct gfs2_holder *gh)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_holder *statfs_gh = NULL;
	size_t prev_count = 0, window_size = 0;
	size_t orig_count = iov_iter_count(from);
	size_t written = 0;
	ssize_t ret;

	/*
	 * In this function, we disable page faults when we're holding the
	 * inode glock while doing I/O.  If a page fault occurs, we indicate
	 * that the inode glock should be dropped, fault in the pages manually,
	 * and retry.
	 */

	if (inode == sdp->sd_rindex) {
		statfs_gh = kmalloc(sizeof(*statfs_gh), GFP_NOFS);
		if (!statfs_gh)
			return -ENOMEM;
	}

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, gh);
	if (should_fault_in_pages(from, iocb, &prev_count, &window_size)) {
retry:
		window_size -= fault_in_iov_iter_readable(from, window_size);
		if (!window_size) {
			ret = -EFAULT;
			goto out_uninit;
		}
		from->count = min(from->count, window_size);
	}
	ret = gfs2_glock_nq(gh);
	if (ret)
		goto out_uninit;

	if (inode == sdp->sd_rindex) {
		struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);

		ret = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE,
					 GL_NOCACHE, statfs_gh);
		if (ret)
			goto out_unlock;
	}

	pagefault_disable();
	ret = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops, NULL);
	pagefault_enable();
	if (ret > 0)
		written += ret;

	if (inode == sdp->sd_rindex)
		gfs2_glock_dq_uninit(statfs_gh);

	if (ret <= 0 && ret != -EFAULT)
		goto out_unlock;

	from->count = orig_count - written;
	if (should_fault_in_pages(from, iocb, &prev_count, &window_size)) {
		gfs2_glock_dq(gh);
		goto retry;
	}
out_unlock:
	if (gfs2_holder_queued(gh))
		gfs2_glock_dq(gh);
out_uninit:
	gfs2_holder_uninit(gh);
	kfree(statfs_gh);
	from->count = orig_count - written;
	return written ? written : ret;
}
/**
 * gfs2_file_write_iter - Perform a write to a file
 * @iocb: The io context
 * @from: The data to write
 *
 * We have to do a lock/unlock here to refresh the inode size for
 * O_APPEND writes, otherwise we can end up writing at the wrong
 * offset. There is still a race, but provided the app is using its
 * own file locking, this will make O_APPEND work as expected.
 *
 */

static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	ssize_t ret;

	gfs2_size_hint(file, iocb->ki_pos, iov_iter_count(from));

	if (iocb->ki_flags & IOCB_APPEND) {
		ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
		if (ret)
			return ret;
		gfs2_glock_dq_uninit(&gh);
	}

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		goto out_unlock;

	ret = file_remove_privs(file);
	if (ret)
		goto out_unlock;

	if (iocb->ki_flags & IOCB_DIRECT) {
		struct address_space *mapping = file->f_mapping;
		ssize_t buffered, ret2;

		/*
		 * Note that under direct I/O, we don't allow any inode
		 * timestamp updates, so we're not calling file_update_time()
		 * here.
		 */

		ret = gfs2_file_direct_write(iocb, from, &gh);
		if (ret < 0 || !iov_iter_count(from))
			goto out_unlock;

		iocb->ki_flags |= IOCB_DSYNC;
		buffered = gfs2_file_buffered_write(iocb, from, &gh);
		if (unlikely(buffered <= 0)) {
			if (!ret)
				ret = buffered;
			goto out_unlock;
		}

		/*
		 * We need to ensure that the page cache pages are written to
		 * disk and invalidated to preserve the expected O_DIRECT
		 * semantics. If the writeback or invalidate fails, only
		 * report the direct I/O range as we don't know if the
		 * buffered pages made it to disk.
		 */
		ret2 = generic_write_sync(iocb, buffered);
		invalidate_mapping_pages(mapping,
				(iocb->ki_pos - buffered) >> PAGE_SHIFT,
				(iocb->ki_pos - 1) >> PAGE_SHIFT);
		if (!ret || ret2 > 0)
			ret += buffered;
	} else {
		ret = file_update_time(file);
		if (ret)
			goto out_unlock;

		ret = gfs2_file_buffered_write(iocb, from, &gh);
		if (likely(ret > 0))
			ret = generic_write_sync(iocb, ret);
	}

out_unlock:
	inode_unlock(inode);
	return ret;
}
static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
			   int mode)
{
	struct super_block *sb = inode->i_sb;
	struct gfs2_inode *ip = GFS2_I(inode);
	loff_t end = offset + len;
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (unlikely(error))
		return error;

	gfs2_trans_add_meta(ip->i_gl, dibh);

	if (gfs2_is_stuffed(ip)) {
		error = gfs2_unstuff_dinode(ip);
		if (unlikely(error))
			goto out;
	}

	while (offset < end) {
		struct iomap iomap = { };

		error = gfs2_iomap_alloc(inode, offset, end - offset, &iomap);
		if (error)
			goto out;
		offset = iomap.offset + iomap.length;
		if (!(iomap.flags & IOMAP_F_NEW))
			continue;
		error = sb_issue_zeroout(sb, iomap.addr >> inode->i_blkbits,
					 iomap.length >> inode->i_blkbits,
					 GFP_NOFS);
		if (error) {
			fs_err(GFS2_SB(inode), "Failed to zero data buffers\n");
			goto out;
		}
	}
out:
	brelse(dibh);
	return error;
}
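/*
 * Illustrative only: sb_issue_zeroout() is called solely for extents
 * that gfs2_iomap_alloc() reports as IOMAP_F_NEW; pre-existing
 * allocations are skipped, so fallocating over an already-allocated
 * range does not rewrite its contents with zeroes.
 */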
/**
 * calc_max_reserv() - Reverse of write_calc_reserv. Given a number of
 *                     blocks, determine how many bytes can be written.
 * @ip:          The inode in question.
 * @len:         Max cap of bytes. What we return in *len must be <= this.
 * @data_blocks: Compute and return the number of data blocks needed
 * @ind_blocks:  Compute and return the number of indirect blocks needed
 * @max_blocks:  The total blocks available to work with.
 *
 * Returns: void, but @len, @data_blocks and @ind_blocks are filled in.
 */
static void calc_max_reserv(struct gfs2_inode *ip, loff_t *len,
			    unsigned int *data_blocks, unsigned int *ind_blocks,
			    unsigned int max_blocks)
{
	loff_t max = *len;
	const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	unsigned int tmp, max_data = max_blocks - 3 * (sdp->sd_max_height - 1);

	for (tmp = max_data; tmp > sdp->sd_diptrs;) {
		tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs);
		max_data -= tmp;
	}

	*data_blocks = max_data;
	*ind_blocks = max_blocks - max_data;
	*len = ((loff_t)max_data - 3) << sdp->sd_sb.sb_bsize_shift;
	if (*len > max) {
		*len = max;
		gfs2_write_calc_reserv(ip, max, data_blocks, ind_blocks);
	}
}
static long __gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_alloc_parms ap = {};
	unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
	loff_t bytes, max_bytes, max_blks;
	int error;
	const loff_t pos = offset;
	const loff_t count = len;
	loff_t bsize_mask = ~((loff_t)sdp->sd_sb.sb_bsize - 1);
	loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift;
	loff_t max_chunk_size = UINT_MAX & bsize_mask;

	next = (next + 1) << sdp->sd_sb.sb_bsize_shift;

	offset &= bsize_mask;

	len = next - offset;
	bytes = sdp->sd_max_rg_data * sdp->sd_sb.sb_bsize / 2;
	if (!bytes)
		bytes = UINT_MAX;
	bytes &= bsize_mask;
	if (bytes == 0)
		bytes = sdp->sd_sb.sb_bsize;

	gfs2_size_hint(file, offset, len);

	gfs2_write_calc_reserv(ip, PAGE_SIZE, &data_blocks, &ind_blocks);
	ap.min_target = data_blocks + ind_blocks;

	while (len > 0) {
		if (len < bytes)
			bytes = len;
		if (!gfs2_write_alloc_required(ip, offset, bytes)) {
			len -= bytes;
			offset += bytes;
			continue;
		}

		/* We need to determine how many bytes we can actually
		 * fallocate without exceeding quota or going over the
		 * end of the fs. We start off optimistically by assuming
		 * we can write max_bytes */
		max_bytes = (len > max_chunk_size) ? max_chunk_size : len;

		/* Since max_bytes is most likely a theoretical max, we
		 * calculate a more realistic 'bytes' to serve as a good
		 * starting point for the number of bytes we may be able
		 * to write */
		gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);
		ap.target = data_blocks + ind_blocks;

		error = gfs2_quota_lock_check(ip, &ap);
		if (error)
			return error;
		/* ap.allowed tells us how many blocks quota will allow
		 * us to write. Check if this reduces max_blks */
		max_blks = UINT_MAX;
		if (ap.allowed)
			max_blks = ap.allowed;

		error = gfs2_inplace_reserve(ip, &ap);
		if (error)
			goto out_qunlock;

		/* check if the selected rgrp limits our max_blks further */
		if (ip->i_res.rs_reserved < max_blks)
			max_blks = ip->i_res.rs_reserved;

		/* Almost done. Calculate bytes that can be written using
		 * max_blks. We also recompute max_bytes, data_blocks and
		 * ind_blocks */
		calc_max_reserv(ip, &max_bytes, &data_blocks,
				&ind_blocks, max_blks);

		rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA +
			  RES_RG_HDR + gfs2_rg_blocks(ip, data_blocks + ind_blocks);
		if (gfs2_is_jdata(ip))
			rblocks += data_blocks ? data_blocks : 1;

		error = gfs2_trans_begin(sdp, rblocks,
					 PAGE_SIZE >> inode->i_blkbits);
		if (error)
			goto out_trans_fail;

		error = fallocate_chunk(inode, offset, max_bytes, mode);
		gfs2_trans_end(sdp);

		if (error)
			goto out_trans_fail;

		len -= max_bytes;
		offset += max_bytes;
		gfs2_inplace_release(ip);
		gfs2_quota_unlock(ip);
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) && (pos + count) > inode->i_size)
		i_size_write(inode, pos + count);
	file_update_time(file);
	mark_inode_dirty(inode);

	if ((file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host))
		return vfs_fsync_range(file, pos, pos + count - 1,
				       (file->f_flags & __O_SYNC) ? 0 : 1);
	return 0;

out_trans_fail:
	gfs2_inplace_release(ip);
out_qunlock:
	gfs2_quota_unlock(ip);
	return error;
}
static long gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int ret;

	if (mode & ~(FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE))
		return -EOPNOTSUPP;
	/* fallocate is needed by gfs2_grow to reserve space in the rindex */
	if (gfs2_is_jdata(ip) && inode != sdp->sd_rindex)
		return -EOPNOTSUPP;

	inode_lock(inode);

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	ret = gfs2_glock_nq(&gh);
	if (ret)
		goto out_uninit;

	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
	    (offset + len) > inode->i_size) {
		ret = inode_newsize_ok(inode, offset + len);
		if (ret)
			goto out_unlock;
	}

	ret = get_write_access(inode);
	if (ret)
		goto out_unlock;

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		ret = __gfs2_punch_hole(file, offset, len);
	} else {
		ret = __gfs2_fallocate(file, mode, offset, len);
		if (ret)
			gfs2_rs_deltree(&ip->i_res);
	}

	put_write_access(inode);
out_unlock:
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	inode_unlock(inode);
	return ret;
}
static ssize_t gfs2_file_splice_write(struct pipe_inode_info *pipe,
				      struct file *out, loff_t *ppos,
				      size_t len, unsigned int flags)
{
	ssize_t ret;

	gfs2_size_hint(out, *ppos, len);

	ret = iter_file_splice_write(pipe, out, ppos, len, flags);
	return ret;
}
#ifdef CONFIG_GFS2_FS_LOCKING_DLM

/**
 * gfs2_lock - acquire/release a posix lock on a file
 * @file: the file pointer
 * @cmd: either modify or retrieve lock state, possibly wait
 * @fl: type and range of lock
 *
 * Returns: errno
 */

static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(file->f_mapping->host);
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;

	if (!(fl->c.flc_flags & FL_POSIX))
		return -ENOLCK;
	if (gfs2_withdrawing_or_withdrawn(sdp)) {
		if (lock_is_unlock(fl))
			locks_lock_file_wait(file, fl);
		return -EIO;
	}
	if (cmd == F_CANCELLK)
		return dlm_posix_cancel(ls->ls_dlm, ip->i_no_addr, file, fl);
	else if (IS_GETLK(cmd))
		return dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl);
	else if (lock_is_unlock(fl))
		return dlm_posix_unlock(ls->ls_dlm, ip->i_no_addr, file, fl);
	else
		return dlm_posix_lock(ls->ls_dlm, ip->i_no_addr, file, cmd, fl);
}
static void __flock_holder_uninit(struct file *file, struct gfs2_holder *fl_gh)
{
	struct gfs2_glock *gl = gfs2_glock_hold(fl_gh->gh_gl);

	/*
	 * Make sure gfs2_glock_put() won't sleep under the file->f_lock
	 * spinlock.
	 */

	spin_lock(&file->f_lock);
	gfs2_holder_uninit(fl_gh);
	spin_unlock(&file->f_lock);
	gfs2_glock_put(gl);
}
static int do_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct gfs2_file *fp = file->private_data;
	struct gfs2_holder *fl_gh = &fp->f_fl_gh;
	struct gfs2_inode *ip = GFS2_I(file_inode(file));
	struct gfs2_glock *gl;
	unsigned int state;
	u16 flags;
	int error = 0;
	int sleeptime;

	state = lock_is_write(fl) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
	flags = GL_EXACT | GL_NOPID;
	if (!IS_SETLKW(cmd))
		flags |= LM_FLAG_TRY_1CB;

	mutex_lock(&fp->f_fl_mutex);

	if (gfs2_holder_initialized(fl_gh)) {
		struct file_lock request;
		if (fl_gh->gh_state == state)
			goto out;
		locks_init_lock(&request);
		request.c.flc_type = F_UNLCK;
		request.c.flc_flags = FL_FLOCK;
		locks_lock_file_wait(file, &request);
		gfs2_glock_dq(fl_gh);
		gfs2_holder_reinit(state, flags, fl_gh);
	} else {
		error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr,
				       &gfs2_flock_glops, CREATE, &gl);
		if (error)
			goto out;
		spin_lock(&file->f_lock);
		gfs2_holder_init(gl, state, flags, fl_gh);
		spin_unlock(&file->f_lock);
		gfs2_glock_put(gl);
	}
	for (sleeptime = 1; sleeptime <= 4; sleeptime <<= 1) {
		error = gfs2_glock_nq(fl_gh);
		if (error != GLR_TRYFAILED)
			break;
		fl_gh->gh_flags &= ~LM_FLAG_TRY_1CB;
		fl_gh->gh_flags |= LM_FLAG_TRY;
		msleep(sleeptime);
	}
	if (error) {
		__flock_holder_uninit(file, fl_gh);
		if (error == GLR_TRYFAILED)
			error = -EAGAIN;
	} else {
		error = locks_lock_file_wait(file, fl);
		gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
	}

out:
	mutex_unlock(&fp->f_fl_mutex);
	return error;
}
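/*
 * Illustrative only: for a non-blocking request (e.g. flock() with
 * LOCK_NB), the loop in do_flock() first tries the glock with
 * LM_FLAG_TRY_1CB, then retries with LM_FLAG_TRY after sleeping 1, 2
 * and 4 ms; if every attempt returns GLR_TRYFAILED, the caller sees
 * -EAGAIN, matching the flock(2) EWOULDBLOCK semantics.
 */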
static void do_unflock(struct file *file, struct file_lock *fl)
{
	struct gfs2_file *fp = file->private_data;
	struct gfs2_holder *fl_gh = &fp->f_fl_gh;

	mutex_lock(&fp->f_fl_mutex);
	locks_lock_file_wait(file, fl);
	if (gfs2_holder_initialized(fl_gh)) {
		gfs2_glock_dq(fl_gh);
		__flock_holder_uninit(file, fl_gh);
	}
	mutex_unlock(&fp->f_fl_mutex);
}
/**
 * gfs2_flock - acquire/release a flock lock on a file
 * @file: the file pointer
 * @cmd: either modify or retrieve lock state, possibly wait
 * @fl: type and range of lock
 *
 * Returns: errno
 */

static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl)
{
	if (!(fl->c.flc_flags & FL_FLOCK))
		return -ENOLCK;

	if (lock_is_unlock(fl)) {
		do_unflock(file, fl);
		return 0;
	} else {
		return do_flock(file, cmd, fl);
	}
}
const struct file_operations gfs2_file_fops = {
	.llseek		= gfs2_llseek,
	.read_iter	= gfs2_file_read_iter,
	.write_iter	= gfs2_file_write_iter,
	.iopoll		= iocb_bio_iopoll,
	.unlocked_ioctl	= gfs2_ioctl,
	.compat_ioctl	= gfs2_compat_ioctl,
	.mmap		= gfs2_mmap,
	.open		= gfs2_open,
	.release	= gfs2_release,
	.fsync		= gfs2_fsync,
	.lock		= gfs2_lock,
	.flock		= gfs2_flock,
	.splice_read	= copy_splice_read,
	.splice_write	= gfs2_file_splice_write,
	.setlease	= simple_nosetlease,
	.fallocate	= gfs2_fallocate,
	.fop_flags	= FOP_ASYNC_LOCK,
};
const struct file_operations gfs2_dir_fops = {
	.iterate_shared	= gfs2_readdir,
	.unlocked_ioctl	= gfs2_ioctl,
	.compat_ioctl	= gfs2_compat_ioctl,
	.open		= gfs2_open,
	.release	= gfs2_release,
	.fsync		= gfs2_fsync,
	.lock		= gfs2_lock,
	.flock		= gfs2_flock,
	.llseek		= default_llseek,
	.fop_flags	= FOP_ASYNC_LOCK,
};
#endif /* CONFIG_GFS2_FS_LOCKING_DLM */
const struct file_operations gfs2_file_fops_nolock = {
	.llseek		= gfs2_llseek,
	.read_iter	= gfs2_file_read_iter,
	.write_iter	= gfs2_file_write_iter,
	.iopoll		= iocb_bio_iopoll,
	.unlocked_ioctl	= gfs2_ioctl,
	.compat_ioctl	= gfs2_compat_ioctl,
	.mmap		= gfs2_mmap,
	.open		= gfs2_open,
	.release	= gfs2_release,
	.fsync		= gfs2_fsync,
	.splice_read	= copy_splice_read,
	.splice_write	= gfs2_file_splice_write,
	.setlease	= generic_setlease,
	.fallocate	= gfs2_fallocate,
};
const struct file_operations gfs2_dir_fops_nolock = {
	.iterate_shared	= gfs2_readdir,
	.unlocked_ioctl	= gfs2_ioctl,
	.compat_ioctl	= gfs2_compat_ioctl,
	.open		= gfs2_open,
	.release	= gfs2_release,
	.fsync		= gfs2_fsync,
	.llseek		= default_llseek,
};