ext4: add shutdown bit and check for it
fs/ext4/file.c
1 /*
2  *  linux/fs/ext4/file.c
3  *
4  * Copyright (C) 1992, 1993, 1994, 1995
5  * Remy Card (card@masi.ibp.fr)
6  * Laboratoire MASI - Institut Blaise Pascal
7  * Universite Pierre et Marie Curie (Paris VI)
8  *
9  *  from
10  *
11  *  linux/fs/minix/file.c
12  *
13  *  Copyright (C) 1991, 1992  Linus Torvalds
14  *
15  *  ext4 fs regular file handling primitives
16  *
17  *  64-bit file support on 64-bit platforms by Jakub Jelinek
18  *      (jj@sunsite.ms.mff.cuni.cz)
19  */
20
21 #include <linux/time.h>
22 #include <linux/fs.h>
23 #include <linux/mount.h>
24 #include <linux/path.h>
25 #include <linux/dax.h>
26 #include <linux/quotaops.h>
27 #include <linux/pagevec.h>
28 #include <linux/uio.h>
29 #include "ext4.h"
30 #include "ext4_jbd2.h"
31 #include "xattr.h"
32 #include "acl.h"
33
34 #ifdef CONFIG_FS_DAX
35 static ssize_t ext4_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
36 {
37         struct inode *inode = file_inode(iocb->ki_filp);
38         ssize_t ret;
39
40         inode_lock_shared(inode);
41         /*
42          * Recheck under inode lock - at this point we are sure it cannot
43          * change anymore
44          */
45         if (!IS_DAX(inode)) {
46                 inode_unlock_shared(inode);
47                 /* Fall back to buffered IO in case we cannot support DAX */
48                 return generic_file_read_iter(iocb, to);
49         }
50         ret = dax_iomap_rw(iocb, to, &ext4_iomap_ops);
51         inode_unlock_shared(inode);
52
53         file_accessed(iocb->ki_filp);
54         return ret;
55 }
56 #endif
57
58 static ssize_t ext4_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
59 {
60         if (unlikely(ext4_forced_shutdown(EXT4_SB(file_inode(iocb->ki_filp)->i_sb))))
61                 return -EIO;
62
63         if (!iov_iter_count(to))
64                 return 0; /* skip atime */
65
66 #ifdef CONFIG_FS_DAX
67         if (IS_DAX(file_inode(iocb->ki_filp)))
68                 return ext4_dax_read_iter(iocb, to);
69 #endif
70         return generic_file_read_iter(iocb, to);
71 }
72
73 /*
74  * Called when an inode is released. Note that this is different
75  * from ext4_file_open: open gets called at every open, but release
76  * gets called only when /all/ the files are closed.
77  */
78 static int ext4_release_file(struct inode *inode, struct file *filp)
79 {
80         if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
81                 ext4_alloc_da_blocks(inode);
82                 ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
83         }
84         /* if we are the last writer on the inode, drop the block reservation */
85         if ((filp->f_mode & FMODE_WRITE) &&
86                         (atomic_read(&inode->i_writecount) == 1) &&
87                         !EXT4_I(inode)->i_reserved_data_blocks)
88         {
89                 down_write(&EXT4_I(inode)->i_data_sem);
90                 ext4_discard_preallocations(inode);
91                 up_write(&EXT4_I(inode)->i_data_sem);
92         }
93         if (is_dx(inode) && filp->private_data)
94                 ext4_htree_free_dir_info(filp->private_data);
95
96         return 0;
97 }
98
99 static void ext4_unwritten_wait(struct inode *inode)
100 {
101         wait_queue_head_t *wq = ext4_ioend_wq(inode);
102
103         wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_unwritten) == 0));
104 }
105
106 /*
107  * This tests whether the IO in question is block-aligned or not.
108  * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
109  * are converted to written only after the IO is complete.  Until they are
110  * mapped, these blocks appear as holes, so dio_zero_block() will assume that
111  * it needs to zero out portions of the start and/or end block.  If 2 AIO
112  * threads are at work on the same unwritten block, they must be synchronized
113  * or one thread will zero the other's data, causing corruption.
114  */
115 static int
116 ext4_unaligned_aio(struct inode *inode, struct iov_iter *from, loff_t pos)
117 {
118         struct super_block *sb = inode->i_sb;
119         int blockmask = sb->s_blocksize - 1;
120
121         if (pos >= i_size_read(inode))
122                 return 0;
123
124         if ((pos | iov_iter_alignment(from)) & blockmask)
125                 return 1;
126
127         return 0;
128 }
129
130 /* Is IO overwriting allocated and initialized blocks? */
131 static bool ext4_overwrite_io(struct inode *inode, loff_t pos, loff_t len)
132 {
133         struct ext4_map_blocks map;
134         unsigned int blkbits = inode->i_blkbits;
135         int err, blklen;
136
137         if (pos + len > i_size_read(inode))
138                 return false;
139
140         map.m_lblk = pos >> blkbits;
141         map.m_len = EXT4_MAX_BLOCKS(len, pos, blkbits);
142         blklen = map.m_len;
143
144         err = ext4_map_blocks(NULL, inode, &map, 0);
145         /*
146          * 'err == blklen' means that all of the blocks have been preallocated,
147          * regardless of whether they have been initialized or not. To exclude
148          * unwritten extents, we need to check m_flags.
149          */
150         return err == blklen && (map.m_flags & EXT4_MAP_MAPPED);
151 }
152
153 static ssize_t ext4_write_checks(struct kiocb *iocb, struct iov_iter *from)
154 {
155         struct inode *inode = file_inode(iocb->ki_filp);
156         ssize_t ret;
157
158         ret = generic_write_checks(iocb, from);
159         if (ret <= 0)
160                 return ret;
161         /*
162          * If we have encountered a bitmap-format file, the size limit
163          * is smaller than s_maxbytes, which is for extent-mapped files.
164          */
165         if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
166                 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
167
168                 if (iocb->ki_pos >= sbi->s_bitmap_maxbytes)
169                         return -EFBIG;
170                 iov_iter_truncate(from, sbi->s_bitmap_maxbytes - iocb->ki_pos);
171         }
172         return iov_iter_count(from);
173 }
174
175 #ifdef CONFIG_FS_DAX
176 static ssize_t
177 ext4_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
178 {
179         struct inode *inode = file_inode(iocb->ki_filp);
180         ssize_t ret;
181         bool overwrite = false;
182
183         inode_lock(inode);
184         ret = ext4_write_checks(iocb, from);
185         if (ret <= 0)
186                 goto out;
187         ret = file_remove_privs(iocb->ki_filp);
188         if (ret)
189                 goto out;
190         ret = file_update_time(iocb->ki_filp);
191         if (ret)
192                 goto out;
193
194         if (ext4_overwrite_io(inode, iocb->ki_pos, iov_iter_count(from))) {
195                 overwrite = true;
196                 downgrade_write(&inode->i_rwsem);
197         }
198         ret = dax_iomap_rw(iocb, from, &ext4_iomap_ops);
199 out:
200         if (!overwrite)
201                 inode_unlock(inode);
202         else
203                 inode_unlock_shared(inode);
204         if (ret > 0)
205                 ret = generic_write_sync(iocb, ret);
206         return ret;
207 }
208 #endif
209
210 static ssize_t
211 ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
212 {
213         struct inode *inode = file_inode(iocb->ki_filp);
214         int o_direct = iocb->ki_flags & IOCB_DIRECT;
215         int unaligned_aio = 0;
216         int overwrite = 0;
217         ssize_t ret;
218
219         if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
220                 return -EIO;
221
222 #ifdef CONFIG_FS_DAX
223         if (IS_DAX(inode))
224                 return ext4_dax_write_iter(iocb, from);
225 #endif
226
227         inode_lock(inode);
228         ret = ext4_write_checks(iocb, from);
229         if (ret <= 0)
230                 goto out;
231
232         /*
233          * Unaligned direct AIO writes must be serialized against each other, as
234          * zeroing of partial blocks by two competing unaligned AIOs can result
235          * in data corruption.
236          */
237         if (o_direct && ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
238             !is_sync_kiocb(iocb) &&
239             ext4_unaligned_aio(inode, from, iocb->ki_pos)) {
240                 unaligned_aio = 1;
241                 ext4_unwritten_wait(inode);
242         }
243
244         iocb->private = &overwrite;
245         /* Check whether we do a DIO overwrite or not */
246         if (o_direct && ext4_should_dioread_nolock(inode) && !unaligned_aio &&
247             ext4_overwrite_io(inode, iocb->ki_pos, iov_iter_count(from)))
248                 overwrite = 1;
249
250         ret = __generic_file_write_iter(iocb, from);
251         inode_unlock(inode);
252
253         if (ret > 0)
254                 ret = generic_write_sync(iocb, ret);
255
256         return ret;
257
258 out:
259         inode_unlock(inode);
260         return ret;
261 }
262
263 #ifdef CONFIG_FS_DAX
264 static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
265 {
266         int result;
267         struct inode *inode = file_inode(vma->vm_file);
268         struct super_block *sb = inode->i_sb;
269         bool write = vmf->flags & FAULT_FLAG_WRITE;
270
271         if (write) {
272                 sb_start_pagefault(sb);
273                 file_update_time(vma->vm_file);
274         }
275         down_read(&EXT4_I(inode)->i_mmap_sem);
276         result = dax_iomap_fault(vma, vmf, &ext4_iomap_ops);
277         up_read(&EXT4_I(inode)->i_mmap_sem);
278         if (write)
279                 sb_end_pagefault(sb);
280
281         return result;
282 }
283
284 static int ext4_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
285                                                 pmd_t *pmd, unsigned int flags)
286 {
287         int result;
288         struct inode *inode = file_inode(vma->vm_file);
289         struct super_block *sb = inode->i_sb;
290         bool write = flags & FAULT_FLAG_WRITE;
291
292         if (write) {
293                 sb_start_pagefault(sb);
294                 file_update_time(vma->vm_file);
295         }
296         down_read(&EXT4_I(inode)->i_mmap_sem);
297         result = dax_iomap_pmd_fault(vma, addr, pmd, flags,
298                                      &ext4_iomap_ops);
299         up_read(&EXT4_I(inode)->i_mmap_sem);
300         if (write)
301                 sb_end_pagefault(sb);
302
303         return result;
304 }
305
306 /*
307  * Handle write faults for VM_MIXEDMAP mappings. As in the ext4_dax_fault()
308  * handler, we check for races against truncate. Note that since we cycle
309  * through i_mmap_sem, we are sure that any hole punching that began before
310  * we were called has finished by now, so if it covered part of the file we
311  * are working on, our pte will have been unmapped and the pte_same() check
312  * in wp_pfn_shared() will fail. Thus the fault gets retried and things work
313  * out as desired.
314  */
315 static int ext4_dax_pfn_mkwrite(struct vm_area_struct *vma,
316                                 struct vm_fault *vmf)
317 {
318         struct inode *inode = file_inode(vma->vm_file);
319         struct super_block *sb = inode->i_sb;
320         loff_t size;
321         int ret;
322
323         sb_start_pagefault(sb);
324         file_update_time(vma->vm_file);
325         down_read(&EXT4_I(inode)->i_mmap_sem);
326         size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
327         if (vmf->pgoff >= size)
328                 ret = VM_FAULT_SIGBUS;
329         else
330                 ret = dax_pfn_mkwrite(vma, vmf);
331         up_read(&EXT4_I(inode)->i_mmap_sem);
332         sb_end_pagefault(sb);
333
334         return ret;
335 }
336
337 static const struct vm_operations_struct ext4_dax_vm_ops = {
338         .fault          = ext4_dax_fault,
339         .pmd_fault      = ext4_dax_pmd_fault,
340         .page_mkwrite   = ext4_dax_fault,
341         .pfn_mkwrite    = ext4_dax_pfn_mkwrite,
342 };
343 #else
344 #define ext4_dax_vm_ops ext4_file_vm_ops
345 #endif
346
347 static const struct vm_operations_struct ext4_file_vm_ops = {
348         .fault          = ext4_filemap_fault,
349         .map_pages      = filemap_map_pages,
350         .page_mkwrite   = ext4_page_mkwrite,
351 };
352
353 static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
354 {
355         struct inode *inode = file->f_mapping->host;
356
357         if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
358                 return -EIO;
359
360         if (ext4_encrypted_inode(inode)) {
361                 int err = fscrypt_get_encryption_info(inode);
362                 if (err)
363                         return 0;
364                 if (!fscrypt_has_encryption_key(inode))
365                         return -ENOKEY;
366         }
367         file_accessed(file);
368         if (IS_DAX(file_inode(file))) {
369                 vma->vm_ops = &ext4_dax_vm_ops;
370                 vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
371         } else {
372                 vma->vm_ops = &ext4_file_vm_ops;
373         }
374         return 0;
375 }
376
377 static int ext4_file_open(struct inode * inode, struct file * filp)
378 {
379         struct super_block *sb = inode->i_sb;
380         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
381         struct vfsmount *mnt = filp->f_path.mnt;
382         struct dentry *dir;
383         struct path path;
384         char buf[64], *cp;
385         int ret;
386
387         if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
388                 return -EIO;
389
390         if (unlikely(!(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED) &&
391                      !(sb->s_flags & MS_RDONLY))) {
392                 sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED;
393                 /*
394                  * Sample where the filesystem has been mounted and
395                  * store it in the superblock for sysadmin convenience
396                  * when trying to sort through large numbers of block
397                  * devices or filesystem images.
398                  */
399                 memset(buf, 0, sizeof(buf));
400                 path.mnt = mnt;
401                 path.dentry = mnt->mnt_root;
402                 cp = d_path(&path, buf, sizeof(buf));
403                 if (!IS_ERR(cp)) {
404                         handle_t *handle;
405                         int err;
406
407                         handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
408                         if (IS_ERR(handle))
409                                 return PTR_ERR(handle);
410                         BUFFER_TRACE(sbi->s_sbh, "get_write_access");
411                         err = ext4_journal_get_write_access(handle, sbi->s_sbh);
412                         if (err) {
413                                 ext4_journal_stop(handle);
414                                 return err;
415                         }
416                         strlcpy(sbi->s_es->s_last_mounted, cp,
417                                 sizeof(sbi->s_es->s_last_mounted));
418                         ext4_handle_dirty_super(handle, sb);
419                         ext4_journal_stop(handle);
420                 }
421         }
422         if (ext4_encrypted_inode(inode)) {
423                 ret = fscrypt_get_encryption_info(inode);
424                 if (ret)
425                         return -EACCES;
426                 if (!fscrypt_has_encryption_key(inode))
427                         return -ENOKEY;
428         }
429
430         dir = dget_parent(file_dentry(filp));
431         if (ext4_encrypted_inode(d_inode(dir)) &&
432                         !fscrypt_has_permitted_context(d_inode(dir), inode)) {
433                 ext4_warning(inode->i_sb,
434                              "Inconsistent encryption contexts: %lu/%lu",
435                              (unsigned long) d_inode(dir)->i_ino,
436                              (unsigned long) inode->i_ino);
437                 dput(dir);
438                 return -EPERM;
439         }
440         dput(dir);
441         /*
442          * Set up the jbd2_inode if we are opening the inode for
443          * writing and the journal is present
444          */
445         if (filp->f_mode & FMODE_WRITE) {
446                 ret = ext4_inode_attach_jinode(inode);
447                 if (ret < 0)
448                         return ret;
449         }
450         return dquot_file_open(inode, filp);
451 }
452
453 /*
454  * Here we use ext4_map_blocks() to get a block mapping for an extent-based
455  * file rather than ext4_ext_walk_space(), because it lets us handle
456  * SEEK_DATA/SEEK_HOLE for both block-mapped and extent-mapped files in the
457  * same function.  Once the extent status tree is fully implemented, it will
458  * track all extent status for a file and we can use it directly to
459  * retrieve the offset for SEEK_DATA/SEEK_HOLE.
460  */
461
462 /*
463  * When we retrieve the offset for SEEK_DATA/SEEK_HOLE, we need to look up
464  * the page cache to check whether there is any data in the range
465  * [startoff, endoff]: if this range contains an unwritten extent, we treat
466  * the extent as data or as a hole depending on whether the page cache has
467  * data for it.
468  */
469 static int ext4_find_unwritten_pgoff(struct inode *inode,
470                                      int whence,
471                                      ext4_lblk_t end_blk,
472                                      loff_t *offset)
473 {
474         struct pagevec pvec;
475         unsigned int blkbits;
476         pgoff_t index;
477         pgoff_t end;
478         loff_t endoff;
479         loff_t startoff;
480         loff_t lastoff;
481         int found = 0;
482
483         blkbits = inode->i_sb->s_blocksize_bits;
484         startoff = *offset;
485         lastoff = startoff;
486         endoff = (loff_t)end_blk << blkbits;
487
488         index = startoff >> PAGE_SHIFT;
489         end = endoff >> PAGE_SHIFT;
490
491         pagevec_init(&pvec, 0);
492         do {
493                 int i, num;
494                 unsigned long nr_pages;
495
496                 num = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
497                 nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
498                                           (pgoff_t)num);
499                 if (nr_pages == 0) {
500                         if (whence == SEEK_DATA)
501                                 break;
502
503                         BUG_ON(whence != SEEK_HOLE);
504                         /*
505                          * If this is the first pass through the loop, or the
506                          * offset is not beyond the end offset, there is a
507                          * hole at this offset.
508                          */
509                         if (lastoff == startoff || lastoff < endoff)
510                                 found = 1;
511                         break;
512                 }
513
514                 /*
515                  * If this is the first pass through the loop and the offset is
516                  * smaller than the first page offset, there is a hole at this
517                  * offset.
518                  */
519                 if (lastoff == startoff && whence == SEEK_HOLE &&
520                     lastoff < page_offset(pvec.pages[0])) {
521                         found = 1;
522                         break;
523                 }
524
525                 for (i = 0; i < nr_pages; i++) {
526                         struct page *page = pvec.pages[i];
527                         struct buffer_head *bh, *head;
528
529                         /*
530                          * If the current offset is not beyond the end of the
531                          * given range, it will be a hole.
532                          */
533                         if (lastoff < endoff && whence == SEEK_HOLE &&
534                             page->index > end) {
535                                 found = 1;
536                                 *offset = lastoff;
537                                 goto out;
538                         }
539
540                         lock_page(page);
541
542                         if (unlikely(page->mapping != inode->i_mapping)) {
543                                 unlock_page(page);
544                                 continue;
545                         }
546
547                         if (!page_has_buffers(page)) {
548                                 unlock_page(page);
549                                 continue;
550                         }
551
552                         if (page_has_buffers(page)) {
553                                 lastoff = page_offset(page);
554                                 bh = head = page_buffers(page);
555                                 do {
556                                         if (buffer_uptodate(bh) ||
557                                             buffer_unwritten(bh)) {
558                                                 if (whence == SEEK_DATA)
559                                                         found = 1;
560                                         } else {
561                                                 if (whence == SEEK_HOLE)
562                                                         found = 1;
563                                         }
564                                         if (found) {
565                                                 *offset = max_t(loff_t,
566                                                         startoff, lastoff);
567                                                 unlock_page(page);
568                                                 goto out;
569                                         }
570                                         lastoff += bh->b_size;
571                                         bh = bh->b_this_page;
572                                 } while (bh != head);
573                         }
574
575                         lastoff = page_offset(page) + PAGE_SIZE;
576                         unlock_page(page);
577                 }
578
579                 /*
580                  * Fewer pages were returned than we asked for, so there must
581                  * be a hole in there.
582                  */
583                 if (nr_pages < num && whence == SEEK_HOLE) {
584                         found = 1;
585                         *offset = lastoff;
586                         break;
587                 }
588
589                 index = pvec.pages[i - 1]->index + 1;
590                 pagevec_release(&pvec);
591         } while (index <= end);
592
593 out:
594         pagevec_release(&pvec);
595         return found;
596 }
597
598 /*
599  * ext4_seek_data() retrieves the offset for SEEK_DATA.
600  */
601 static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
602 {
603         struct inode *inode = file->f_mapping->host;
604         struct extent_status es;
605         ext4_lblk_t start, last, end;
606         loff_t dataoff, isize;
607         int blkbits;
608         int ret;
609
610         inode_lock(inode);
611
612         isize = i_size_read(inode);
613         if (offset >= isize) {
614                 inode_unlock(inode);
615                 return -ENXIO;
616         }
617
618         blkbits = inode->i_sb->s_blocksize_bits;
619         start = offset >> blkbits;
620         last = start;
621         end = isize >> blkbits;
622         dataoff = offset;
623
624         do {
625                 ret = ext4_get_next_extent(inode, last, end - last + 1, &es);
626                 if (ret <= 0) {
627                         /* No extent found -> no data */
628                         if (ret == 0)
629                                 ret = -ENXIO;
630                         inode_unlock(inode);
631                         return ret;
632                 }
633
634                 last = es.es_lblk;
635                 if (last != start)
636                         dataoff = (loff_t)last << blkbits;
637                 if (!ext4_es_is_unwritten(&es))
638                         break;
639
640                 /*
641                  * If there is an unwritten extent at this offset,
642                  * treat it as data or as a hole depending on whether
643                  * the page cache has data for it.
644                  */
645                 if (ext4_find_unwritten_pgoff(inode, SEEK_DATA,
646                                               es.es_lblk + es.es_len, &dataoff))
647                         break;
648                 last += es.es_len;
649                 dataoff = (loff_t)last << blkbits;
650                 cond_resched();
651         } while (last <= end);
652
653         inode_unlock(inode);
654
655         if (dataoff > isize)
656                 return -ENXIO;
657
658         return vfs_setpos(file, dataoff, maxsize);
659 }
660
661 /*
662  * ext4_seek_hole() retrieves the offset for SEEK_HOLE.
663  */
664 static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
665 {
666         struct inode *inode = file->f_mapping->host;
667         struct extent_status es;
668         ext4_lblk_t start, last, end;
669         loff_t holeoff, isize;
670         int blkbits;
671         int ret;
672
673         inode_lock(inode);
674
675         isize = i_size_read(inode);
676         if (offset >= isize) {
677                 inode_unlock(inode);
678                 return -ENXIO;
679         }
680
681         blkbits = inode->i_sb->s_blocksize_bits;
682         start = offset >> blkbits;
683         last = start;
684         end = isize >> blkbits;
685         holeoff = offset;
686
687         do {
688                 ret = ext4_get_next_extent(inode, last, end - last + 1, &es);
689                 if (ret < 0) {
690                         inode_unlock(inode);
691                         return ret;
692                 }
693                 /* Found a hole? */
694                 if (ret == 0 || es.es_lblk > last) {
695                         if (last != start)
696                                 holeoff = (loff_t)last << blkbits;
697                         break;
698                 }
699                 /*
700                  * If there is an unwritten extent at this offset,
701                  * treat it as data or as a hole depending on whether
702                  * the page cache has data for it.
703                  */
704                 if (ext4_es_is_unwritten(&es) &&
705                     ext4_find_unwritten_pgoff(inode, SEEK_HOLE,
706                                               last + es.es_len, &holeoff))
707                         break;
708
709                 last += es.es_len;
710                 holeoff = (loff_t)last << blkbits;
711                 cond_resched();
712         } while (last <= end);
713
714         inode_unlock(inode);
715
716         if (holeoff > isize)
717                 holeoff = isize;
718
719         return vfs_setpos(file, holeoff, maxsize);
720 }
721
722 /*
723  * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values
724  * by calling generic_file_llseek_size() with the appropriate maxbytes
725  * value for each.
726  */
727 loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
728 {
729         struct inode *inode = file->f_mapping->host;
730         loff_t maxbytes;
731
732         if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
733                 maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
734         else
735                 maxbytes = inode->i_sb->s_maxbytes;
736
737         switch (whence) {
738         case SEEK_SET:
739         case SEEK_CUR:
740         case SEEK_END:
741                 return generic_file_llseek_size(file, offset, whence,
742                                                 maxbytes, i_size_read(inode));
743         case SEEK_DATA:
744                 return ext4_seek_data(file, offset, maxbytes);
745         case SEEK_HOLE:
746                 return ext4_seek_hole(file, offset, maxbytes);
747         }
748
749         return -EINVAL;
750 }
751
752 const struct file_operations ext4_file_operations = {
753         .llseek         = ext4_llseek,
754         .read_iter      = ext4_file_read_iter,
755         .write_iter     = ext4_file_write_iter,
756         .unlocked_ioctl = ext4_ioctl,
757 #ifdef CONFIG_COMPAT
758         .compat_ioctl   = ext4_compat_ioctl,
759 #endif
760         .mmap           = ext4_file_mmap,
761         .open           = ext4_file_open,
762         .release        = ext4_release_file,
763         .fsync          = ext4_sync_file,
764         .get_unmapped_area = thp_get_unmapped_area,
765         .splice_read    = generic_file_splice_read,
766         .splice_write   = iter_file_splice_write,
767         .fallocate      = ext4_fallocate,
768 };
769
770 const struct inode_operations ext4_file_inode_operations = {
771         .setattr        = ext4_setattr,
772         .getattr        = ext4_getattr,
773         .listxattr      = ext4_listxattr,
774         .get_acl        = ext4_get_acl,
775         .set_acl        = ext4_set_acl,
776         .fiemap         = ext4_fiemap,
777 };
778