// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bkey_buf.h"
#include "btree_update.h"
#include "extent_update.h"
#include "fs.h"
#include "fs-io.h"
#include "inode.h"
#include "io.h"
#include "quota.h"

#include <linux/aio.h>
#include <linux/backing-dev.h>
#include <linux/falloc.h>
#include <linux/migrate.h>
#include <linux/mmu_context.h>
#include <linux/pagevec.h>
#include <linux/rmap.h>
#include <linux/sched/signal.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/uio.h>
#include <linux/writeback.h>

#include <trace/events/writeback.h>
static inline bool bio_full(struct bio *bio, unsigned len)
{
	if (bio->bi_vcnt >= bio->bi_max_vecs)
		return true;
	if (bio->bi_iter.bi_size > UINT_MAX - len)
		return true;
	return false;
}
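/*
 * The dio write path stashes the mapping it's doing IO to in
 * current->faults_disabled_mapping; the low bit of that pointer is borrowed
 * as a flag, set by the page fault handler to signal that it had to drop and
 * retake locks (see bch2_page_fault() and bch2_dio_write_loop() below):
 */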
static inline struct address_space *faults_disabled_mapping(void)
{
	return (void *) (((unsigned long) current->faults_disabled_mapping) & ~1UL);
}

static inline void set_fdm_dropped_locks(void)
{
	current->faults_disabled_mapping =
		(void *) (((unsigned long) current->faults_disabled_mapping)|1);
}

static inline bool fdm_dropped_locks(void)
{
	return ((unsigned long) current->faults_disabled_mapping) & 1;
}
struct bch_writepage_io {
	struct closure			cl;
	struct bch_inode_info		*inode;

	/* must be last: */
	struct bch_write_op		op;
};

struct dio_write {
	struct completion		done;
	struct kiocb			*req;
	struct mm_struct		*mm;
	unsigned			loop:1,
					sync:1,
					free_iov:1;
	struct quota_res		quota_res;
	u64				written;

	struct iov_iter			iter;
	struct iovec			inline_vecs[2];

	/* must be last: */
	struct bch_write_op		op;
};

struct dio_read {
	struct closure			cl;
	struct kiocb			*req;
	long				ret;

	/* must be last: */
	struct bch_read_bio		rbio;
};
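/*
 * Note: the "must be last" members above hold embedded bios that are
 * allocated from biosets (c->writepage_bioset, c->dio_write_bioset,
 * c->dio_read_bioset) with front pad sized for the containing struct; the
 * bio's inline biovecs live past the end of the struct, so nothing may
 * follow it.
 */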
/* pagecache_block must be held */
static noinline int write_invalidate_inode_pages_range(struct address_space *mapping,
						       loff_t start, loff_t end)
{
	int ret;

	/*
	 * XXX: the way this is currently implemented, we can spin if a process
	 * is continually redirtying a specific page
	 */
	do {
		if (!mapping->nrpages)
			return 0;

		ret = filemap_write_and_wait_range(mapping, start, end);
		if (ret)
			break;

		if (!mapping->nrpages)
			return 0;

		ret = invalidate_inode_pages2_range(mapping,
				start >> PAGE_SHIFT,
				end >> PAGE_SHIFT);
	} while (ret == -EBUSY);

	return ret;
}
#ifdef CONFIG_BCACHEFS_QUOTA

static void bch2_quota_reservation_put(struct bch_fs *c,
				       struct bch_inode_info *inode,
				       struct quota_res *res)
{
	if (!res->sectors)
		return;

	mutex_lock(&inode->ei_quota_lock);
	BUG_ON(res->sectors > inode->ei_quota_reserved);

	bch2_quota_acct(c, inode->ei_qid, Q_SPC,
			-((s64) res->sectors), KEY_TYPE_QUOTA_PREALLOC);
	inode->ei_quota_reserved -= res->sectors;
	mutex_unlock(&inode->ei_quota_lock);

	res->sectors = 0;
}

static int bch2_quota_reservation_add(struct bch_fs *c,
				      struct bch_inode_info *inode,
				      struct quota_res *res,
				      unsigned sectors,
				      bool check_enospc)
{
	int ret;

	mutex_lock(&inode->ei_quota_lock);
	ret = bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors,
			      check_enospc ? KEY_TYPE_QUOTA_PREALLOC : KEY_TYPE_QUOTA_NOCHECK);
	if (likely(!ret)) {
		inode->ei_quota_reserved += sectors;
		res->sectors += sectors;
	}
	mutex_unlock(&inode->ei_quota_lock);

	return ret;
}

#else

static void bch2_quota_reservation_put(struct bch_fs *c,
				       struct bch_inode_info *inode,
				       struct quota_res *res)
{
}

static int bch2_quota_reservation_add(struct bch_fs *c,
				      struct bch_inode_info *inode,
				      struct quota_res *res,
				      unsigned sectors,
				      bool check_enospc)
{
	return 0;
}

#endif
/* i_size updates: */

struct inode_new_size {
	loff_t		new_size;
	u64		now;
	unsigned	fields;
};

static int inode_set_size(struct bch_inode_info *inode,
			  struct bch_inode_unpacked *bi,
			  void *p)
{
	struct inode_new_size *s = p;

	bi->bi_size = s->new_size;
	if (s->fields & ATTR_ATIME)
		bi->bi_atime = s->now;
	if (s->fields & ATTR_MTIME)
		bi->bi_mtime = s->now;
	if (s->fields & ATTR_CTIME)
		bi->bi_ctime = s->now;

	return 0;
}

int __must_check bch2_write_inode_size(struct bch_fs *c,
				       struct bch_inode_info *inode,
				       loff_t new_size, unsigned fields)
{
	struct inode_new_size s = {
		.new_size	= new_size,
		.now		= bch2_current_time(c),
		.fields		= fields,
	};

	return bch2_write_inode(c, inode, inode_set_size, &s, fields);
}
static void i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode,
			   struct quota_res *quota_res, s64 sectors)
{
	mutex_lock(&inode->ei_quota_lock);
#ifdef CONFIG_BCACHEFS_QUOTA
	if (quota_res && sectors > 0) {
		BUG_ON(sectors > quota_res->sectors);
		BUG_ON(sectors > inode->ei_quota_reserved);

		quota_res->sectors -= sectors;
		inode->ei_quota_reserved -= sectors;
	} else {
		bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors, KEY_TYPE_QUOTA_WARN);
	}
#endif
	inode->v.i_blocks += sectors;
	mutex_unlock(&inode->ei_quota_lock);
}
/* stored in page->private: */

struct bch_page_sector {
	/* Uncompressed, fully allocated replicas: */
	unsigned		nr_replicas:3;

	/* Owns PAGE_SECTORS * replicas_reserved sized reservation: */
	unsigned		replicas_reserved:3;

	/* i_sectors: */
	enum {
		SECTOR_UNALLOCATED,
		SECTOR_RESERVED,
		SECTOR_DIRTY,
		SECTOR_ALLOCATED,
	}			state:2;
};

struct bch_page_state {
	spinlock_t		lock;
	atomic_t		write_count;
	struct bch_page_sector	s[PAGE_SECTORS];
};
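/*
 * Per sector state machine, as used below: SECTOR_UNALLOCATED means nothing
 * on disk yet, SECTOR_RESERVED means on disk space has been reserved (e.g. by
 * fallocate) but not written, SECTOR_DIRTY means dirty in the page cache with
 * disk/quota reservations held, SECTOR_ALLOCATED means written out.
 * bch2_set_page_dirty() moves sectors up to SECTOR_DIRTY;
 * bch2_clear_page_bits() and __bch2_writepage() move them back down/forward.
 */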
static inline struct bch_page_state *__bch2_page_state(struct page *page)
{
	return page_has_private(page)
		? (struct bch_page_state *) page_private(page)
		: NULL;
}

static inline struct bch_page_state *bch2_page_state(struct page *page)
{
	EBUG_ON(!PageLocked(page));

	return __bch2_page_state(page);
}

/* for newly allocated pages: */
static void __bch2_page_state_release(struct page *page)
{
	kfree(detach_page_private(page));
}

static void bch2_page_state_release(struct page *page)
{
	EBUG_ON(!PageLocked(page));
	__bch2_page_state_release(page);
}

/* for newly allocated pages: */
static struct bch_page_state *__bch2_page_state_create(struct page *page,
						       gfp_t gfp)
{
	struct bch_page_state *s;

	s = kzalloc(sizeof(*s), GFP_NOFS|gfp);
	if (!s)
		return NULL;

	spin_lock_init(&s->lock);
	attach_page_private(page, s);
	return s;
}

static struct bch_page_state *bch2_page_state_create(struct page *page,
						     gfp_t gfp)
{
	return bch2_page_state(page) ?: __bch2_page_state_create(page, gfp);
}
static inline unsigned inode_nr_replicas(struct bch_fs *c, struct bch_inode_info *inode)
{
	/* XXX: this should not be open coded */
	return inode->ei_inode.bi_data_replicas
		? inode->ei_inode.bi_data_replicas - 1
		: c->opts.data_replicas;
}

static inline unsigned sectors_to_reserve(struct bch_page_sector *s,
					  unsigned nr_replicas)
{
	return max(0, (int) nr_replicas -
		   s->nr_replicas -
		   s->replicas_reserved);
}
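/*
 * Example: with data_replicas = 2, a sector that already has one fully
 * allocated replica on disk and nothing reserved needs max(0, 2 - 1 - 0) = 1
 * more replica's worth of reservation; a sector that is fully allocated or
 * fully reserved needs 0.
 */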
static int bch2_get_page_disk_reservation(struct bch_fs *c,
				struct bch_inode_info *inode,
				struct page *page, bool check_enospc)
{
	struct bch_page_state *s = bch2_page_state_create(page, 0);
	unsigned nr_replicas = inode_nr_replicas(c, inode);
	struct disk_reservation disk_res = { 0 };
	unsigned i, disk_res_sectors = 0;
	int ret;

	if (!s)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(s->s); i++)
		disk_res_sectors += sectors_to_reserve(&s->s[i], nr_replicas);

	if (!disk_res_sectors)
		return 0;

	ret = bch2_disk_reservation_get(c, &disk_res,
					disk_res_sectors, 1,
					!check_enospc
					? BCH_DISK_RESERVATION_NOFAIL
					: 0);
	if (unlikely(ret))
		return ret;

	for (i = 0; i < ARRAY_SIZE(s->s); i++)
		s->s[i].replicas_reserved +=
			sectors_to_reserve(&s->s[i], nr_replicas);

	return 0;
}
struct bch2_page_reservation {
	struct disk_reservation	disk;
	struct quota_res	quota;
};

static void bch2_page_reservation_init(struct bch_fs *c,
				       struct bch_inode_info *inode,
				       struct bch2_page_reservation *res)
{
	memset(res, 0, sizeof(*res));

	res->disk.nr_replicas = inode_nr_replicas(c, inode);
}

static void bch2_page_reservation_put(struct bch_fs *c,
				      struct bch_inode_info *inode,
				      struct bch2_page_reservation *res)
{
	bch2_disk_reservation_put(c, &res->disk);
	bch2_quota_reservation_put(c, inode, &res->quota);
}
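/*
 * A page reservation bundles a disk reservation (for sectors not yet
 * allocated or reserved on disk) with a quota reservation (for sectors still
 * SECTOR_UNALLOCATED); it's taken before dirtying a range of a page and
 * consumed by bch2_set_page_dirty(). On quota failure the disk reservation
 * just taken is unwound:
 */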
static int bch2_page_reservation_get(struct bch_fs *c,
			struct bch_inode_info *inode, struct page *page,
			struct bch2_page_reservation *res,
			unsigned offset, unsigned len, bool check_enospc)
{
	struct bch_page_state *s = bch2_page_state_create(page, 0);
	unsigned i, disk_sectors = 0, quota_sectors = 0;
	int ret;

	if (!s)
		return -ENOMEM;

	for (i = round_down(offset, block_bytes(c)) >> 9;
	     i < round_up(offset + len, block_bytes(c)) >> 9;
	     i++) {
		disk_sectors += sectors_to_reserve(&s->s[i],
						res->disk.nr_replicas);
		quota_sectors += s->s[i].state == SECTOR_UNALLOCATED;
	}

	if (disk_sectors) {
		ret = bch2_disk_reservation_add(c, &res->disk,
						disk_sectors,
						!check_enospc
						? BCH_DISK_RESERVATION_NOFAIL
						: 0);
		if (unlikely(ret))
			return ret;
	}

	if (quota_sectors) {
		ret = bch2_quota_reservation_add(c, inode, &res->quota,
						 quota_sectors,
						 check_enospc);
		if (unlikely(ret)) {
			struct disk_reservation tmp = {
				.sectors = disk_sectors
			};

			bch2_disk_reservation_put(c, &tmp);
			res->disk.sectors -= disk_sectors;
			return ret;
		}
	}

	return 0;
}
static void bch2_clear_page_bits(struct page *page)
{
	struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_page_state *s = bch2_page_state(page);
	struct disk_reservation disk_res = { 0 };
	int i, dirty_sectors = 0;

	if (!s)
		return;

	EBUG_ON(!PageLocked(page));
	EBUG_ON(PageWriteback(page));

	for (i = 0; i < ARRAY_SIZE(s->s); i++) {
		disk_res.sectors += s->s[i].replicas_reserved;
		s->s[i].replicas_reserved = 0;

		if (s->s[i].state == SECTOR_DIRTY) {
			dirty_sectors++;
			s->s[i].state = SECTOR_UNALLOCATED;
		}
	}

	bch2_disk_reservation_put(c, &disk_res);

	if (dirty_sectors)
		i_sectors_acct(c, inode, NULL, -dirty_sectors);

	bch2_page_state_release(page);
}
static void bch2_set_page_dirty(struct bch_fs *c,
			struct bch_inode_info *inode, struct page *page,
			struct bch2_page_reservation *res,
			unsigned offset, unsigned len)
{
	struct bch_page_state *s = bch2_page_state(page);
	unsigned i, dirty_sectors = 0;

	WARN_ON((u64) page_offset(page) + offset + len >
		round_up((u64) i_size_read(&inode->v), block_bytes(c)));

	spin_lock(&s->lock);

	for (i = round_down(offset, block_bytes(c)) >> 9;
	     i < round_up(offset + len, block_bytes(c)) >> 9;
	     i++) {
		unsigned sectors = sectors_to_reserve(&s->s[i],
						res->disk.nr_replicas);

		/*
		 * This can happen if we race with the error path in
		 * bch2_writepage_io_done():
		 */
		sectors = min_t(unsigned, sectors, res->disk.sectors);

		s->s[i].replicas_reserved += sectors;
		res->disk.sectors -= sectors;

		if (s->s[i].state == SECTOR_UNALLOCATED)
			dirty_sectors++;

		s->s[i].state = max_t(unsigned, s->s[i].state, SECTOR_DIRTY);
	}

	spin_unlock(&s->lock);

	if (dirty_sectors)
		i_sectors_acct(c, inode, &res->quota, dirty_sectors);

	if (!PageDirty(page))
		filemap_dirty_folio(inode->v.i_mapping, page_folio(page));
}
vm_fault_t bch2_page_fault(struct vm_fault *vmf)
{
	struct file *file = vmf->vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct address_space *fdm = faults_disabled_mapping();
	struct bch_inode_info *inode = file_bch_inode(file);
	int ret;

	if (fdm == mapping)
		return VM_FAULT_SIGBUS;

	/* Lock ordering: */
	if (fdm > mapping) {
		struct bch_inode_info *fdm_host = to_bch_ei(fdm->host);

		if (bch2_pagecache_add_tryget(&inode->ei_pagecache_lock))
			goto got_lock;

		bch2_pagecache_block_put(&fdm_host->ei_pagecache_lock);

		bch2_pagecache_add_get(&inode->ei_pagecache_lock);
		bch2_pagecache_add_put(&inode->ei_pagecache_lock);

		bch2_pagecache_block_get(&fdm_host->ei_pagecache_lock);

		/* Signal that lock has been dropped: */
		set_fdm_dropped_locks();
		return VM_FAULT_SIGBUS;
	}

	bch2_pagecache_add_get(&inode->ei_pagecache_lock);
got_lock:
	ret = filemap_fault(vmf);
	bch2_pagecache_add_put(&inode->ei_pagecache_lock);

	return ret;
}
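/*
 * mkwrite takes the page reservation up front: by the time writeback sees a
 * dirty mmapped page, the space for it has already been reserved and
 * accounted, so writeback can't fail with -ENOSPC; if the reservation can't
 * be had, the fault fails with SIGBUS instead:
 */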
vm_fault_t bch2_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct file *file = vmf->vma->vm_file;
	struct bch_inode_info *inode = file_bch_inode(file);
	struct address_space *mapping = file->f_mapping;
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch2_page_reservation res;
	unsigned len;
	loff_t isize;
	int ret = VM_FAULT_LOCKED;

	bch2_page_reservation_init(c, inode, &res);

	sb_start_pagefault(inode->v.i_sb);
	file_update_time(file);

	/*
	 * Not strictly necessary, but helps avoid dio writes livelocking in
	 * write_invalidate_inode_pages_range() - can drop this if/when we get
	 * a write_invalidate_inode_pages_range() that works without dropping
	 * page lock before invalidating page
	 */
	bch2_pagecache_add_get(&inode->ei_pagecache_lock);

	lock_page(page);
	isize = i_size_read(&inode->v);

	if (page->mapping != mapping || page_offset(page) >= isize) {
		unlock_page(page);
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	len = min_t(loff_t, PAGE_SIZE, isize - page_offset(page));

	if (bch2_page_reservation_get(c, inode, page, &res, 0, len, true)) {
		unlock_page(page);
		ret = VM_FAULT_SIGBUS;
		goto out;
	}

	bch2_set_page_dirty(c, inode, page, &res, 0, len);
	bch2_page_reservation_put(c, inode, &res);

	wait_for_stable_page(page);
out:
	bch2_pagecache_add_put(&inode->ei_pagecache_lock);
	sb_end_pagefault(inode->v.i_sb);

	return ret;
}
void bch2_invalidate_folio(struct folio *folio, size_t offset, size_t length)
{
	if (offset || length < folio_size(folio))
		return;

	bch2_clear_page_bits(&folio->page);
}

bool bch2_release_folio(struct folio *folio, gfp_t gfp_mask)
{
	if (folio_test_dirty(folio) || folio_test_writeback(folio))
		return false;

	bch2_clear_page_bits(&folio->page);
	return true;
}
/* readpage(s): */

static void bch2_readpages_end_io(struct bio *bio)
{
	struct bvec_iter_all iter;
	struct bio_vec *bv;

	bio_for_each_segment_all(bv, bio, iter) {
		struct page *page = bv->bv_page;

		if (!bio->bi_status) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	}

	bio_put(bio);
}
struct readpages_iter {
	struct address_space	*mapping;
	struct page		**pages;
	unsigned		nr_pages;
	unsigned		idx;
	pgoff_t			offset;
};

static int readpages_iter_init(struct readpages_iter *iter,
			       struct readahead_control *ractl)
{
	unsigned i, nr_pages = readahead_count(ractl);

	memset(iter, 0, sizeof(*iter));

	iter->mapping	= ractl->mapping;
	iter->offset	= readahead_index(ractl);
	iter->nr_pages	= nr_pages;

	iter->pages = kmalloc_array(nr_pages, sizeof(struct page *), GFP_NOFS);
	if (!iter->pages)
		return -ENOMEM;

	nr_pages = __readahead_batch(ractl, iter->pages, nr_pages);
	for (i = 0; i < nr_pages; i++) {
		__bch2_page_state_create(iter->pages[i], __GFP_NOFAIL);
		put_page(iter->pages[i]);
	}

	return 0;
}

static inline struct page *readpage_iter_next(struct readpages_iter *iter)
{
	if (iter->idx >= iter->nr_pages)
		return NULL;

	EBUG_ON(iter->pages[iter->idx]->index != iter->offset + iter->idx);

	return iter->pages[iter->idx];
}
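/*
 * When a read completes, mark the sectors it covered with the state of the
 * extent they came from, so that later writes to those sectors know how many
 * fully allocated replicas already exist (and thus how much reservation they
 * still need):
 */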
static void bch2_add_page_sectors(struct bio *bio, struct bkey_s_c k)
{
	struct bvec_iter iter;
	struct bio_vec bv;
	unsigned nr_ptrs = k.k->type == KEY_TYPE_reflink_v
		? 0 : bch2_bkey_nr_ptrs_fully_allocated(k);
	unsigned state = k.k->type == KEY_TYPE_reservation
		? SECTOR_RESERVED
		: SECTOR_ALLOCATED;

	bio_for_each_segment(bv, bio, iter) {
		struct bch_page_state *s = bch2_page_state(bv.bv_page);
		unsigned i;

		for (i = bv.bv_offset >> 9;
		     i < (bv.bv_offset + bv.bv_len) >> 9;
		     i++) {
			s->s[i].nr_replicas = nr_ptrs;
			s->s[i].state = state;
		}
	}
}
static bool extent_partial_reads_expensive(struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	struct bch_extent_crc_unpacked crc;
	const union bch_extent_entry *i;

	bkey_for_each_crc(k.k, ptrs, crc, i)
		if (crc.csum_type || crc.compression_type)
			return true;
	return false;
}
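/*
 * Extend a read bio to cover more of the current extent: checksummed or
 * compressed extents have to be read (and checksummed/decompressed) in their
 * entirety anyway, so when partial reads are expensive it's nearly free to
 * pull the extent's remaining pages into the cache now:
 */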
static void readpage_bio_extend(struct readpages_iter *iter,
				struct bio *bio,
				unsigned sectors_this_extent,
				bool get_more)
{
	while (bio_sectors(bio) < sectors_this_extent &&
	       bio->bi_vcnt < bio->bi_max_vecs) {
		pgoff_t page_offset = bio_end_sector(bio) >> PAGE_SECTOR_SHIFT;
		struct page *page = readpage_iter_next(iter);
		int ret;

		if (page) {
			if (iter->offset + iter->idx != page_offset)
				break;

			iter->idx++;
		} else {
			if (!get_more)
				break;

			page = xa_load(&iter->mapping->i_pages, page_offset);
			if (page && !xa_is_value(page))
				break;

			page = __page_cache_alloc(readahead_gfp_mask(iter->mapping));
			if (!page)
				break;

			if (!__bch2_page_state_create(page, 0)) {
				put_page(page);
				break;
			}

			ret = add_to_page_cache_lru(page, iter->mapping,
						    page_offset, GFP_NOFS);
			if (ret) {
				__bch2_page_state_release(page);
				put_page(page);
				break;
			}

			put_page(page);
		}

		BUG_ON(!bio_add_page(bio, page, PAGE_SIZE, 0));
	}
}
static void bchfs_read(struct btree_trans *trans, struct btree_iter *iter,
		       struct bch_read_bio *rbio, u64 inum,
		       struct readpages_iter *readpages_iter)
{
	struct bch_fs *c = trans->c;
	struct bkey_buf sk;
	int flags = BCH_READ_RETRY_IF_STALE|
		BCH_READ_MAY_PROMOTE;
	int ret = 0;

	rbio->c = c;
	rbio->start_time = local_clock();

	bch2_bkey_buf_init(&sk);
retry:
	while (1) {
		struct bkey_s_c k;
		unsigned bytes, sectors, offset_into_extent;

		bch2_btree_iter_set_pos(iter,
				POS(inum, rbio->bio.bi_iter.bi_sector));

		k = bch2_btree_iter_peek_slot(iter);
		ret = bkey_err(k);
		if (ret)
			break;

		offset_into_extent = iter->pos.offset -
			bkey_start_offset(k.k);
		sectors = k.k->size - offset_into_extent;

		bch2_bkey_buf_reassemble(&sk, c, k);

		ret = bch2_read_indirect_extent(trans,
					&offset_into_extent, &sk);
		if (ret)
			break;

		k = bkey_i_to_s_c(sk.k);

		sectors = min(sectors, k.k->size - offset_into_extent);

		bch2_trans_unlock(trans);

		if (readpages_iter)
			readpage_bio_extend(readpages_iter, &rbio->bio, sectors,
					    extent_partial_reads_expensive(k));

		bytes = min(sectors, bio_sectors(&rbio->bio)) << 9;
		swap(rbio->bio.bi_iter.bi_size, bytes);

		if (rbio->bio.bi_iter.bi_size == bytes)
			flags |= BCH_READ_LAST_FRAGMENT;

		if (bkey_extent_is_allocation(k.k))
			bch2_add_page_sectors(&rbio->bio, k);

		bch2_read_extent(trans, rbio, k, offset_into_extent, flags);

		if (flags & BCH_READ_LAST_FRAGMENT)
			break;

		swap(rbio->bio.bi_iter.bi_size, bytes);
		bio_advance(&rbio->bio, bytes);
	}

	if (ret == -EINTR)
		goto retry;

	if (ret) {
		bch_err_inum_ratelimited(c, inum,
				"read error %i from btree lookup", ret);
		rbio->bio.bi_status = BLK_STS_IOERR;
		bio_endio(&rbio->bio);
	}

	bch2_bkey_buf_exit(&sk, c);
}
void bch2_readahead(struct readahead_control *ractl)
{
	struct bch_inode_info *inode = to_bch_ei(ractl->mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_io_opts opts = io_opts(c, &inode->ei_inode);
	struct btree_trans trans;
	struct btree_iter *iter;
	struct page *page;
	struct readpages_iter readpages_iter;
	int ret;

	ret = readpages_iter_init(&readpages_iter, ractl);
	BUG_ON(ret);

	bch2_trans_init(&trans, c, 0, 0);
	iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, POS_MIN,
				   BTREE_ITER_SLOTS);

	bch2_pagecache_add_get(&inode->ei_pagecache_lock);

	while ((page = readpage_iter_next(&readpages_iter))) {
		pgoff_t index = readpages_iter.offset + readpages_iter.idx;
		unsigned n = min_t(unsigned,
				   readpages_iter.nr_pages -
				   readpages_iter.idx,
				   BIO_MAX_VECS);
		struct bch_read_bio *rbio =
			rbio_init(bio_alloc_bioset(NULL, n, REQ_OP_READ,
						   GFP_NOFS, &c->bio_read),
				  opts);

		readpages_iter.idx++;

		rbio->bio.bi_iter.bi_sector = (sector_t) index << PAGE_SECTOR_SHIFT;
		rbio->bio.bi_end_io = bch2_readpages_end_io;
		BUG_ON(!bio_add_page(&rbio->bio, page, PAGE_SIZE, 0));

		bchfs_read(&trans, iter, rbio, inode->v.i_ino,
			   &readpages_iter);
	}

	bch2_pagecache_add_put(&inode->ei_pagecache_lock);

	bch2_trans_exit(&trans);
	kfree(readpages_iter.pages);
}
static void __bchfs_readpage(struct bch_fs *c, struct bch_read_bio *rbio,
			     u64 inum, struct page *page)
{
	struct btree_trans trans;
	struct btree_iter *iter;

	bch2_page_state_create(page, __GFP_NOFAIL);

	rbio->bio.bi_opf = REQ_OP_READ|REQ_SYNC;
	rbio->bio.bi_iter.bi_sector =
		(sector_t) page->index << PAGE_SECTOR_SHIFT;
	BUG_ON(!bio_add_page(&rbio->bio, page, PAGE_SIZE, 0));

	bch2_trans_init(&trans, c, 0, 0);
	iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, POS_MIN,
				   BTREE_ITER_SLOTS);

	bchfs_read(&trans, iter, rbio, inum, NULL);

	bch2_trans_exit(&trans);
}
static void bch2_read_single_page_end_io(struct bio *bio)
{
	complete(bio->bi_private);
}

static int bch2_read_single_page(struct page *page,
				 struct address_space *mapping)
{
	struct bch_inode_info *inode = to_bch_ei(mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_read_bio *rbio;
	int ret;
	DECLARE_COMPLETION_ONSTACK(done);

	rbio = rbio_init(bio_alloc_bioset(NULL, 1, REQ_OP_READ, GFP_NOFS, &c->bio_read),
			 io_opts(c, &inode->ei_inode));
	rbio->bio.bi_private = &done;
	rbio->bio.bi_end_io = bch2_read_single_page_end_io;

	__bchfs_readpage(c, rbio, inode->v.i_ino, page);
	wait_for_completion(&done);

	ret = blk_status_to_errno(rbio->bio.bi_status);
	bio_put(&rbio->bio);

	if (ret < 0)
		return ret;

	SetPageUptodate(page);
	return 0;
}

int bch2_read_folio(struct file *file, struct folio *folio)
{
	struct page *page = &folio->page;
	int ret;

	ret = bch2_read_single_page(page, page->mapping);
	folio_unlock(folio);
	return ret;
}
/* writepages: */

struct bch_writepage_state {
	struct bch_writepage_io	*io;
	struct bch_io_opts	opts;
};

static inline struct bch_writepage_state bch_writepage_state_init(struct bch_fs *c,
								  struct bch_inode_info *inode)
{
	return (struct bch_writepage_state) {
		.opts = io_opts(c, &inode->ei_inode)
	};
}
static void bch2_writepage_io_free(struct closure *cl)
{
	struct bch_writepage_io *io = container_of(cl,
					struct bch_writepage_io, cl);

	bio_put(&io->op.wbio.bio);
}
static void bch2_writepage_io_done(struct closure *cl)
{
	struct bch_writepage_io *io = container_of(cl,
					struct bch_writepage_io, cl);
	struct bch_fs *c = io->op.c;
	struct bio *bio = &io->op.wbio.bio;
	struct bvec_iter_all iter;
	struct bio_vec *bvec;
	unsigned i;

	if (io->op.error) {
		set_bit(EI_INODE_ERROR, &io->inode->ei_flags);

		bio_for_each_segment_all(bvec, bio, iter) {
			struct bch_page_state *s;

			SetPageError(bvec->bv_page);
			mapping_set_error(bvec->bv_page->mapping, -EIO);

			s = __bch2_page_state(bvec->bv_page);
			spin_lock(&s->lock);
			for (i = 0; i < PAGE_SECTORS; i++)
				s->s[i].nr_replicas = 0;
			spin_unlock(&s->lock);
		}
	}

	if (io->op.flags & BCH_WRITE_WROTE_DATA_INLINE) {
		bio_for_each_segment_all(bvec, bio, iter) {
			struct bch_page_state *s;

			s = __bch2_page_state(bvec->bv_page);
			spin_lock(&s->lock);
			for (i = 0; i < PAGE_SECTORS; i++)
				s->s[i].nr_replicas = 0;
			spin_unlock(&s->lock);
		}
	}

	/*
	 * racing with fallocate can cause us to add fewer sectors than
	 * expected - but we shouldn't add more sectors than expected:
	 */
	BUG_ON(io->op.i_sectors_delta > 0);

	/*
	 * (error (due to going RO) halfway through a page can screw that up
	 * slightly)
	 * XXX wtf?
	   BUG_ON(io->op.op.i_sectors_delta >= PAGE_SECTORS);
	 */

	/*
	 * PageWriteback is effectively our ref on the inode - fixup i_blocks
	 * before calling end_page_writeback:
	 */
	i_sectors_acct(c, io->inode, NULL, io->op.i_sectors_delta);

	bio_for_each_segment_all(bvec, bio, iter) {
		struct bch_page_state *s = __bch2_page_state(bvec->bv_page);

		if (atomic_dec_and_test(&s->write_count))
			end_page_writeback(bvec->bv_page);
	}

	closure_return_with_destructor(&io->cl, bch2_writepage_io_free);
}
static void bch2_writepage_do_io(struct bch_writepage_state *w)
{
	struct bch_writepage_io *io = w->io;

	w->io = NULL;
	closure_call(&io->op.cl, bch2_write, NULL, &io->cl);
	continue_at(&io->cl, bch2_writepage_io_done, NULL);
}
/*
 * Get a bch_writepage_io and add @page to it - appending to an existing one if
 * possible, else allocating a new one:
 */
static void bch2_writepage_io_alloc(struct bch_fs *c,
				    struct writeback_control *wbc,
				    struct bch_writepage_state *w,
				    struct bch_inode_info *inode,
				    u64 sector,
				    unsigned nr_replicas)
{
	struct bch_write_op *op;

	w->io = container_of(bio_alloc_bioset(NULL, BIO_MAX_VECS,
					      REQ_OP_WRITE,
					      GFP_NOFS,
					      &c->writepage_bioset),
			     struct bch_writepage_io, op.wbio.bio);

	closure_init(&w->io->cl, NULL);
	w->io->inode		= inode;
	op			= &w->io->op;
	bch2_write_op_init(op, c, w->opts);
	op->target		= w->opts.foreground_target;
	op_journal_seq_set(op, &inode->ei_journal_seq);
	op->nr_replicas		= nr_replicas;
	op->res.nr_replicas	= nr_replicas;
	op->write_point		= writepoint_hashed(inode->ei_last_dirtied);
	op->pos			= POS(inode->v.i_ino, sector);
	op->wbio.bio.bi_iter.bi_sector = sector;
	op->wbio.bio.bi_opf	= wbc_to_write_flags(wbc);
}
static int __bch2_writepage(struct folio *folio,
			    struct writeback_control *wbc,
			    void *data)
{
	struct page *page = &folio->page;
	struct bch_inode_info *inode = to_bch_ei(page->mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_writepage_state *w = data;
	struct bch_page_state *s, orig;
	unsigned i, offset, nr_replicas_this_write = U32_MAX;
	loff_t i_size = i_size_read(&inode->v);
	pgoff_t end_index = i_size >> PAGE_SHIFT;
	int ret;

	EBUG_ON(!PageUptodate(page));

	/* Is the page fully inside i_size? */
	if (page->index < end_index)
		goto do_io;

	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_SIZE - 1);
	if (page->index > end_index || !offset) {
		unlock_page(page);
		return 0;
	}

	/*
	 * The page straddles i_size. It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped. "A file is mapped
	 * in multiples of the page size. For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	zero_user_segment(page, offset, PAGE_SIZE);
do_io:
	s = bch2_page_state_create(page, __GFP_NOFAIL);

	ret = bch2_get_page_disk_reservation(c, inode, page, true);
	if (ret) {
		SetPageError(page);
		mapping_set_error(page->mapping, ret);
		unlock_page(page);
		return 0;
	}

	/* Before unlocking the page, get copy of reservations: */
	orig = *s;

	for (i = 0; i < PAGE_SECTORS; i++) {
		if (s->s[i].state < SECTOR_DIRTY)
			continue;

		nr_replicas_this_write =
			min_t(unsigned, nr_replicas_this_write,
			      s->s[i].nr_replicas +
			      s->s[i].replicas_reserved);
	}

	for (i = 0; i < PAGE_SECTORS; i++) {
		if (s->s[i].state < SECTOR_DIRTY)
			continue;

		s->s[i].nr_replicas = w->opts.compression
			? 0 : nr_replicas_this_write;

		s->s[i].replicas_reserved = 0;
		s->s[i].state = SECTOR_ALLOCATED;
	}

	BUG_ON(atomic_read(&s->write_count));
	atomic_set(&s->write_count, 1);

	BUG_ON(PageWriteback(page));
	set_page_writeback(page);

	unlock_page(page);

	offset = 0;
	while (1) {
		unsigned sectors = 1, dirty_sectors = 0, reserved_sectors = 0;
		u64 sector;

		while (offset < PAGE_SECTORS &&
		       orig.s[offset].state < SECTOR_DIRTY)
			offset++;

		if (offset == PAGE_SECTORS)
			break;

		sector = ((u64) page->index << PAGE_SECTOR_SHIFT) + offset;

		while (offset + sectors < PAGE_SECTORS &&
		       orig.s[offset + sectors].state >= SECTOR_DIRTY)
			sectors++;

		for (i = offset; i < offset + sectors; i++) {
			reserved_sectors += orig.s[i].replicas_reserved;
			dirty_sectors += orig.s[i].state == SECTOR_DIRTY;
		}

		if (w->io &&
		    (w->io->op.res.nr_replicas != nr_replicas_this_write ||
		     bio_full(&w->io->op.wbio.bio, PAGE_SIZE) ||
		     w->io->op.wbio.bio.bi_iter.bi_size + (sectors << 9) >=
		     (BIO_MAX_VECS * PAGE_SIZE) ||
		     bio_end_sector(&w->io->op.wbio.bio) != sector))
			bch2_writepage_do_io(w);

		if (!w->io)
			bch2_writepage_io_alloc(c, wbc, w, inode, sector,
						nr_replicas_this_write);

		atomic_inc(&s->write_count);

		BUG_ON(inode != w->io->inode);
		BUG_ON(!bio_add_page(&w->io->op.wbio.bio, page,
				     sectors << 9, offset << 9));

		/* Check for writing past i_size: */
		WARN_ON((bio_end_sector(&w->io->op.wbio.bio) << 9) >
			round_up(i_size, block_bytes(c)));

		w->io->op.res.sectors += reserved_sectors;
		w->io->op.i_sectors_delta -= dirty_sectors;
		w->io->op.new_i_size = i_size;

		offset += sectors;
	}

	if (atomic_dec_and_test(&s->write_count))
		end_page_writeback(page);

	return 0;
}
int bch2_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	struct bch_fs *c = mapping->host->i_sb->s_fs_info;
	struct bch_writepage_state w =
		bch_writepage_state_init(c, to_bch_ei(mapping->host));
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = write_cache_pages(mapping, wbc, __bch2_writepage, &w);
	if (w.io)
		bch2_writepage_do_io(&w);
	blk_finish_plug(&plug);
	return ret;
}

int bch2_writepage(struct page *page, struct writeback_control *wbc)
{
	struct bch_fs *c = page->mapping->host->i_sb->s_fs_info;
	struct bch_writepage_state w =
		bch_writepage_state_init(c, to_bch_ei(page->mapping->host));
	int ret;

	ret = __bch2_writepage(page_folio(page), wbc, &w);
	if (w.io)
		bch2_writepage_do_io(&w);
	return ret;
}
/* buffered writes: */

int bch2_write_begin(struct file *file, struct address_space *mapping,
		     loff_t pos, unsigned len,
		     struct page **pagep, void **fsdata)
{
	struct bch_inode_info *inode = to_bch_ei(mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch2_page_reservation *res;
	pgoff_t index = pos >> PAGE_SHIFT;
	unsigned offset = pos & (PAGE_SIZE - 1);
	struct page *page;
	int ret = -ENOMEM;

	res = kmalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	bch2_page_reservation_init(c, inode, res);
	*fsdata = res;

	bch2_pagecache_add_get(&inode->ei_pagecache_lock);

	page = grab_cache_page_write_begin(mapping, index);
	if (!page)
		goto err_unlock;

	if (PageUptodate(page))
		goto out;

	/* If we're writing entire page, don't need to read it in first: */
	if (len == PAGE_SIZE)
		goto out;

	if (!offset && pos + len >= inode->v.i_size) {
		zero_user_segment(page, len, PAGE_SIZE);
		flush_dcache_page(page);
		goto out;
	}

	if (index > inode->v.i_size >> PAGE_SHIFT) {
		zero_user_segments(page, 0, offset, offset + len, PAGE_SIZE);
		flush_dcache_page(page);
		goto out;
	}
readpage:
	ret = bch2_read_single_page(page, mapping);
	if (ret)
		goto err;
out:
	ret = bch2_page_reservation_get(c, inode, page, res,
					offset, len, true);
	if (ret) {
		if (!PageUptodate(page)) {
			/*
			 * If the page hasn't been read in, we won't know if we
			 * actually need a reservation - we don't actually need
			 * to read here, we just need to check if the page is
			 * fully backed by uncompressed data:
			 */
			goto readpage;
		}

		goto err;
	}

	*pagep = page;
	return 0;
err:
	unlock_page(page);
	put_page(page);
	*pagep = NULL;
err_unlock:
	bch2_pagecache_add_put(&inode->ei_pagecache_lock);
	kfree(res);
	*fsdata = NULL;
	return ret;
}
int bch2_write_end(struct file *file, struct address_space *mapping,
		   loff_t pos, unsigned len, unsigned copied,
		   struct page *page, void *fsdata)
{
	struct bch_inode_info *inode = to_bch_ei(mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch2_page_reservation *res = fsdata;
	unsigned offset = pos & (PAGE_SIZE - 1);

	lockdep_assert_held(&inode->v.i_rwsem);

	if (unlikely(copied < len && !PageUptodate(page))) {
		/*
		 * The page needs to be read in, but that would destroy
		 * our partial write - simplest thing is to just force
		 * userspace to redo the write:
		 */
		zero_user(page, 0, PAGE_SIZE);
		flush_dcache_page(page);
		copied = 0;
	}

	spin_lock(&inode->v.i_lock);
	if (pos + copied > inode->v.i_size)
		i_size_write(&inode->v, pos + copied);
	spin_unlock(&inode->v.i_lock);

	if (copied) {
		if (!PageUptodate(page))
			SetPageUptodate(page);

		bch2_set_page_dirty(c, inode, page, res, offset, copied);

		inode->ei_last_dirtied = (unsigned long) current;
	}

	unlock_page(page);
	put_page(page);
	bch2_pagecache_add_put(&inode->ei_pagecache_lock);

	bch2_page_reservation_put(c, inode, res);
	kfree(res);

	return copied;
}

#define WRITE_BATCH_PAGES	32
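/*
 * The buffered write path works on batches of up to WRITE_BATCH_PAGES pages
 * at a time: the whole batch is locked and reserved up front, then copied to
 * and marked dirty in one pass, keeping page locking overhead and
 * i_size/dirty accounting in one place:
 */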
static int __bch2_buffered_write(struct bch_inode_info *inode,
				 struct address_space *mapping,
				 struct iov_iter *iter,
				 loff_t pos, unsigned len)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct page *pages[WRITE_BATCH_PAGES];
	struct bch2_page_reservation res;
	unsigned long index = pos >> PAGE_SHIFT;
	unsigned offset = pos & (PAGE_SIZE - 1);
	unsigned nr_pages = DIV_ROUND_UP(offset + len, PAGE_SIZE);
	unsigned i, reserved = 0, set_dirty = 0;
	unsigned copied = 0, nr_pages_copied = 0;
	int ret = 0;

	BUG_ON(nr_pages > ARRAY_SIZE(pages));

	bch2_page_reservation_init(c, inode, &res);

	for (i = 0; i < nr_pages; i++) {
		pages[i] = grab_cache_page_write_begin(mapping, index + i);
		if (!pages[i]) {
			nr_pages = i;
			if (!i) {
				ret = -ENOMEM;
				goto out;
			}
			len = min_t(unsigned, len,
				    nr_pages * PAGE_SIZE - offset);
			break;
		}
	}

	if (offset && !PageUptodate(pages[0])) {
		ret = bch2_read_single_page(pages[0], mapping);
		if (ret)
			goto out;
	}

	if ((pos + len) & (PAGE_SIZE - 1) &&
	    !PageUptodate(pages[nr_pages - 1])) {
		if ((index + nr_pages - 1) << PAGE_SHIFT >= inode->v.i_size) {
			zero_user(pages[nr_pages - 1], 0, PAGE_SIZE);
		} else {
			ret = bch2_read_single_page(pages[nr_pages - 1], mapping);
			if (ret)
				goto out;
		}
	}

	while (reserved < len) {
		struct page *page = pages[(offset + reserved) >> PAGE_SHIFT];
		unsigned pg_offset = (offset + reserved) & (PAGE_SIZE - 1);
		unsigned pg_len = min_t(unsigned, len - reserved,
					PAGE_SIZE - pg_offset);
retry_reservation:
		ret = bch2_page_reservation_get(c, inode, page, &res,
						pg_offset, pg_len, true);

		if (ret && !PageUptodate(page)) {
			ret = bch2_read_single_page(page, mapping);
			if (!ret)
				goto retry_reservation;
		}

		if (ret)
			goto out;

		reserved += pg_len;
	}

	if (mapping_writably_mapped(mapping))
		for (i = 0; i < nr_pages; i++)
			flush_dcache_page(pages[i]);

	while (copied < len) {
		struct page *page = pages[(offset + copied) >> PAGE_SHIFT];
		unsigned pg_offset = (offset + copied) & (PAGE_SIZE - 1);
		unsigned pg_len = min_t(unsigned, len - copied,
					PAGE_SIZE - pg_offset);
		unsigned pg_copied = copy_page_from_iter_atomic(page,
						pg_offset, pg_len, iter);

		if (!pg_copied)
			break;

		if (!PageUptodate(page) &&
		    pg_copied != PAGE_SIZE &&
		    pos + copied + pg_copied < inode->v.i_size) {
			zero_user(page, 0, PAGE_SIZE);
			break;
		}

		flush_dcache_page(page);
		copied += pg_copied;

		if (pg_copied != pg_len)
			break;
	}

	if (!copied)
		goto out;

	spin_lock(&inode->v.i_lock);
	if (pos + copied > inode->v.i_size)
		i_size_write(&inode->v, pos + copied);
	spin_unlock(&inode->v.i_lock);

	while (set_dirty < copied) {
		struct page *page = pages[(offset + set_dirty) >> PAGE_SHIFT];
		unsigned pg_offset = (offset + set_dirty) & (PAGE_SIZE - 1);
		unsigned pg_len = min_t(unsigned, copied - set_dirty,
					PAGE_SIZE - pg_offset);

		if (!PageUptodate(page))
			SetPageUptodate(page);

		bch2_set_page_dirty(c, inode, page, &res, pg_offset, pg_len);
		unlock_page(page);
		put_page(page);

		set_dirty += pg_len;
	}

	nr_pages_copied = DIV_ROUND_UP(offset + copied, PAGE_SIZE);
	inode->ei_last_dirtied = (unsigned long) current;
out:
	for (i = nr_pages_copied; i < nr_pages; i++) {
		unlock_page(pages[i]);
		put_page(pages[i]);
	}

	bch2_page_reservation_put(c, inode, &res);

	return copied ?: ret;
}
static ssize_t bch2_buffered_write(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct bch_inode_info *inode = file_bch_inode(file);
	loff_t pos = iocb->ki_pos;
	ssize_t written = 0;
	int ret = 0;

	bch2_pagecache_add_get(&inode->ei_pagecache_lock);

	do {
		unsigned offset = pos & (PAGE_SIZE - 1);
		unsigned bytes = min_t(unsigned long, iov_iter_count(iter),
				       PAGE_SIZE * WRITE_BATCH_PAGES - offset);
again:
		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 *
		 * Not only is this an optimisation, but it is also required
		 * to check that the address is actually valid, when atomic
		 * usercopies are used, below.
		 */
		if (unlikely(fault_in_iov_iter_readable(iter, bytes))) {
			bytes = min_t(unsigned long, iov_iter_count(iter),
				      PAGE_SIZE - offset);

			if (unlikely(fault_in_iov_iter_readable(iter, bytes))) {
				ret = -EFAULT;
				break;
			}
		}

		if (unlikely(fatal_signal_pending(current))) {
			ret = -EINTR;
			break;
		}

		ret = __bch2_buffered_write(inode, mapping, iter, pos, bytes);
		if (unlikely(ret < 0))
			break;

		cond_resched();

		if (unlikely(ret == 0)) {
			/*
			 * If we were unable to copy any data at all, we must
			 * fall back to a single segment length write.
			 *
			 * If we didn't fallback here, we could livelock
			 * because not all segments in the iov can be copied at
			 * once without a pagefault.
			 */
			bytes = min_t(unsigned long, PAGE_SIZE - offset,
				      iov_iter_single_seg_count(iter));
			goto again;
		}
		pos += ret;
		written += ret;
		ret = 0;

		balance_dirty_pages_ratelimited(mapping);
	} while (iov_iter_count(iter));

	bch2_pagecache_add_put(&inode->ei_pagecache_lock);

	return written ? written : ret;
}
/* O_DIRECT reads */

static void bch2_dio_read_complete(struct closure *cl)
{
	struct dio_read *dio = container_of(cl, struct dio_read, cl);

	dio->req->ki_complete(dio->req, dio->ret);
	bio_check_pages_dirty(&dio->rbio.bio);	/* transfers ownership */
}

static void bch2_direct_IO_read_endio(struct bio *bio)
{
	struct dio_read *dio = bio->bi_private;

	if (bio->bi_status)
		dio->ret = blk_status_to_errno(bio->bi_status);

	closure_put(&dio->cl);
}

static void bch2_direct_IO_read_split_endio(struct bio *bio)
{
	bch2_direct_IO_read_endio(bio);
	bio_check_pages_dirty(bio);	/* transfers ownership */
}

static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
{
	struct file *file = req->ki_filp;
	struct bch_inode_info *inode = file_bch_inode(file);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_io_opts opts = io_opts(c, &inode->ei_inode);
	struct dio_read *dio;
	struct bio *bio;
	loff_t offset = req->ki_pos;
	bool sync = is_sync_kiocb(req);
	size_t shorten;
	ssize_t ret;

	if ((offset|iter->count) & (block_bytes(c) - 1))
		return -EINVAL;

	ret = min_t(loff_t, iter->count,
		    max_t(loff_t, 0, i_size_read(&inode->v) - offset));

	if (!ret)
		return ret;

	shorten = iov_iter_count(iter) - round_up(ret, block_bytes(c));
	iter->count -= shorten;

	bio = bio_alloc_bioset(NULL,
			       iov_iter_npages(iter, BIO_MAX_VECS),
			       REQ_OP_READ,
			       GFP_KERNEL,
			       &c->dio_read_bioset);

	bio->bi_end_io = bch2_direct_IO_read_endio;

	dio = container_of(bio, struct dio_read, rbio.bio);
	closure_init(&dio->cl, NULL);

	/*
	 * this is a _really_ horrible hack just to avoid an atomic sub at the
	 * end:
	 */
	if (!sync) {
		set_closure_fn(&dio->cl, bch2_dio_read_complete, NULL);
		atomic_set(&dio->cl.remaining,
			   CLOSURE_REMAINING_INITIALIZER -
			   CLOSURE_RUNNING +
			   CLOSURE_DESTRUCTOR);
	} else {
		atomic_set(&dio->cl.remaining,
			   CLOSURE_REMAINING_INITIALIZER + 1);
	}

	dio->req	= req;
	dio->ret	= ret;

	goto start;
	while (iter->count) {
		bio = bio_alloc_bioset(NULL,
				       iov_iter_npages(iter, BIO_MAX_VECS),
				       REQ_OP_READ,
				       GFP_KERNEL,
				       &c->bio_read);
		bio->bi_end_io = bch2_direct_IO_read_split_endio;
start:
		bio->bi_opf = REQ_OP_READ|REQ_SYNC;
		bio->bi_iter.bi_sector = offset >> 9;
		bio->bi_private = dio;

		ret = bio_iov_iter_get_pages(bio, iter);
		if (ret < 0) {
			/* XXX: fault inject this path */
			bio->bi_status = BLK_STS_RESOURCE;
			bio_endio(bio);
			break;
		}

		offset += bio->bi_iter.bi_size;
		bio_set_pages_dirty(bio);

		if (iter->count)
			closure_get(&dio->cl);

		bch2_read(c, rbio_init(bio, opts), inode->v.i_ino);
	}

	iter->count += shorten;

	if (sync) {
		closure_sync(&dio->cl);
		closure_debug_destroy(&dio->cl);

		ret = dio->ret;
		bio_check_pages_dirty(&dio->rbio.bio);	/* transfers ownership */
		return ret;
	} else {
		return -EIOCBQUEUED;
	}
}
ssize_t bch2_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct bch_inode_info *inode = file_bch_inode(file);
	struct address_space *mapping = file->f_mapping;
	size_t count = iov_iter_count(iter);
	ssize_t ret;

	if (!count)
		return 0; /* skip atime */

	if (iocb->ki_flags & IOCB_DIRECT) {
		struct blk_plug plug;

		if (unlikely(mapping->nrpages)) {
			ret = filemap_write_and_wait_range(mapping,
						iocb->ki_pos,
						iocb->ki_pos + count - 1);
			if (ret < 0)
				return ret;
		}

		file_accessed(file);

		blk_start_plug(&plug);
		ret = bch2_direct_IO_read(iocb, iter);
		blk_finish_plug(&plug);

		if (ret >= 0)
			iocb->ki_pos += ret;
	} else {
		bch2_pagecache_add_get(&inode->ei_pagecache_lock);
		ret = generic_file_read_iter(iocb, iter);
		bch2_pagecache_add_put(&inode->ei_pagecache_lock);
	}

	return ret;
}
/* O_DIRECT writes */

/*
 * We're going to return -EIOCBQUEUED, but we haven't finished consuming the
 * iov_iter yet, so we need to stash a copy of the iovec: it might be on the
 * caller's stack, and we're not guaranteed that it will live for the duration
 * of the IO:
 */
static noinline int bch2_dio_write_copy_iov(struct dio_write *dio)
{
	struct iovec *iov = dio->inline_vecs;

	/*
	 * iov_iter has a single embedded iovec - nothing to do:
	 */
	if (iter_is_ubuf(&dio->iter))
		return 0;

	/*
	 * We don't currently handle non-iovec iov_iters here - return an error,
	 * and we'll fall back to doing the IO synchronously:
	 */
	if (!iter_is_iovec(&dio->iter))
		return -1;

	if (dio->iter.nr_segs > ARRAY_SIZE(dio->inline_vecs)) {
		iov = kmalloc_array(dio->iter.nr_segs, sizeof(*iov),
				    GFP_KERNEL);
		if (unlikely(!iov))
			return -ENOMEM;

		dio->free_iov = true;
	}

	memcpy(iov, dio->iter.__iov, dio->iter.nr_segs * sizeof(*iov));
	dio->iter.__iov = iov;
	return 0;
}
static void bch2_dio_write_loop_async(struct bch_write_op *);

static long bch2_dio_write_loop(struct dio_write *dio)
{
	bool kthread = (current->flags & PF_KTHREAD) != 0;
	struct kiocb *req = dio->req;
	struct address_space *mapping = req->ki_filp->f_mapping;
	struct bch_inode_info *inode = file_bch_inode(req->ki_filp);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bio *bio = &dio->op.wbio.bio;
	struct bvec_iter_all iter;
	struct bio_vec *bv;
	unsigned unaligned, iter_count;
	bool sync = dio->sync, dropped_locks;
	long ret;

	if (dio->loop)
		goto loop;

	while (1) {
		iter_count = dio->iter.count;

		if (kthread)
			kthread_use_mm(dio->mm);
		BUG_ON(current->faults_disabled_mapping);
		current->faults_disabled_mapping = mapping;

		ret = bio_iov_iter_get_pages(bio, &dio->iter);

		dropped_locks = fdm_dropped_locks();

		current->faults_disabled_mapping = NULL;
		if (kthread)
			kthread_unuse_mm(dio->mm);

		/*
		 * If the fault handler returned an error but also signalled
		 * that it dropped & retook ei_pagecache_lock, we just need to
		 * re-shoot down the page cache and retry:
		 */
		if (dropped_locks && ret)
			ret = 0;

		if (unlikely(ret < 0))
			goto err;

		if (unlikely(dropped_locks)) {
			ret = write_invalidate_inode_pages_range(mapping,
					req->ki_pos,
					req->ki_pos + iter_count - 1);
			if (unlikely(ret))
				goto err;

			if (!bio->bi_iter.bi_size)
				continue;
		}

		unaligned = bio->bi_iter.bi_size & (block_bytes(c) - 1);
		bio->bi_iter.bi_size -= unaligned;
		iov_iter_revert(&dio->iter, unaligned);

		if (!bio->bi_iter.bi_size) {
			/*
			 * bio_iov_iter_get_pages was only able to get <
			 * blocksize worth of pages:
			 */
			bio_for_each_segment_all(bv, bio, iter)
				put_page(bv->bv_page);
			ret = -EFAULT;
			goto err;
		}

		bch2_write_op_init(&dio->op, c, io_opts(c, &inode->ei_inode));
		dio->op.end_io		= bch2_dio_write_loop_async;
		dio->op.target		= dio->op.opts.foreground_target;
		op_journal_seq_set(&dio->op, &inode->ei_journal_seq);
		dio->op.write_point	= writepoint_hashed((unsigned long) current);
		dio->op.nr_replicas	= dio->op.opts.data_replicas;
		dio->op.pos		= POS(inode->v.i_ino, (u64) req->ki_pos >> 9);

		if ((req->ki_flags & IOCB_DSYNC) &&
		    !c->opts.journal_flush_disabled)
			dio->op.flags |= BCH_WRITE_FLUSH;

		ret = bch2_disk_reservation_get(c, &dio->op.res, bio_sectors(bio),
						dio->op.opts.data_replicas, 0);
		if (unlikely(ret) &&
		    !bch2_check_range_allocated(c, dio->op.pos,
				bio_sectors(bio),
				dio->op.opts.data_replicas,
				dio->op.opts.compression != 0))
			goto err;

		task_io_account_write(bio->bi_iter.bi_size);

		if (!dio->sync && !dio->loop && dio->iter.count) {
			if (bch2_dio_write_copy_iov(dio)) {
				dio->sync = sync = true;
			}
		}

		dio->loop = true;
		closure_call(&dio->op.cl, bch2_write, NULL, NULL);

		if (sync)
			wait_for_completion(&dio->done);
		else
			return -EIOCBQUEUED;
loop:
		i_sectors_acct(c, inode, &dio->quota_res,
			       dio->op.i_sectors_delta);
		req->ki_pos += (u64) dio->op.written << 9;
		dio->written += dio->op.written;

		spin_lock(&inode->v.i_lock);
		if (req->ki_pos > inode->v.i_size)
			i_size_write(&inode->v, req->ki_pos);
		spin_unlock(&inode->v.i_lock);

		bio_for_each_segment_all(bv, bio, iter)
			put_page(bv->bv_page);

		if (dio->op.error) {
			set_bit(EI_INODE_ERROR, &inode->ei_flags);
			break;
		}

		if (!dio->iter.count)
			break;

		bio_reset(bio, NULL, REQ_OP_WRITE);
		reinit_completion(&dio->done);
	}

	ret = dio->op.error ?: ((long) dio->written << 9);
err:
	bch2_pagecache_block_put(&inode->ei_pagecache_lock);
	bch2_quota_reservation_put(c, inode, &dio->quota_res);

	if (dio->free_iov)
		kfree(dio->iter.__iov);

	sync = dio->sync;
	bio_put(bio);

	/* inode->i_dio_count is our ref on inode and thus bch_fs */
	inode_dio_end(&inode->v);

	if (!sync) {
		req->ki_complete(req, ret);
		ret = -EIOCBQUEUED;
	}
	return ret;
}
static void bch2_dio_write_loop_async(struct bch_write_op *op)
{
	struct dio_write *dio = container_of(op, struct dio_write, op);

	if (dio->sync)
		complete(&dio->done);
	else
		bch2_dio_write_loop(dio);
}
ssize_t bch2_direct_write(struct kiocb *req, struct iov_iter *iter)
{
	struct file *file = req->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct bch_inode_info *inode = file_bch_inode(file);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct dio_write *dio;
	struct bio *bio;
	bool locked = true, extending;
	ssize_t ret;

	prefetch(&c->opts);
	prefetch((void *) &c->opts + 64);
	prefetch(&inode->ei_inode);
	prefetch((void *) &inode->ei_inode + 64);

	inode_lock(&inode->v);

	ret = generic_write_checks(req, iter);
	if (unlikely(ret <= 0))
		goto err;

	ret = file_remove_privs(file);
	if (unlikely(ret))
		goto err;

	ret = file_update_time(file);
	if (unlikely(ret))
		goto err;

	ret = -EINVAL;
	if (unlikely((req->ki_pos|iter->count) & (block_bytes(c) - 1)))
		goto err;

	inode_dio_begin(&inode->v);
	bch2_pagecache_block_get(&inode->ei_pagecache_lock);

	extending = req->ki_pos + iter->count > inode->v.i_size;
	if (!extending) {
		inode_unlock(&inode->v);
		locked = false;
	}

	bio = bio_alloc_bioset(NULL,
			       iov_iter_npages(iter, BIO_MAX_VECS),
			       REQ_OP_WRITE,
			       GFP_KERNEL,
			       &c->dio_write_bioset);
	dio = container_of(bio, struct dio_write, op.wbio.bio);
	init_completion(&dio->done);
	dio->req		= req;
	dio->mm			= current->mm;
	dio->loop		= false;
	dio->sync		= is_sync_kiocb(req) || extending;
	dio->free_iov		= false;
	dio->quota_res.sectors	= 0;
	dio->written		= 0;
	dio->iter		= *iter;

	ret = bch2_quota_reservation_add(c, inode, &dio->quota_res,
					 iter->count >> 9, true);
	if (unlikely(ret))
		goto err_put_bio;

	if (unlikely(mapping->nrpages)) {
		ret = write_invalidate_inode_pages_range(mapping,
						req->ki_pos,
						req->ki_pos + iter->count - 1);
		if (unlikely(ret))
			goto err_put_bio;
	}

	ret = bch2_dio_write_loop(dio);
err:
	if (locked)
		inode_unlock(&inode->v);
	return ret;
err_put_bio:
	bch2_pagecache_block_put(&inode->ei_pagecache_lock);
	bch2_quota_reservation_put(c, inode, &dio->quota_res);
	bio_put(bio);
	inode_dio_end(&inode->v);
	goto err;
}
ssize_t bch2_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct bch_inode_info *inode = file_bch_inode(file);
	ssize_t ret;

	if (iocb->ki_flags & IOCB_DIRECT)
		return bch2_direct_write(iocb, from);

	inode_lock(&inode->v);

	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		goto unlock;

	ret = file_remove_privs(file);
	if (ret)
		goto unlock;

	ret = file_update_time(file);
	if (ret)
		goto unlock;

	ret = bch2_buffered_write(iocb, from);
	if (likely(ret > 0))
		iocb->ki_pos += ret;
unlock:
	inode_unlock(&inode->v);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);

	return ret;
}

/* fsync: */
int bch2_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct bch_inode_info *inode = file_bch_inode(file);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	int ret, ret2;

	ret = file_write_and_wait_range(file, start, end);
	if (ret)
		return ret;

	if (datasync && !(inode->v.i_state & I_DIRTY_DATASYNC))
		goto out;

	ret = sync_inode_metadata(&inode->v, 1);
	if (ret)
		return ret;
out:
	if (!c->opts.journal_flush_disabled)
		ret = bch2_journal_flush_seq(&c->journal,
					     inode->ei_journal_seq);
	ret2 = file_check_and_advance_wb_err(file);

	return ret ?: ret2;
}
/* truncate: */

static inline int range_has_data(struct bch_fs *c,
				 struct bpos start,
				 struct bpos end)
{
	struct btree_trans trans;
	struct btree_iter *iter;
	struct bkey_s_c k;
	int ret = 0;

	bch2_trans_init(&trans, c, 0, 0);

	for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, start, 0, k, ret) {
		if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
			break;

		if (bkey_extent_is_data(k.k)) {
			ret = 1;
			break;
		}
	}

	return bch2_trans_exit(&trans) ?: ret;
}
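/*
 * Truncating to an offset that isn't block (and page) aligned means the
 * block i_size now lands in has to be partially zeroed, and its sectors past
 * the new EOF marked SECTOR_UNALLOCATED again, before the page cache is
 * truncated:
 */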
static int __bch2_truncate_page(struct bch_inode_info *inode,
				pgoff_t index, loff_t start, loff_t end)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct address_space *mapping = inode->v.i_mapping;
	struct bch_page_state *s;
	unsigned start_offset = start & (PAGE_SIZE - 1);
	unsigned end_offset = ((end - 1) & (PAGE_SIZE - 1)) + 1;
	unsigned i;
	struct page *page;
	int ret = 0;

	/* Page boundary? Nothing to do */
	if (!((index == start >> PAGE_SHIFT && start_offset) ||
	      (index == end >> PAGE_SHIFT && end_offset != PAGE_SIZE)))
		return 0;

	/* Above i_size? */
	if (index << PAGE_SHIFT >= inode->v.i_size)
		return 0;

	page = find_lock_page(mapping, index);
	if (!page) {
		/*
		 * XXX: we're doing two index lookups when we end up reading the
		 * page
		 */
		ret = range_has_data(c,
				POS(inode->v.i_ino, index << PAGE_SECTOR_SHIFT),
				POS(inode->v.i_ino, (index + 1) << PAGE_SECTOR_SHIFT));
		if (ret <= 0)
			return ret;

		page = find_or_create_page(mapping, index, GFP_KERNEL);
		if (unlikely(!page)) {
			ret = -ENOMEM;
			goto out;
		}
	}

	s = bch2_page_state_create(page, 0);
	if (!s) {
		ret = -ENOMEM;
		goto unlock;
	}

	if (!PageUptodate(page)) {
		ret = bch2_read_single_page(page, mapping);
		if (ret)
			goto unlock;
	}

	if (index != start >> PAGE_SHIFT)
		start_offset = 0;
	if (index != end >> PAGE_SHIFT)
		end_offset = PAGE_SIZE;

	for (i = round_up(start_offset, block_bytes(c)) >> 9;
	     i < round_down(end_offset, block_bytes(c)) >> 9;
	     i++) {
		s->s[i].nr_replicas	= 0;
		s->s[i].state		= SECTOR_UNALLOCATED;
	}

	zero_user_segment(page, start_offset, end_offset);

	/*
	 * Bit of a hack - we don't want truncate to fail due to -ENOSPC.
	 *
	 * XXX: because we aren't currently tracking whether the page has actual
	 * data in it (vs. just 0s, or only partially written) this is wrong. ick.
	 */
	ret = bch2_get_page_disk_reservation(c, inode, page, false);
	BUG_ON(ret);

	/*
	 * This removes any writeable userspace mappings; we need to force
	 * .page_mkwrite to be called again before any mmapped writes, to
	 * redirty the full page:
	 */
	page_mkclean(page);
	filemap_dirty_folio(mapping, page_folio(page));
unlock:
	unlock_page(page);
	put_page(page);
out:
	return ret;
}

static int bch2_truncate_page(struct bch_inode_info *inode, loff_t from)
{
	return __bch2_truncate_page(inode, from >> PAGE_SHIFT,
				    from, round_up(from, PAGE_SIZE));
}
static int bch2_extend(struct bch_inode_info *inode,
		       struct bch_inode_unpacked *inode_u,
		       struct iattr *iattr)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct address_space *mapping = inode->v.i_mapping;
	int ret;

	/*
	 * sync appends:
	 *
	 * this has to be done _before_ extending i_size:
	 */
	ret = filemap_write_and_wait_range(mapping, inode_u->bi_size, S64_MAX);
	if (ret)
		return ret;

	truncate_setsize(&inode->v, iattr->ia_size);
	/* ATTR_MODE will never be set here, ns argument isn't needed: */
	setattr_copy(NULL, &inode->v, iattr);

	mutex_lock(&inode->ei_update_lock);
	ret = bch2_write_inode_size(c, inode, inode->v.i_size,
				    ATTR_MTIME|ATTR_CTIME);
	mutex_unlock(&inode->ei_update_lock);

	return ret;
}
static int bch2_truncate_finish_fn(struct bch_inode_info *inode,
				   struct bch_inode_unpacked *bi,
				   void *p)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;

	bi->bi_flags &= ~BCH_INODE_I_SIZE_DIRTY;
	bi->bi_mtime = bi->bi_ctime = bch2_current_time(c);
	return 0;
}

static int bch2_truncate_start_fn(struct bch_inode_info *inode,
				  struct bch_inode_unpacked *bi, void *p)
{
	u64 *new_i_size = p;

	bi->bi_flags |= BCH_INODE_I_SIZE_DIRTY;
	bi->bi_size = *new_i_size;
	return 0;
}
int bch2_truncate(struct bch_inode_info *inode, struct iattr *iattr)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct address_space *mapping = inode->v.i_mapping;
	struct bch_inode_unpacked inode_u;
	struct btree_trans trans;
	struct btree_iter *iter;
	u64 new_i_size = iattr->ia_size;
	s64 i_sectors_delta = 0;
	int ret = 0;

	inode_dio_wait(&inode->v);
	bch2_pagecache_block_get(&inode->ei_pagecache_lock);

	/*
	 * fetch current on disk i_size: inode is locked, i_size can only
	 * increase underneath us:
	 */
	bch2_trans_init(&trans, c, 0, 0);
	iter = bch2_inode_peek(&trans, &inode_u, inode->v.i_ino, 0);
	ret = PTR_ERR_OR_ZERO(iter);
	bch2_trans_exit(&trans);

	if (ret)
		goto err;

	/*
	 * check this before next assertion; on filesystem error our normal
	 * invariants are a bit broken (truncate has to truncate the page cache
	 * before the inode).
	 */
	ret = bch2_journal_error(&c->journal);
	if (ret)
		goto err;

	WARN_ON(!test_bit(EI_INODE_ERROR, &inode->ei_flags) &&
		inode->v.i_size < inode_u.bi_size);

	if (iattr->ia_size > inode->v.i_size) {
		ret = bch2_extend(inode, &inode_u, iattr);
		goto err;
	}

	ret = bch2_truncate_page(inode, iattr->ia_size);
	if (unlikely(ret))
		goto err;

	/*
	 * When extending, we're going to write the new i_size to disk
	 * immediately so we need to flush anything above the current on disk
	 * i_size first:
	 *
	 * Also, when extending we need to flush the page that i_size currently
	 * straddles - if it's mapped to userspace, we need to ensure that
	 * userspace has to redirty it and call .mkwrite -> set_page_dirty
	 * again to allocate the part of the page that was extended.
	 */
	if (iattr->ia_size > inode_u.bi_size)
		ret = filemap_write_and_wait_range(mapping,
				inode_u.bi_size,
				iattr->ia_size - 1);
	else if (iattr->ia_size & (PAGE_SIZE - 1))
		ret = filemap_write_and_wait_range(mapping,
				round_down(iattr->ia_size, PAGE_SIZE),
				iattr->ia_size - 1);
	if (ret)
		goto err;

	mutex_lock(&inode->ei_update_lock);
	ret = bch2_write_inode(c, inode, bch2_truncate_start_fn,
			       &new_i_size, 0);
	mutex_unlock(&inode->ei_update_lock);

	if (unlikely(ret))
		goto err;

	truncate_setsize(&inode->v, iattr->ia_size);

	ret = bch2_fpunch(c, inode->v.i_ino,
			round_up(iattr->ia_size, block_bytes(c)) >> 9,
			U64_MAX, &inode->ei_journal_seq, &i_sectors_delta);
	i_sectors_acct(c, inode, NULL, i_sectors_delta);

	if (unlikely(ret))
		goto err;

	/* ATTR_MODE will never be set here, ns argument isn't needed: */
	setattr_copy(NULL, &inode->v, iattr);

	mutex_lock(&inode->ei_update_lock);
	ret = bch2_write_inode(c, inode, bch2_truncate_finish_fn, NULL,
			       ATTR_MTIME|ATTR_CTIME);
	mutex_unlock(&inode->ei_update_lock);
err:
	bch2_pagecache_block_put(&inode->ei_pagecache_lock);
	return ret;
}
/* fallocate: */

static long bchfs_fpunch(struct bch_inode_info *inode, loff_t offset, loff_t len)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	u64 discard_start = round_up(offset, block_bytes(c)) >> 9;
	u64 discard_end = round_down(offset + len, block_bytes(c)) >> 9;
	int ret = 0;

	inode_lock(&inode->v);
	inode_dio_wait(&inode->v);
	bch2_pagecache_block_get(&inode->ei_pagecache_lock);

	ret = __bch2_truncate_page(inode,
				   offset >> PAGE_SHIFT,
				   offset, offset + len);
	if (unlikely(ret))
		goto err;

	if (offset >> PAGE_SHIFT !=
	    (offset + len) >> PAGE_SHIFT) {
		ret = __bch2_truncate_page(inode,
					   (offset + len) >> PAGE_SHIFT,
					   offset, offset + len);
		if (unlikely(ret))
			goto err;
	}

	truncate_pagecache_range(&inode->v, offset, offset + len - 1);

	if (discard_start < discard_end) {
		s64 i_sectors_delta = 0;

		ret = bch2_fpunch(c, inode->v.i_ino,
				  discard_start, discard_end,
				  &inode->ei_journal_seq,
				  &i_sectors_delta);
		i_sectors_acct(c, inode, NULL, i_sectors_delta);
	}
err:
	bch2_pagecache_block_put(&inode->ei_pagecache_lock);
	inode_unlock(&inode->v);

	return ret;
}
static long bchfs_fcollapse_finsert(struct bch_inode_info *inode,
				    loff_t offset, loff_t len,
				    bool insert)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct address_space *mapping = inode->v.i_mapping;
	struct bkey_buf copy;
	struct btree_trans trans;
	struct btree_iter *src, *dst;
	loff_t shift, new_size;
	u64 src_start;
	int ret;

	if ((offset | len) & (block_bytes(c) - 1))
		return -EINVAL;

	bch2_bkey_buf_init(&copy);
	bch2_trans_init(&trans, c, BTREE_ITER_MAX, 256);

	/*
	 * We need i_mutex to keep the page cache consistent with the extents
	 * btree, and the btree consistent with i_size - we don't need outside
	 * locking for the extents btree itself, because we're using linked
	 * iterators
	 */
	inode_lock(&inode->v);
	inode_dio_wait(&inode->v);
	bch2_pagecache_block_get(&inode->ei_pagecache_lock);

	if (insert) {
		ret = -EFBIG;
		if (inode->v.i_sb->s_maxbytes - inode->v.i_size < len)
			goto err;

		ret = -EINVAL;
		if (offset >= inode->v.i_size)
			goto err;

		src_start	= U64_MAX;
		shift		= len;
	} else {
		ret = -EINVAL;
		if (offset + len >= inode->v.i_size)
			goto err;

		src_start	= offset + len;
		shift		= -len;
	}

	new_size = inode->v.i_size + shift;

	ret = write_invalidate_inode_pages_range(mapping, offset, LLONG_MAX);
	if (ret)
		goto err;

	if (insert) {
		i_size_write(&inode->v, new_size);
		mutex_lock(&inode->ei_update_lock);
		ret = bch2_write_inode_size(c, inode, new_size,
					    ATTR_MTIME|ATTR_CTIME);
		mutex_unlock(&inode->ei_update_lock);
	} else {
		s64 i_sectors_delta = 0;

		ret = bch2_fpunch(c, inode->v.i_ino,
				  offset >> 9, (offset + len) >> 9,
				  &inode->ei_journal_seq,
				  &i_sectors_delta);
		i_sectors_acct(c, inode, NULL, i_sectors_delta);

		if (ret)
			goto err;
	}

	src = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
			POS(inode->v.i_ino, src_start >> 9),
			BTREE_ITER_INTENT);
	dst = bch2_trans_copy_iter(&trans, src);

	while (1) {
		struct disk_reservation disk_res =
			bch2_disk_reservation_init(c, 0);
		struct bkey_i delete;
		struct bkey_s_c k;
		struct bpos next_pos;
		struct bpos move_pos = POS(inode->v.i_ino, offset >> 9);
		struct bpos atomic_end;
		unsigned trigger_flags = 0;

		k = insert
			? bch2_btree_iter_peek_prev(src)
			: bch2_btree_iter_peek(src);
		if ((ret = bkey_err(k)))
			goto bkey_err;

		if (!k.k || k.k->p.inode != inode->v.i_ino)
			break;

		BUG_ON(bkey_cmp(src->pos, bkey_start_pos(k.k)));

		if (insert &&
		    bkey_cmp(k.k->p, POS(inode->v.i_ino, offset >> 9)) <= 0)
			break;
reassemble:
		bch2_bkey_buf_reassemble(&copy, c, k);

		if (insert &&
		    bkey_cmp(bkey_start_pos(k.k), move_pos) < 0)
			bch2_cut_front(move_pos, copy.k);

		copy.k->k.p.offset += shift >> 9;
		bch2_btree_iter_set_pos(dst, bkey_start_pos(&copy.k->k));

		ret = bch2_extent_atomic_end(dst, copy.k, &atomic_end);
		if (ret)
			goto bkey_err;

		if (bkey_cmp(atomic_end, copy.k->k.p)) {
			if (insert) {
				move_pos = atomic_end;
				move_pos.offset -= shift >> 9;
				goto reassemble;
			} else {
				bch2_cut_back(atomic_end, copy.k);
			}
		}

		bkey_init(&delete.k);
		delete.k.p = copy.k->k.p;
		delete.k.size = copy.k->k.size;
		delete.k.p.offset -= shift >> 9;

		next_pos = insert ? bkey_start_pos(&delete.k) : delete.k.p;

		if (copy.k->k.size == k.k->size) {
			/*
			 * If we're moving the entire extent, we can skip
			 * running triggers:
			 */
			trigger_flags |= BTREE_TRIGGER_NORUN;
		} else {
			/* We might end up splitting compressed extents: */
			unsigned nr_ptrs =
				bch2_bkey_nr_ptrs_allocated(bkey_i_to_s_c(copy.k));

			ret = bch2_disk_reservation_get(c, &disk_res,
					copy.k->k.size, nr_ptrs,
					BCH_DISK_RESERVATION_NOFAIL);
			BUG_ON(ret);
		}

		bch2_btree_iter_set_pos(src, bkey_start_pos(&delete.k));

		ret =   bch2_trans_update(&trans, src, &delete, trigger_flags) ?:
			bch2_trans_update(&trans, dst, copy.k, trigger_flags) ?:
			bch2_trans_commit(&trans, &disk_res,
					  &inode->ei_journal_seq,
					  BTREE_INSERT_NOFAIL);
		bch2_disk_reservation_put(c, &disk_res);

		if (!ret)
			bch2_btree_iter_set_pos(src, next_pos);
bkey_err:
		if (ret == -EINTR)
			ret = 0;
		if (ret)
			goto err;

		bch2_trans_cond_resched(&trans);
	}
	bch2_trans_unlock(&trans);

	if (!insert) {
		i_size_write(&inode->v, new_size);
		mutex_lock(&inode->ei_update_lock);
		ret = bch2_write_inode_size(c, inode, new_size,
					    ATTR_MTIME|ATTR_CTIME);
		mutex_unlock(&inode->ei_update_lock);
	}
err:
	bch2_trans_exit(&trans);
	bch2_bkey_buf_exit(&copy, c);
	bch2_pagecache_block_put(&inode->ei_pagecache_lock);
	inode_unlock(&inode->v);
	return ret;
}
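/*
 * bchfs_fallocate() below implements preallocation by inserting
 * KEY_TYPE_reservation extents: disk space and quota are reserved and
 * i_sectors accounted without writing any data, and the reserved ranges read
 * back as zeroes until they're actually written:
 */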
2615 static long bchfs_fallocate(struct bch_inode_info *inode, int mode,
2616 loff_t offset, loff_t len)
2618 struct address_space *mapping = inode->v.i_mapping;
2619 struct bch_fs *c = inode->v.i_sb->s_fs_info;
2620 struct btree_trans trans;
2621 struct btree_iter *iter;
2622 struct bpos end_pos;
2623 loff_t end = offset + len;
2624 loff_t block_start = round_down(offset, block_bytes(c));
2625 loff_t block_end = round_up(end, block_bytes(c));
2627 unsigned replicas = io_opts(c, &inode->ei_inode).data_replicas;
2630 bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
2632 inode_lock(&inode->v);
2633 inode_dio_wait(&inode->v);
2634 bch2_pagecache_block_get(&inode->ei_pagecache_lock);
2636 if (!(mode & FALLOC_FL_KEEP_SIZE) && end > inode->v.i_size) {
2637 ret = inode_newsize_ok(&inode->v, end);
2642 if (mode & FALLOC_FL_ZERO_RANGE) {
2643 ret = __bch2_truncate_page(inode,
2644 offset >> PAGE_SHIFT,
2648 offset >> PAGE_SHIFT != end >> PAGE_SHIFT)
2649 ret = __bch2_truncate_page(inode,
2656 truncate_pagecache_range(&inode->v, offset, end - 1);
2659 iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
2660 POS(inode->v.i_ino, block_start >> 9),
2661 BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
2662 end_pos = POS(inode->v.i_ino, block_end >> 9);
2664 while (bkey_cmp(iter->pos, end_pos) < 0) {
2665 s64 i_sectors_delta = 0;
2666 struct disk_reservation disk_res = { 0 };
2667 struct quota_res quota_res = { 0 };
2668 struct bkey_i_reservation reservation;
2671 bch2_trans_begin(&trans);
2673 k = bch2_btree_iter_peek_slot(iter);
2674 if ((ret = bkey_err(k)))
2677 /* already reserved */
2678 if (k.k->type == KEY_TYPE_reservation &&
2679 bkey_s_c_to_reservation(k).v->nr_replicas >= replicas) {
2680 bch2_btree_iter_next_slot(iter);
2684 if (bkey_extent_is_data(k.k) &&
2685 !(mode & FALLOC_FL_ZERO_RANGE)) {
2686 bch2_btree_iter_next_slot(iter);
2690 bkey_reservation_init(&reservation.k_i);
2691 reservation.k.type = KEY_TYPE_reservation;
2692 reservation.k.p = k.k->p;
2693 reservation.k.size = k.k->size;
2695 bch2_cut_front(iter->pos, &reservation.k_i);
2696 bch2_cut_back(end_pos, &reservation.k_i);
2698 sectors = reservation.k.size;
2699 reservation.v.nr_replicas = bch2_bkey_nr_ptrs_allocated(k);
2701 if (!bkey_extent_is_allocation(k.k)) {
2702 ret = bch2_quota_reservation_add(c, inode,
2709 if (reservation.v.nr_replicas < replicas ||
2710 bch2_bkey_sectors_compressed(k)) {
2711 ret = bch2_disk_reservation_get(c, &disk_res, sectors,
2716 reservation.v.nr_replicas = disk_res.nr_replicas;
2719 ret = bch2_extent_update(&trans, iter, &reservation.k_i,
2720 &disk_res, &inode->ei_journal_seq,
2721 0, &i_sectors_delta);
2722 i_sectors_acct(c, inode, "a_res, i_sectors_delta);
2724 bch2_quota_reservation_put(c, inode, "a_res);
2725 bch2_disk_reservation_put(c, &disk_res);
2733 * Do we need to extend the file?
2735 * If we zeroed up to the end of the file, we dropped whatever writes
2736 * were going to write out the current i_size, so we have to extend
2737 * manually even if FL_KEEP_SIZE was set:
2739 if (end >= inode->v.i_size &&
2740 (!(mode & FALLOC_FL_KEEP_SIZE) ||
2741 (mode & FALLOC_FL_ZERO_RANGE))) {
2742 struct btree_iter *inode_iter;
2743 struct bch_inode_unpacked inode_u;
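/*
 * Note: in bcachefs, -EINTR from a btree transaction signals a
 * transaction restart (e.g. after dropping locks), not a delivered
 * signal -- hence the retry loop around bch2_inode_peek() below.
 */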
2746 bch2_trans_begin(&trans);
2747 inode_iter = bch2_inode_peek(&trans, &inode_u,
2749 ret = PTR_ERR_OR_ZERO(inode_iter);
2750 } while (ret == -EINTR);
2752 bch2_trans_unlock(&trans);
2758 * Sync existing appends before extending i_size,
2759 * as in bch2_extend():
2761 ret = filemap_write_and_wait_range(mapping,
2762 inode_u.bi_size, S64_MAX);
2766 if (mode & FALLOC_FL_KEEP_SIZE)
2767 end = inode->v.i_size;
2769 i_size_write(&inode->v, end);
2771 mutex_lock(&inode->ei_update_lock);
2772 ret = bch2_write_inode_size(c, inode, end, 0);
2773 mutex_unlock(&inode->ei_update_lock);
2776 bch2_trans_exit(&trans);
2777 bch2_pagecache_block_put(&inode->ei_pagecache_lock);
2778 inode_unlock(&inode->v);
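/*
 * Mode dispatch for fallocate(2). Illustrative userspace mapping (a
 * sketch, not exhaustive):
 *
 *   fallocate(fd, 0, off, len)                       -> bchfs_fallocate()
 *   fallocate(fd, FALLOC_FL_ZERO_RANGE, off, len)    -> bchfs_fallocate()
 *   fallocate(fd, FALLOC_FL_PUNCH_HOLE|
 *                 FALLOC_FL_KEEP_SIZE, off, len)     -> bchfs_fpunch()
 *   fallocate(fd, FALLOC_FL_INSERT_RANGE, off, len)  -> bchfs_fcollapse_finsert(.., true)
 *   fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, off, len)-> bchfs_fcollapse_finsert(.., false)
 *
 * The percpu ref on c->writes keeps the filesystem from going read-only
 * while the operation is in flight.
 */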
2782 long bch2_fallocate_dispatch(struct file *file, int mode,
2783 loff_t offset, loff_t len)
2785 struct bch_inode_info *inode = file_bch_inode(file);
2786 struct bch_fs *c = inode->v.i_sb->s_fs_info;
2789 if (!percpu_ref_tryget(&c->writes))
2792 if (!(mode & ~(FALLOC_FL_KEEP_SIZE|FALLOC_FL_ZERO_RANGE)))
2793 ret = bchfs_fallocate(inode, mode, offset, len);
2794 else if (mode == (FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE))
2795 ret = bchfs_fpunch(inode, offset, len);
2796 else if (mode == FALLOC_FL_INSERT_RANGE)
2797 ret = bchfs_fcollapse_finsert(inode, offset, len, true);
2798 else if (mode == FALLOC_FL_COLLAPSE_RANGE)
2799 ret = bchfs_fcollapse_finsert(inode, offset, len, false);
2803 percpu_ref_put(&c->writes);
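/*
 * Zero the per-sector nr_replicas counts in the source's pagecache:
 * once the range has been reflinked, dirty pages can no longer assume
 * their sectors have disk space reserved, so later writeback must
 * re-reserve. Folios with no bch_page_state attached are skipped.
 */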
2808 static void mark_range_unallocated(struct bch_inode_info *inode,
2809 loff_t start, loff_t end)
2811 pgoff_t index = start >> PAGE_SHIFT;
2812 pgoff_t end_index = (end - 1) >> PAGE_SHIFT;
2813 struct folio_batch fbatch;
2816 folio_batch_init(&fbatch);
2818 while (filemap_get_folios(inode->v.i_mapping,
2819 &index, end_index, &fbatch)) {
2820 for (i = 0; i < folio_batch_count(&fbatch); i++) {
2821 struct folio *folio = fbatch.folios[i];
2822 struct bch_page_state *s;
2825 s = bch2_page_state(&folio->page);
2828 spin_lock(&s->lock);
2829 for (j = 0; j < PAGE_SECTORS; j++)
2830 s->s[j].nr_replicas = 0;
2831 spin_unlock(&s->lock);
2834 folio_unlock(folio);
2836 folio_batch_release(&fbatch);
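/*
 * reflink: backs FICLONE/FICLONERANGE (and copy_file_range()). The
 * ordering below matters: lock both inodes and block their pagecaches,
 * quiesce direct IO, write out and invalidate the destination's
 * pagecache, then remap the extents in the btree.
 */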
2841 loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src,
2842 struct file *file_dst, loff_t pos_dst,
2843 loff_t len, unsigned remap_flags)
2845 struct bch_inode_info *src = file_bch_inode(file_src);
2846 struct bch_inode_info *dst = file_bch_inode(file_dst);
2847 struct bch_fs *c = src->v.i_sb->s_fs_info;
2848 s64 i_sectors_delta = 0;
2852 if (!c->opts.reflink)
2855 if (remap_flags & ~(REMAP_FILE_DEDUP|REMAP_FILE_ADVISORY))
2858 if (remap_flags & REMAP_FILE_DEDUP)
2861 if ((pos_src & (block_bytes(c) - 1)) ||
2862 (pos_dst & (block_bytes(c) - 1)))
2866 abs(pos_src - pos_dst) < len)
2869 bch2_lock_inodes(INODE_LOCK|INODE_PAGECACHE_BLOCK, src, dst);
2871 file_update_time(file_dst);
2873 inode_dio_wait(&src->v);
2874 inode_dio_wait(&dst->v);
2876 ret = generic_remap_file_range_prep(file_src, pos_src,
2879 if (ret < 0 || len == 0)
2882 aligned_len = round_up((u64) len, block_bytes(c));
2884 ret = write_invalidate_inode_pages_range(dst->v.i_mapping,
2885 pos_dst, pos_dst + len - 1);
2889 mark_range_unallocated(src, pos_src, pos_src + aligned_len);
2891 ret = bch2_remap_range(c,
2892 POS(dst->v.i_ino, pos_dst >> 9),
2893 POS(src->v.i_ino, pos_src >> 9),
2895 &dst->ei_journal_seq,
2896 pos_dst + len, &i_sectors_delta);
2901 * due to alignment, we might have remapped slightly more than requested
2903 ret = min((u64) ret << 9, (u64) len);
2905 /* XXX get a quota reservation */
2906 i_sectors_acct(c, dst, NULL, i_sectors_delta);
2908 spin_lock(&dst->v.i_lock);
2909 if (pos_dst + ret > dst->v.i_size)
2910 i_size_write(&dst->v, pos_dst + ret);
2911 spin_unlock(&dst->v.i_lock);
2913 bch2_unlock_inodes(INODE_LOCK|INODE_PAGECACHE_BLOCK, src, dst);
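/*
 * SEEK_DATA helper: returns the byte offset within the folio of the
 * first sector at or after @offset whose state is >= SECTOR_DIRTY,
 * i.e. data that may exist only in the pagecache and not yet in the
 * extents btree.
 */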
2920 static int folio_data_offset(struct folio *folio, unsigned offset)
2922 struct bch_page_state *s = bch2_page_state(&folio->page);
2926 for (i = offset >> 9; i < PAGE_SECTORS; i++)
2927 if (s->s[i].state >= SECTOR_DIRTY)
2933 static loff_t bch2_seek_pagecache_data(struct inode *vinode,
2934 loff_t start_offset,
2937 struct folio_batch fbatch;
2938 pgoff_t start_index = start_offset >> PAGE_SHIFT;
2939 pgoff_t end_index = end_offset >> PAGE_SHIFT;
2940 pgoff_t index = start_index;
2945 folio_batch_init(&fbatch);
2947 while (filemap_get_folios(vinode->i_mapping,
2948 &index, end_index, &fbatch)) {
2949 for (i = 0; i < folio_batch_count(&fbatch); i++) {
2950 struct folio *folio = fbatch.folios[i];
2953 offset = folio_data_offset(folio,
2954 folio->index == start_index
2955 ? start_offset & (PAGE_SIZE - 1)
2958 ret = clamp(((loff_t) folio->index << PAGE_SHIFT) +
2960 start_offset, end_offset);
2961 folio_unlock(folio);
2962 folio_batch_release(&fbatch);
2965 folio_unlock(folio);
2967 folio_batch_release(&fbatch);
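/*
 * SEEK_DATA consults the extents btree first, then narrows the result
 * with bch2_seek_pagecache_data() above, since dirty pagecache data
 * may not be reflected in any extent yet.
 */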
2974 static loff_t bch2_seek_data(struct file *file, u64 offset)
2976 struct bch_inode_info *inode = file_bch_inode(file);
2977 struct bch_fs *c = inode->v.i_sb->s_fs_info;
2978 struct btree_trans trans;
2979 struct btree_iter *iter;
2981 u64 isize, next_data = MAX_LFS_FILESIZE;
2984 isize = i_size_read(&inode->v);
2985 if (offset >= isize)
2988 bch2_trans_init(&trans, c, 0, 0);
2990 for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS,
2991 POS(inode->v.i_ino, offset >> 9), 0, k, ret) {
2992 if (k.k->p.inode != inode->v.i_ino) {
2994 } else if (bkey_extent_is_data(k.k)) {
2995 next_data = max(offset, bkey_start_offset(k.k) << 9);
2997 } else if (k.k->p.offset >> 9 > isize)
3001 ret = bch2_trans_exit(&trans) ?: ret;
3005 if (next_data > offset)
3006 next_data = bch2_seek_pagecache_data(&inode->v,
3009 if (next_data >= isize)
3012 return vfs_setpos(file, next_data, MAX_LFS_FILESIZE);
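/*
 * SEEK_HOLE helpers: a "hole" is any sector whose state is below
 * SECTOR_DIRTY, or a page absent from the pagecache entirely --
 * find_lock_page() failing counts as a hole at that offset.
 */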
3015 static int __page_hole_offset(struct page *page, unsigned offset)
3017 struct bch_page_state *s = bch2_page_state(page);
3023 for (i = offset >> 9; i < PAGE_SECTORS; i++)
3024 if (s->s[i].state < SECTOR_DIRTY)
3030 static loff_t page_hole_offset(struct address_space *mapping, loff_t offset)
3032 pgoff_t index = offset >> PAGE_SHIFT;
3037 page = find_lock_page(mapping, index);
3041 pg_offset = __page_hole_offset(page, offset & (PAGE_SIZE - 1));
3043 ret = ((loff_t) index << PAGE_SHIFT) + pg_offset;
3050 static loff_t bch2_seek_pagecache_hole(struct inode *vinode,
3051 loff_t start_offset,
3054 struct address_space *mapping = vinode->i_mapping;
3055 loff_t offset = start_offset, hole;
3057 while (offset < end_offset) {
3058 hole = page_hole_offset(mapping, offset);
3059 if (hole >= 0 && hole <= end_offset)
3060 return max(start_offset, hole);
3062 offset += PAGE_SIZE;
3063 offset &= PAGE_MASK;
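/*
 * SEEK_HOLE: ranges not covered by data extents only count as holes if
 * the pagecache doesn't have dirty data there, hence the
 * bch2_seek_pagecache_hole() checks in each branch below.
 */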
3069 static loff_t bch2_seek_hole(struct file *file, u64 offset)
3071 struct bch_inode_info *inode = file_bch_inode(file);
3072 struct bch_fs *c = inode->v.i_sb->s_fs_info;
3073 struct btree_trans trans;
3074 struct btree_iter *iter;
3076 u64 isize, next_hole = MAX_LFS_FILESIZE;
3079 isize = i_size_read(&inode->v);
3080 if (offset >= isize)
3083 bch2_trans_init(&trans, c, 0, 0);
3085 for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS,
3086 POS(inode->v.i_ino, offset >> 9),
3087 BTREE_ITER_SLOTS, k, ret) {
3088 if (k.k->p.inode != inode->v.i_ino) {
3089 next_hole = bch2_seek_pagecache_hole(&inode->v,
3090 offset, MAX_LFS_FILESIZE);
3092 } else if (!bkey_extent_is_data(k.k)) {
3093 next_hole = bch2_seek_pagecache_hole(&inode->v,
3094 max(offset, bkey_start_offset(k.k) << 9),
3095 k.k->p.offset << 9);
3097 if (next_hole < k.k->p.offset << 9)
3100 offset = max(offset, bkey_start_offset(k.k) << 9);
3104 ret = bch2_trans_exit(&trans) ?: ret;
3108 if (next_hole > isize)
3111 return vfs_setpos(file, next_hole, MAX_LFS_FILESIZE);
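/*
 * SEEK_SET/SEEK_CUR/SEEK_END go through generic_file_llseek(); only
 * SEEK_DATA and SEEK_HOLE need the filesystem-specific paths above.
 */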
3114 loff_t bch2_llseek(struct file *file, loff_t offset, int whence)
3120 return generic_file_llseek(file, offset, whence);
3122 return bch2_seek_data(file, offset);
3124 return bch2_seek_hole(file, offset);
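/*
 * Each bioset embeds the full op at a fixed offset so a completed bio
 * can be mapped back to its containing bch_writepage_io/dio_read/
 * dio_write; bch2_fs_fsio_exit() must mirror bch2_fs_fsio_init().
 */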
3130 void bch2_fs_fsio_exit(struct bch_fs *c)
3132 bioset_exit(&c->dio_write_bioset);
3133 bioset_exit(&c->dio_read_bioset);
3134 bioset_exit(&c->writepage_bioset);
3137 int bch2_fs_fsio_init(struct bch_fs *c)
3141 pr_verbose_init(c->opts, "");
3143 if (bioset_init(&c->writepage_bioset,
3144 4, offsetof(struct bch_writepage_io, op.wbio.bio),
3145 BIOSET_NEED_BVECS) ||
3146 bioset_init(&c->dio_read_bioset,
3147 4, offsetof(struct dio_read, rbio.bio),
3148 BIOSET_NEED_BVECS) ||
3149 bioset_init(&c->dio_write_bioset,
3150 4, offsetof(struct dio_write, op.wbio.bio),
3154 pr_verbose_init(c->opts, "ret %i", ret);
3158 #endif /* NO_BCACHEFS_FS */