* Flags for ext4_io_end->flags
*/
#define EXT4_IO_END_UNWRITTEN 0x0001
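+/* dontcache (uncached) write, punt completion to task context */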
+#define EXT4_IO_DONTCACHE 0x0002
struct ext4_io_end_vec {
struct list_head list; /* list of io_end_vec */
.splice_write = iter_file_splice_write,
.fallocate = ext4_fallocate,
.fop_flags = FOP_MMAP_SYNC | FOP_BUFFER_RASYNC |
- FOP_DIO_PARALLEL_WRITE,
+ FOP_DIO_PARALLEL_WRITE | FOP_DONTCACHE,
};
const struct inode_operations ext4_file_inode_operations = {
handle_t *handle;
struct folio *folio;
struct ext4_iloc iloc;
+ fgf_t fgp_flags;
if (pos + len > ext4_get_max_inline_size(inode))
goto convert;
if (ret)
goto out;
- folio = __filemap_get_folio(mapping, 0, FGP_WRITEBEGIN | FGP_NOFS,
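+ /*
+ * generic_perform_write() marks *foliop as dropbehind for an
+ * uncached write, propagate that to the folio lookup with
+ * FGP_DONTCACHE.
+ */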
+ fgp_flags = FGP_WRITEBEGIN | FGP_NOFS;
+ if (foliop_is_dropbehind(foliop))
+ fgp_flags |= FGP_DONTCACHE;
+
+ folio = __filemap_get_folio(mapping, 0, fgp_flags,
mapping_gfp_mask(mapping));
if (IS_ERR(folio)) {
ret = PTR_ERR(folio);
int ret, needed_blocks;
handle_t *handle;
int retries = 0;
+ fgf_t fgp_flags;
struct folio *folio;
pgoff_t index;
unsigned from, to;
return 0;
}
+ /*
+ * Set FGP_WRITEBEGIN, and FGP_DONTCACHE if foliop is marked as
+ * dropbehind. That's how generic_perform_write() informs us that this
+ * is a dropbehind write.
+ */
+ fgp_flags = FGP_WRITEBEGIN;
+ if (foliop_is_dropbehind(foliop))
+ fgp_flags |= FGP_DONTCACHE;
+
/*
* __filemap_get_folio() can take a long time if the
* system is thrashing due to memory pressure, or if the folio
* is being written back.  So grab it first before we start
* the transaction handle.  This also allows us to allocate
* the folio (if needed) without using GFP_NOFS.
*/
retry_grab:
- folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
+ folio = __filemap_get_folio(mapping, index, fgp_flags,
mapping_gfp_mask(mapping));
if (IS_ERR(folio))
return PTR_ERR(folio);
struct folio *folio;
pgoff_t index;
struct inode *inode = mapping->host;
+ fgf_t fgp_flags;
if (unlikely(ext4_forced_shutdown(inode->i_sb)))
return -EIO;
return 0;
}
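+ /*
+ * As in ext4_write_begin(), a dropbehind *foliop signals an
+ * uncached write, so pass FGP_DONTCACHE to the folio lookup.
+ */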
+ fgp_flags = FGP_WRITEBEGIN;
+ if (foliop_is_dropbehind(foliop))
+ fgp_flags |= FGP_DONTCACHE;
+
retry:
- folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
+ folio = __filemap_get_folio(mapping, index, fgp_flags,
mapping_gfp_mask(mapping));
if (IS_ERR(folio))
return PTR_ERR(folio);
unsigned long flags;
- /* Only reserved conversions from writeback should enter here */
- WARN_ON(!(io_end->flag & EXT4_IO_END_UNWRITTEN));
- WARN_ON(!io_end->handle && sbi->s_journal);
+ /* Only reserved conversions or dontcache IO should enter here */
+ WARN_ON(!(io_end->flag & (EXT4_IO_END_UNWRITTEN|EXT4_IO_DONTCACHE)));
+ WARN_ON((io_end->flag & EXT4_IO_END_UNWRITTEN) &&
+ !io_end->handle && sbi->s_journal);
spin_lock_irqsave(&ei->i_completed_io_lock, flags);
wq = sbi->rsv_conversion_wq;
if (list_empty(&ei->i_rsv_conversion_list))
while (!list_empty(&unwritten)) {
io_end = list_entry(unwritten.next, ext4_io_end_t, list);
- BUG_ON(!(io_end->flag & EXT4_IO_END_UNWRITTEN));
+ BUG_ON(!(io_end->flag & (EXT4_IO_END_UNWRITTEN|EXT4_IO_DONTCACHE)));
list_del_init(&io_end->list);
err = ext4_end_io_end(io_end);
void ext4_put_io_end_defer(ext4_io_end_t *io_end)
{
- if (refcount_dec_and_test(&io_end->count)) {
- if (!(io_end->flag & EXT4_IO_END_UNWRITTEN) ||
- list_empty(&io_end->list_vec)) {
- ext4_release_io_end(io_end);
- return;
- }
- ext4_add_complete_io(io_end);
+ if (!refcount_dec_and_test(&io_end->count))
+ return;
+ if ((!(io_end->flag & EXT4_IO_END_UNWRITTEN) ||
+ list_empty(&io_end->list_vec)) &&
+ !(io_end->flag & EXT4_IO_DONTCACHE)) {
+ ext4_release_io_end(io_end);
+ return;
}
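+ /*
+ * Unwritten extent conversion needs the reserved workqueue, and
+ * dontcache completions are punted there as well so the folio
+ * can be dropped from task context.
+ */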
+ ext4_add_complete_io(io_end);
}
int ext4_put_io_end(ext4_io_end_t *io_end)
blk_status_to_errno(bio->bi_status));
}
- if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
+ if (io_end->flag & (EXT4_IO_END_UNWRITTEN|EXT4_IO_DONTCACHE)) {
/*
* Link bio into list hanging from io_end. We have to do it
* atomically as bio completions can be racing against each
* other.
*/
if (io->io_bio == NULL) {
io_submit_init_bio(io, bh);
io->io_bio->bi_write_hint = inode->i_write_hint;
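+ /*
+ * Writing back a dropbehind folio: flag the io_end so that
+ * completion handling is deferred to task context.
+ */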
+ if (folio_test_dropbehind(folio)) {
+ ext4_io_end_t *io_end = io->io_bio->bi_private;
+ io_end->flag |= EXT4_IO_DONTCACHE;
+ }
}
if (!bio_add_folio(io->io_bio, io_folio, bh->b_size, bh_offset(bh)))
goto submit_and_retry;