struct inode *inode = file->f_mapping->host;
return __blockdev_direct_IO(rw, iocb, inode, I_BDEV(inode), iov, offset,
- nr_segs, blkdev_get_block, NULL, NULL, 0);
+ nr_segs, blkdev_get_block, NULL, NULL,
+ DIO_IGNORE_TRUNCATE);
}
int __sync_blockdev(struct block_device *bdev, int wait)
goto out;
} else if (unlikely(test_bit(BTRFS_INODE_READDIO_NEED_LOCK,
&BTRFS_I(inode)->runtime_flags))) {
- inode_dio_done(inode);
+ inode_dio_done(inode, 0);
flags = DIO_LOCKING | DIO_SKIP_HOLES;
wakeup = false;
}
}
out:
if (wakeup)
- inode_dio_done(inode);
+ inode_dio_done(inode, 0);
if (relock)
mutex_lock(&inode->i_mutex);
if (dio->end_io && dio->result)
dio->end_io(dio->iocb, offset, transferred, dio->private);
- inode_dio_done(dio->inode);
+ inode_dio_done(dio->inode, dio->flags);
+
if (is_async) {
if (dio->rw & WRITE) {
int err;
}
}
- /*
- * Will be decremented at I/O completion time.
- */
- atomic_inc(&inode->i_dio_count);
+ inode_dio_start(inode, dio->flags);
retval = 0;
sdio.blkbits = blkbits;
smp_mb();
if (unlikely(ext4_test_inode_state(inode,
EXT4_STATE_DIOREAD_LOCK))) {
- inode_dio_done(inode);
+ inode_dio_done(inode, 0);
goto locked;
}
ret = __blockdev_direct_IO(rw, iocb, inode,
inode->i_sb->s_bdev, iov,
offset, nr_segs,
ext4_get_block, NULL, NULL, 0);
- inode_dio_done(inode);
+ inode_dio_done(inode, 0);
} else {
locked:
ret = blockdev_direct_IO(rw, iocb, inode, iov,
retake_lock:
if (rw == WRITE)
- inode_dio_done(inode);
+ inode_dio_done(inode, 0);
/* take i_mutex locking again if we do an overwrite dio */
if (overwrite) {
up_read(&EXT4_I(inode)->i_data_sem);
}
EXPORT_SYMBOL(inode_dio_wait);
+/*
+ * inode_dio_start - signal start of a direct I/O request; elevates
+ * i_dio_count to hold off truncate, unless DIO_IGNORE_TRUNCATE is set.
+ */
+void inode_dio_start(struct inode *inode, int dio_flags)
+{
+ if (!(dio_flags & DIO_IGNORE_TRUNCATE))
+ atomic_inc(&inode->i_dio_count);
+}
+
+/*
+ * Returns true if this drops i_dio_count to zero; always false when
+ * DIO_IGNORE_TRUNCATE is set, since no reference was taken to begin with.
+ */
+static int inode_dio_dec(struct inode *inode, int dio_flags)
+{
+ if (!(dio_flags & DIO_IGNORE_TRUNCATE))
+ return atomic_dec_and_test(&inode->i_dio_count);
+
+ return 0;
+}
+
/**
 * inode_dio_done - signal finish of a direct I/O request
 * @inode: inode the direct I/O happens on
+ * @dio_flags: DIO_* flags for this request; with DIO_IGNORE_TRUNCATE set,
+ *             i_dio_count is left untouched and no wakeup is issued
 *
 * This is called once we've finished processing a direct I/O request,
 * and is used to wake up callers waiting for direct I/O to be quiesced.
 */
-void inode_dio_done(struct inode *inode)
+void inode_dio_done(struct inode *inode, int dio_flags)
{
- if (atomic_dec_and_test(&inode->i_dio_count))
+ if (inode_dio_dec(inode, dio_flags))
wake_up_bit(&inode->i_state, __I_DIO_WAKEUP);
}
EXPORT_SYMBOL(inode_dio_done);
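/*
 * Illustrative sketch, not part of the patch: how a submission path is
 * expected to pair the two helpers. example_submit_dio() is hypothetical;
 * only inode_dio_start()/inode_dio_done() and DIO_IGNORE_TRUNCATE come
 * from this patch.
 */
static void example_submit_dio(struct inode *inode, int dio_flags)
{
        /* Taken before any bio is issued; pins the inode against truncate. */
        inode_dio_start(inode, dio_flags);

        /* ... build and submit bios here ... */

        /*
         * Dropped at completion with the same flags. If dio_flags contains
         * DIO_IGNORE_TRUNCATE, neither call touched i_dio_count, so the
         * request is invisible to inode_dio_wait() -- only safe when the
         * inode can never be truncated, e.g. a raw block device.
         */
        inode_dio_done(inode, dio_flags);
}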
static void nfs_inode_dio_write_done(struct inode *inode)
{
nfs_zap_mapping(inode, inode->i_mapping);
- inode_dio_done(inode);
+ inode_dio_done(inode, 0);
}
#if IS_ENABLED(CONFIG_NFS_V3) || IS_ENABLED(CONFIG_NFS_V4)
* generic layer handle the completion.
*/
if (requested_bytes == 0) {
- inode_dio_done(inode);
+ inode_dio_done(inode, 0);
nfs_direct_req_release(dreq);
return result < 0 ? result : -EIO;
}
/* filesystem does not support filling holes */
DIO_SKIP_HOLES = 0x02,
+
+ /* inode/fs/bdev does not need truncate protection */
+ DIO_IGNORE_TRUNCATE = 0x04,
};
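/*
 * Illustrative sketch, not part of the patch: what "truncate protection"
 * buys. example_setattr_size() is hypothetical; a real setattr path drains
 * in-flight direct I/O via inode_dio_wait() before shrinking the file.
 * Requests started with DIO_IGNORE_TRUNCATE never enter i_dio_count and
 * are therefore not waited for here.
 */
static void example_setattr_size(struct inode *inode, loff_t newsize)
{
        /* Sleeps until i_dio_count drops to zero. */
        inode_dio_wait(inode);
        truncate_setsize(inode, newsize);
}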
void dio_end_io(struct bio *bio, int error);
#endif
void inode_dio_wait(struct inode *inode);
-void inode_dio_done(struct inode *inode);
+void inode_dio_done(struct inode *inode, int dio_flags);
+void inode_dio_start(struct inode *inode, int dio_flags);
extern const struct file_operations generic_ro_fops;