 		}
 		if (iter->iomap.flags & IOMAP_F_STALE)
 			break;
+		if (iter->flags & IOMAP_UNCACHED &&
+		    !(iter->iomap.flags & IOMAP_F_BUFFER_HEAD))
+			folio_set_uncached(folio);
 
 		offset = offset_in_folio(folio, pos);
 		if (bytes > folio_size(folio) - offset)
 iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *i,
 		const struct iomap_ops *ops, void *private)
 {
+	struct address_space *mapping = iocb->ki_filp->f_mapping;
 	struct iomap_iter iter = {
-		.inode		= iocb->ki_filp->f_mapping->host,
+		.inode		= mapping->host,
 		.pos		= iocb->ki_pos,
 		.len		= iov_iter_count(i),
 		.flags		= IOMAP_WRITE,
 		.private	= private,
 	};
 	ssize_t ret;
 
 	if (iocb->ki_flags & IOCB_NOWAIT)
 		iter.flags |= IOMAP_NOWAIT;
+	if (iocb->ki_flags & IOCB_UNCACHED)
+		iter.flags |= IOMAP_UNCACHED;
 
 	while ((ret = iomap_iter(&iter, ops)) > 0)
 		iter.processed = iomap_write_iter(&iter, i);
 
 	if (unlikely(iter.pos == iocb->ki_pos))
 		return ret;
+	if (iocb->ki_flags & IOCB_UNCACHED) {
+		/* kick off uncached writeback, completion will drop it */
+		__filemap_fdatawrite_range(mapping, iocb->ki_pos, iter.pos,
+					   WB_SYNC_NONE);
+	}
 	ret = iter.pos - iocb->ki_pos;
 	iocb->ki_pos = iter.pos;
 	return ret;
 #define IOMAP_NOWAIT		(1 << 5) /* do not block */
 #define IOMAP_OVERWRITE_ONLY	(1 << 6) /* only pure overwrites allowed */
 #define IOMAP_UNSHARE		(1 << 7) /* unshare_file_range */
+#define IOMAP_UNCACHED		(1 << 8) /* uncached IO */
 #ifdef CONFIG_FS_DAX
-#define IOMAP_DAX		(1 << 8) /* DAX mapping */
+#define IOMAP_DAX		(1 << 9) /* DAX mapping */
 #else
 #define IOMAP_DAX		0
 #endif /* CONFIG_FS_DAX */
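
For reference, a minimal userspace sketch of how this path would be exercised, assuming the rest of the series wires IOCB_UNCACHED up to an RWF_UNCACHED flag for pwritev2()/preadv2() (the flag name and its presence in the uapi headers are assumptions about the wider series, not something added by these hunks):

/*
 * Sketch only: an uncached buffered write. Requires a kernel with this
 * series applied and headers that define RWF_UNCACHED.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/uio.h>
#include <unistd.h>

#ifndef RWF_UNCACHED
#error "needs headers from a kernel carrying the RWF_UNCACHED series"
#endif

int main(void)
{
	static char buf[64 * 1024];
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	int fd = open("testfile", O_WRONLY | O_CREAT | O_TRUNC, 0644);
	ssize_t ret;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	memset(buf, 'a', sizeof(buf));

	/*
	 * Still a buffered write: data goes through the page cache, but the
	 * folios are tagged uncached, writeback is kicked off before return,
	 * and the pages are dropped once it completes.
	 */
	ret = pwritev2(fd, &iov, 1, 0, RWF_UNCACHED);
	if (ret < 0)
		perror("pwritev2");

	close(fd);
	return 0;
}

Unlike O_DIRECT there are no alignment requirements here; only the caching behaviour of an ordinary buffered write changes.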