loff_t
iomap_apply(struct iomap_data *data, const struct iomap_ops *ops,
		iomap_actor_t actor)
{
- struct iomap iomap = { .type = IOMAP_HOLE };
+ struct iomap iomap = {
+ .type = IOMAP_HOLE,
+ .page_list = LIST_HEAD_INIT(iomap.page_list)
+ };
struct iomap srcmap = { .type = IOMAP_HOLE };
loff_t written = 0, ret;
u64 end;
data->flags, &iomap);
}
+ if (!list_empty(&iomap.page_list))
+ uncached_write_pages(data->inode->i_mapping, &iomap.page_list);
+
return written ? written : ret;
}
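
For reference, the iomap_data bundle threaded through iomap_apply() comes from an earlier patch in this series; a minimal sketch consistent with the fields this section dereferences (data->inode, data->pos, data->len, data->flags, data->priv) would be:

struct iomap_data {
	struct inode	*inode;
	loff_t		pos;
	loff_t		len;
	unsigned	flags;
	void		*priv;		/* actor-private data, e.g. the iov_iter */
};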
enum {
IOMAP_WRITE_F_UNSHARE = (1 << 0),
+ IOMAP_WRITE_F_UNCACHED = (1 << 1),
};
static int
iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, struct iomap *iomap, struct iomap *srcmap)
{
const struct iomap_page_ops *page_ops = iomap->page_ops;
+ unsigned aop_flags;
struct page *page;
int status = 0;
return status;
}
+ aop_flags = AOP_FLAG_NOFS;
+ if (flags & IOMAP_WRITE_F_UNCACHED)
+ aop_flags |= AOP_FLAG_UNCACHED;
page = grab_cache_page_write_begin(inode->i_mapping, pos >> PAGE_SHIFT,
- AOP_FLAG_NOFS);
+ aop_flags);
if (!page) {
status = -ENOMEM;
goto out_no_page;
struct iov_iter *i = data->priv;
loff_t length = data->len;
loff_t pos = data->pos;
+ unsigned flags = 0;
long status = 0;
ssize_t written = 0;
+ if (data->flags & IOMAP_UNCACHED)
+ flags |= IOMAP_WRITE_F_UNCACHED;
+
do {
struct page *page;
unsigned long offset; /* Offset into pagecache page */
break;
}
- status = iomap_write_begin(inode, pos, bytes, 0, &page, iomap,
- srcmap);
+ status = iomap_write_begin(inode, pos, bytes, flags,
+ &page, iomap, srcmap);
if (unlikely(status))
break;
written += copied;
length -= copied;
- balance_dirty_pages_ratelimited(inode->i_mapping);
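+ /* privio pages bypass the page cache, so dirty balancing does not apply */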
+ if (!PagePrivio(page))
+ balance_dirty_pages_ratelimited(inode->i_mapping);
+ else
+ list_add_tail(&page->lru, &iomap->page_list);
} while (iov_iter_count(i) && length);
return written ? written : status;
};
loff_t ret = 0, written = 0;
+ if (iocb->ki_flags & IOCB_UNCACHED)
+ data.flags |= IOMAP_UNCACHED;
+
while (iov_iter_count(iter)) {
data.len = iov_iter_count(iter);
ret = iomap_apply(&data, ops, iomap_write_actor);
{ IOMAP_REPORT, "REPORT" }, \
{ IOMAP_FAULT, "FAULT" }, \
{ IOMAP_DIRECT, "DIRECT" }, \
- { IOMAP_NOWAIT, "NOWAIT" }
+ { IOMAP_NOWAIT, "NOWAIT" }, \
+ { IOMAP_UNCACHED, "UNCACHED" }
#define IOMAP_F_FLAGS_STRINGS \
{ IOMAP_F_NEW, "NEW" }, \
{ IOMAP_F_SHARED, "SHARED" }, \
{ IOMAP_F_MERGED, "MERGED" }, \
{ IOMAP_F_BUFFER_HEAD, "BH" }, \
+ { IOMAP_F_PAGE_CREATE, "PAGE_CREATE" }, \
{ IOMAP_F_SIZE_CHANGED, "SIZE_CHANGED" }
DECLARE_EVENT_CLASS(iomap_class,
extern ssize_t generic_perform_write(struct file *, struct iov_iter *,
struct kiocb *);
+extern void uncached_write_pages(struct address_space *, struct list_head *);
+
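
The extra kiocb argument to generic_perform_write() is what lets the buffered write path see iocb->ki_flags (and thus IOCB_UNCACHED). A hypothetical filesystem's ->write_iter would call it roughly like this (sketch only; the myfs_ name is made up):

static ssize_t myfs_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	inode_lock(inode);
	ret = generic_perform_write(iocb->ki_filp, from, iocb);
	inode_unlock(inode);
	if (ret > 0)
		iocb->ki_pos += ret;
	return ret;
}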
ssize_t vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos,
rwf_t flags);
ssize_t vfs_iter_write(struct file *file, struct iov_iter *iter, loff_t *ppos,
*
* IOMAP_F_BUFFER_HEAD indicates that the file system requires the use of
* buffer heads for this mapping.
+ *
+ * IOMAP_F_PAGE_CREATE indicates that pages had to be allocated to satisfy
+ * this operation.
*/
#define IOMAP_F_NEW 0x01
#define IOMAP_F_DIRTY 0x02
#define IOMAP_F_SHARED 0x04
#define IOMAP_F_MERGED 0x08
#define IOMAP_F_BUFFER_HEAD 0x10
+#define IOMAP_F_PAGE_CREATE 0x20
/*
* Flags set by the core iomap code during operations:
void *inline_data;
void *private; /* filesystem private */
const struct iomap_page_ops *page_ops;
+ struct list_head page_list;
};
static inline sector_t
#define IOMAP_FAULT (1 << 3) /* mapping for page fault */
#define IOMAP_DIRECT (1 << 4) /* direct I/O */
#define IOMAP_NOWAIT (1 << 5) /* do not block */
+#define IOMAP_UNCACHED (1 << 6) /* uncached IO */
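
At the top of the stack, IOMAP_UNCACHED is derived from IOCB_UNCACHED on the kiocb, which in turn is set from a new pwritev2() flag defined in a uapi patch not shown here. A rough userspace sketch (the RWF_UNCACHED value below is a placeholder for illustration):

#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/uio.h>

#ifndef RWF_UNCACHED
#define RWF_UNCACHED	0x00000040	/* placeholder; the real value comes from the uapi patch */
#endif

int main(void)
{
	char buf[4096] = { 0 };
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	int fd = open("file", O_WRONLY | O_CREAT, 0644);

	/* buffered write whose pages are flushed and dropped, not cached */
	return pwritev2(fd, &iov, 1, -1, RWF_UNCACHED) != sizeof(buf);
}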
struct iomap_ops {
/*
struct page *grab_cache_page_write_begin(struct address_space *mapping,
pgoff_t index, unsigned flags)
{
- struct page *page;
+ gfp_t gfp = mapping_gfp_mask(mapping);
int fgp_flags = FGP_LOCK|FGP_WRITE;
+ struct page *page;
if (flags & AOP_FLAG_NOFS)
fgp_flags |= FGP_NOFS;
if (!(flags & AOP_FLAG_UNCACHED))
fgp_flags |= FGP_CREAT;
-
- page = pagecache_get_page(mapping, index, fgp_flags,
- mapping_gfp_mask(mapping));
- if (page)
+ page = pagecache_get_page(mapping, index, fgp_flags, gfp);
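+ /*
+  * Cache miss and FGP_CREAT wasn't set for uncached IO: allocate a
+  * standalone page that never enters the page cache.
+  */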
+ if (!page && (flags & AOP_FLAG_UNCACHED)) {
+ if (flags & AOP_FLAG_NOFS)
+ gfp &= ~__GFP_FS;
+ page = __page_cache_alloc(gfp);
+ if (page) {
+ page->mapping = mapping;
+ page->index = index;
+ __SetPageLocked(page);
+ __SetPagePrivio(page);
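+ /* extra reference, dropped again in uncached_write_pages() */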
+ get_page(page);
+ }
+ } else if (page)
wait_for_stable_page(page);
return page;
}
EXPORT_SYMBOL(grab_cache_page_write_begin);
+void uncached_write_pages(struct address_space *mapping,
+ struct list_head *wb_list)
+{
+ struct writeback_control wbc = {
+ .sync_mode = WB_SYNC_ALL,
+ .for_sync = 1,
+ };
+ struct blk_plug plug;
+ struct page *page;
+
+ wbc_attach_fdatawrite_inode(&wbc, mapping->host);
+ blk_start_plug(&plug);
+
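+ /* first pass: submit each page for writeback, under a plug */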
+ list_for_each_entry(page, wb_list, lru) {
+ lock_page(page);
+ wbc.nr_to_write = 1;
+ wbc.pages_skipped = 0;
+ test_clear_page_writeback(page);
+ mapping->a_ops->writepage(page, &wbc);
+ if (wbc.pages_skipped)
+ pr_warn("%s: writepage skipped %ld pages\n", __func__, wbc.pages_skipped);
+ }
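+ /* second pass: wait for writeback to finish, then release the pages */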
+ while (!list_empty(wb_list)) {
+ page = list_first_entry(wb_list, struct page, lru);
+ list_del(&page->lru);
+ wait_on_page_writeback(page);
+ page->mapping = NULL;
+ put_page(page);
+ }
+
+ blk_finish_plug(&plug);
+ wbc_detach_inode(&wbc);
+}
+
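Taken together, a buffered write path outside iomap could use the same two primitives in this pattern (condensed sketch; more_to_write() and copy_into() are stand-ins for the usual write_begin/write_end loop, and locking/write_end details are omitted):

LIST_HEAD(wb_list);
struct page *page;

while (more_to_write()) {
	page = grab_cache_page_write_begin(mapping, index,
				AOP_FLAG_NOFS | AOP_FLAG_UNCACHED);
	if (!page)
		break;
	copy_into(page);
	if (PagePrivio(page))
		list_add_tail(&page->lru, &wb_list);
}

if (!list_empty(&wb_list))
	uncached_write_pages(mapping, &wb_list);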
ssize_t generic_perform_write(struct file *file,
struct iov_iter *i, struct kiocb *iocb)
{