unlock_page(mp->page);
}
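+/*
+ * Write out a single locked metapage folio and wait for the I/O to complete.
+ * The folio is unlocked on return, and the mapping's AS_EIO/AS_ENOSPC error
+ * flags are cleared by the final filemap_check_errors() call.
+ */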
+static int metapage_write_one(struct page *page)
+{
+ struct folio *folio = page_folio(page);
+ struct address_space *mapping = folio->mapping;
+ struct writeback_control wbc = {
+ .sync_mode = WB_SYNC_ALL,
+ .nr_to_write = folio_nr_pages(folio),
+ };
+ int ret = 0;
+
+ BUG_ON(!folio_test_locked(folio));
+
+ folio_wait_writeback(folio);
+
+ if (folio_clear_dirty_for_io(folio)) {
+ folio_get(folio);
+ ret = metapage_writepage(page, &wbc);
+ if (ret == 0)
+ folio_wait_writeback(folio);
+ folio_put(folio);
+ } else {
+ folio_unlock(folio);
+ }
+
+ if (!ret)
+ ret = filemap_check_errors(mapping);
+ return ret;
+}
+
void force_metapage(struct metapage *mp)
{
struct page *page = mp->page;
get_page(page);
lock_page(page);
set_page_dirty(page);
- if (write_one_page(page))
- jfs_error(mp->sb, "write_one_page() failed\n");
+ if (metapage_write_one(page))
+ jfs_error(mp->sb, "metapage_write_one() failed\n");
clear_bit(META_forcewrite, &mp->flag);
put_page(page);
}
set_page_dirty(page);
if (test_bit(META_sync, &mp->flag)) {
clear_bit(META_sync, &mp->flag);
- if (write_one_page(page))
- jfs_error(mp->sb, "write_one_page() failed\n");
- lock_page(page); /* write_one_page unlocks the page */
+ if (metapage_write_one(page))
+ jfs_error(mp->sb, "metapage_write_one() failed\n");
+ lock_page(page); /* metapage_write_one() unlocks the page */
}
} else if (mp->lsn) /* discard_metapage doesn't remove it */
remove_from_logsync(mp);
*/
if (PAGE_SIZE <= OCFS2_SB(sb)->s_clustersize) {
if (PageDirty(page)) {
- /*
- * write_on_page will unlock the page on return
- */
- ret = write_one_page(page);
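+ /*
+  * Drop our lock and reference on the page, then write back
+  * and wait on the range before retrying the lookup.
+  */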
+ unlock_page(page);
+ put_page(page);
+
+ ret = filemap_write_and_wait_range(mapping,
+ offset, map_end - 1);
goto retry;
}
}
return !memcmp(name, de->d_name, len);
}
-static int ufs_commit_chunk(struct page *page, loff_t pos, unsigned len)
+static void ufs_commit_chunk(struct page *page, loff_t pos, unsigned len)
{
struct address_space *mapping = page->mapping;
struct inode *dir = mapping->host;
- int err = 0;
inode_inc_iversion(dir);
block_write_end(NULL, mapping, pos, len, len, page, NULL);
i_size_write(dir, pos+len);
mark_inode_dirty(dir);
}
- if (IS_DIRSYNC(dir))
- err = write_one_page(page);
- else
- unlock_page(page);
+ unlock_page(page);
+}
+
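+/*
+ * Write back the directory's dirty pages and then sync its inode metadata.
+ * Callers invoke this after a directory update instead of writing each
+ * page out individually from ufs_commit_chunk().
+ */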
+static int ufs_handle_dirsync(struct inode *dir)
+{
+ int err;
+
+ err = filemap_write_and_wait(dir->i_mapping);
+ if (!err)
+ err = sync_inode_metadata(dir, 1);
return err;
}
de->d_ino = cpu_to_fs32(dir->i_sb, inode->i_ino);
ufs_set_de_type(dir->i_sb, de, inode->i_mode);
- err = ufs_commit_chunk(page, pos, len);
+ ufs_commit_chunk(page, pos, len);
ufs_put_page(page);
if (update_times)
dir->i_mtime = dir->i_ctime = current_time(dir);
mark_inode_dirty(dir);
+ ufs_handle_dirsync(dir);
}
de->d_ino = cpu_to_fs32(sb, inode->i_ino);
ufs_set_de_type(sb, de, inode->i_mode);
- err = ufs_commit_chunk(page, pos, rec_len);
+ ufs_commit_chunk(page, pos, rec_len);
dir->i_mtime = dir->i_ctime = current_time(dir);
mark_inode_dirty(dir);
+ err = ufs_handle_dirsync(dir);
/* OFFSET_CACHE */
out_put:
ufs_put_page(page);
if (pde)
pde->d_reclen = cpu_to_fs16(sb, to - from);
dir->d_ino = 0;
- err = ufs_commit_chunk(page, pos, to - from);
+ ufs_commit_chunk(page, pos, to - from);
inode->i_ctime = inode->i_mtime = current_time(inode);
mark_inode_dirty(inode);
+ err = ufs_handle_dirsync(inode);
out:
ufs_put_page(page);
UFSD("EXIT\n");
strcpy (de->d_name, "..");
kunmap(page);
- err = ufs_commit_chunk(page, 0, chunk_size);
+ ufs_commit_chunk(page, 0, chunk_size);
+ err = ufs_handle_dirsync(inode);
fail:
put_page(page);
return err;
bool folio_clear_dirty_for_io(struct folio *folio);
bool clear_page_dirty_for_io(struct page *page);
void folio_invalidate(struct folio *folio, size_t offset, size_t length);
-int __must_check folio_write_one(struct folio *folio);
-static inline int __must_check write_one_page(struct page *page)
-{
- return folio_write_one(page_folio(page));
-}
-
int __set_page_dirty_nobuffers(struct page *page);
bool noop_dirty_folio(struct address_space *mapping, struct folio *folio);
return ret;
}
-/**
- * folio_write_one - write out a single folio and wait on I/O.
- * @folio: The folio to write.
- *
- * The folio must be locked by the caller and will be unlocked upon return.
- *
- * Note that the mapping's AS_EIO/AS_ENOSPC flags will be cleared when this
- * function returns.
- *
- * Return: %0 on success, negative error code otherwise
- */
-int folio_write_one(struct folio *folio)
-{
- struct address_space *mapping = folio->mapping;
- int ret = 0;
- struct writeback_control wbc = {
- .sync_mode = WB_SYNC_ALL,
- .nr_to_write = folio_nr_pages(folio),
- };
-
- BUG_ON(!folio_test_locked(folio));
-
- folio_wait_writeback(folio);
-
- if (folio_clear_dirty_for_io(folio)) {
- folio_get(folio);
- ret = mapping->a_ops->writepage(&folio->page, &wbc);
- if (ret == 0)
- folio_wait_writeback(folio);
- folio_put(folio);
- } else {
- folio_unlock(folio);
- }
-
- if (!ret)
- ret = filemap_check_errors(mapping);
- return ret;
-}
-EXPORT_SYMBOL(folio_write_one);
-
/*
* For address_spaces which do not use buffers nor write back.
*/