// SPDX-License-Identifier: GPL-2.0
/*
 * f2fs compress support
 *
 * Copyright (c) 2019 Chao Yu <chao@kernel.org>
 */

#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/lzo.h>
#include <linux/lz4.h>

#include "f2fs.h"
#include "node.h"
#include <trace/events/f2fs.h>

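/*
 * Per-algorithm hooks: each compression backend provides context
 * setup/teardown plus the actual (de)compression over the vmapped
 * cluster buffers.
 */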
struct f2fs_compress_ops {
	int (*init_compress_ctx)(struct compress_ctx *cc);
	void (*destroy_compress_ctx)(struct compress_ctx *cc);
	int (*compress_pages)(struct compress_ctx *cc);
	int (*decompress_pages)(struct decompress_io_ctx *dic);
};

static unsigned int offset_in_cluster(struct compress_ctx *cc, pgoff_t index)
{
	return index & (cc->cluster_size - 1);
}

static pgoff_t cluster_idx(struct compress_ctx *cc, pgoff_t index)
{
	return index >> cc->log_cluster_size;
}

static pgoff_t start_idx_of_cluster(struct compress_ctx *cc)
{
	return cc->cluster_idx << cc->log_cluster_size;
}

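/*
 * Compressed pages carry a compress_io_ctx/decompress_io_ctx pointer in
 * their private data, whose first field is F2FS_COMPRESSED_PAGE_MAGIC;
 * this is what tells them apart in the bio completion paths.
 */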
bool f2fs_is_compressed_page(struct page *page)
{
	if (!PagePrivate(page))
		return false;
	if (!page_private(page))
		return false;
	if (IS_ATOMIC_WRITTEN_PAGE(page) || IS_DUMMY_WRITTEN_PAGE(page))
		return false;
	f2fs_bug_on(F2FS_M_SB(page->mapping),
		*((u32 *)page_private(page)) != F2FS_COMPRESSED_PAGE_MAGIC);
	return true;
}

static void f2fs_set_compressed_page(struct page *page,
		struct inode *inode, pgoff_t index, void *data, refcount_t *r)
{
	SetPagePrivate(page);
	set_page_private(page, (unsigned long)data);

	/* i_crypto_info and iv index */
	page->index = index;
	page->mapping = inode->i_mapping;
	if (r)
		refcount_inc(r);
}

static void f2fs_put_compressed_page(struct page *page)
{
	set_page_private(page, (unsigned long)NULL);
	ClearPagePrivate(page);
	page->mapping = NULL;
	unlock_page(page);
	put_page(page);
}

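/* helpers to unlock or release all raw pages attached to a cluster */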
static void f2fs_drop_rpages(struct compress_ctx *cc, int len, bool unlock)
{
	int i;

	for (i = 0; i < len; i++) {
		if (!cc->rpages[i])
			continue;
		if (unlock)
			unlock_page(cc->rpages[i]);
		else
			put_page(cc->rpages[i]);
	}
}

static void f2fs_put_rpages(struct compress_ctx *cc)
{
	f2fs_drop_rpages(cc, cc->cluster_size, false);
}

static void f2fs_unlock_rpages(struct compress_ctx *cc, int len)
{
	f2fs_drop_rpages(cc, len, true);
}

static void f2fs_put_rpages_mapping(struct compress_ctx *cc,
				struct address_space *mapping,
				pgoff_t start, int len)
{
	int i;

	for (i = 0; i < len; i++) {
		struct page *page = find_get_page(mapping, start + i);

		/* drop the lookup reference and the one held by the caller */
		put_page(page);
		put_page(page);
	}
}

static void f2fs_put_rpages_wbc(struct compress_ctx *cc,
		struct writeback_control *wbc, bool redirty, int unlock)
{
	unsigned int i;

	for (i = 0; i < cc->cluster_size; i++) {
		if (!cc->rpages[i])
			continue;
		if (redirty)
			redirty_page_for_writepage(wbc, cc->rpages[i]);
		f2fs_put_page(cc->rpages[i], unlock);
	}
}

struct page *f2fs_compress_control_page(struct page *page)
{
	return ((struct compress_io_ctx *)page_private(page))->rpages[0];
}

int f2fs_init_compress_ctx(struct compress_ctx *cc)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);

	if (cc->rpages)
		return 0;

	cc->rpages = f2fs_kzalloc(sbi, sizeof(struct page *) <<
					cc->log_cluster_size, GFP_NOFS);
	return cc->rpages ? 0 : -ENOMEM;
}

void f2fs_destroy_compress_ctx(struct compress_ctx *cc)
{
	kfree(cc->rpages);
	cc->rpages = NULL;
	cc->nr_rpages = 0;
	cc->nr_cpages = 0;
	cc->cluster_idx = NULL_CLUSTER;
}

void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page)
{
	unsigned int cluster_ofs;

	if (!f2fs_cluster_can_merge_page(cc, page->index))
		f2fs_bug_on(F2FS_I_SB(cc->inode), 1);

	cluster_ofs = offset_in_cluster(cc, page->index);
	cc->rpages[cluster_ofs] = page;
	cc->nr_rpages++;
	cc->cluster_idx = cluster_idx(cc, page->index);
}

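/*
 * LZO backend: cc->private holds the LZO1X_MEM_COMPRESS scratch area and
 * cc->clen is initialized to the worst-case output size for the cluster.
 */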
#ifdef CONFIG_F2FS_FS_LZO
static int lzo_init_compress_ctx(struct compress_ctx *cc)
{
	cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
				LZO1X_MEM_COMPRESS, GFP_NOFS);
	if (!cc->private)
		return -ENOMEM;

	cc->clen = lzo1x_worst_compress(PAGE_SIZE << cc->log_cluster_size);
	return 0;
}

static void lzo_destroy_compress_ctx(struct compress_ctx *cc)
{
	kvfree(cc->private);
	cc->private = NULL;
}

static int lzo_compress_pages(struct compress_ctx *cc)
{
	int ret;

	ret = lzo1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
					&cc->clen, cc->private);
	if (ret != LZO_E_OK) {
		printk_ratelimited("%sF2FS-fs (%s): lzo compress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
		return -EIO;
	}
	return 0;
}

static int lzo_decompress_pages(struct decompress_io_ctx *dic)
{
	int ret;

	ret = lzo1x_decompress_safe(dic->cbuf->cdata, dic->clen,
						dic->rbuf, &dic->rlen);
	if (ret != LZO_E_OK) {
		printk_ratelimited("%sF2FS-fs (%s): lzo decompress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
		return -EIO;
	}

	if (dic->rlen != PAGE_SIZE << dic->log_cluster_size) {
		printk_ratelimited("%sF2FS-fs (%s): lzo invalid rlen:%zu, "
					"expected:%lu\n", KERN_ERR,
					F2FS_I_SB(dic->inode)->sb->s_id,
					dic->rlen,
					PAGE_SIZE << dic->log_cluster_size);
		return -EIO;
	}
	return 0;
}

static const struct f2fs_compress_ops f2fs_lzo_ops = {
	.init_compress_ctx	= lzo_init_compress_ctx,
	.destroy_compress_ctx	= lzo_destroy_compress_ctx,
	.compress_pages		= lzo_compress_pages,
	.decompress_pages	= lzo_decompress_pages,
};
#endif

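/*
 * LZ4 backend: LZ4_compressBound() bounds the destination buffer;
 * LZ4_compress_default() returns the compressed size, or 0 on failure.
 */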
#ifdef CONFIG_F2FS_FS_LZ4
static int lz4_init_compress_ctx(struct compress_ctx *cc)
{
	cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
				LZ4_MEM_COMPRESS, GFP_NOFS);
	if (!cc->private)
		return -ENOMEM;

	cc->clen = LZ4_compressBound(PAGE_SIZE << cc->log_cluster_size);
	return 0;
}

static void lz4_destroy_compress_ctx(struct compress_ctx *cc)
{
	kvfree(cc->private);
	cc->private = NULL;
}

static int lz4_compress_pages(struct compress_ctx *cc)
{
	int len;

	len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
						cc->clen, cc->private);
	if (!len) {
		printk_ratelimited("%sF2FS-fs (%s): lz4 compress failed\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id);
		return -EIO;
	}

	cc->clen = len;
	return 0;
}

static int lz4_decompress_pages(struct decompress_io_ctx *dic)
{
	int ret;

	ret = LZ4_decompress_safe(dic->cbuf->cdata, dic->rbuf,
						dic->clen, dic->rlen);
	if (ret < 0) {
		printk_ratelimited("%sF2FS-fs (%s): lz4 decompress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
		return -EIO;
	}

	if (ret != PAGE_SIZE << dic->log_cluster_size) {
		printk_ratelimited("%sF2FS-fs (%s): lz4 invalid rlen:%zu, "
					"expected:%lu\n", KERN_ERR,
					F2FS_I_SB(dic->inode)->sb->s_id,
					dic->rlen,
					PAGE_SIZE << dic->log_cluster_size);
		return -EIO;
	}
	return 0;
}

static const struct f2fs_compress_ops f2fs_lz4_ops = {
	.init_compress_ctx	= lz4_init_compress_ctx,
	.destroy_compress_ctx	= lz4_destroy_compress_ctx,
	.compress_pages		= lz4_compress_pages,
	.decompress_pages	= lz4_decompress_pages,
};
#endif

static const struct f2fs_compress_ops *f2fs_cops[COMPRESS_MAX] = {
#ifdef CONFIG_F2FS_FS_LZO
	&f2fs_lzo_ops,
#else
	NULL,
#endif
#ifdef CONFIG_F2FS_FS_LZ4
	&f2fs_lz4_ops,
#else
	NULL,
#endif
};

bool f2fs_is_compress_backend_ready(struct inode *inode)
{
	if (!f2fs_compressed_file(inode))
		return true;
	return f2fs_cops[F2FS_I(inode)->i_compress_algorithm];
}

static struct page *f2fs_grab_page(void)
{
	struct page *page;

	page = alloc_page(GFP_NOFS);
	if (!page)
		return NULL;
	lock_page(page);
	return page;
}

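/*
 * Compress one cluster: vmap the raw pages read-only as the source buffer
 * and the newly grabbed cpages as the destination, run the backend, then
 * fail with -EAGAIN unless the result (header included) saves at least
 * one block.
 */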
static int f2fs_compress_pages(struct compress_ctx *cc)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
	struct f2fs_inode_info *fi = F2FS_I(cc->inode);
	const struct f2fs_compress_ops *cops =
				f2fs_cops[fi->i_compress_algorithm];
	unsigned int max_len, nr_cpages;
	int i, ret;

	trace_f2fs_compress_pages_start(cc->inode, cc->cluster_idx,
				cc->cluster_size, fi->i_compress_algorithm);

	ret = cops->init_compress_ctx(cc);
	if (ret)
		goto out;

	max_len = COMPRESS_HEADER_SIZE + cc->clen;
	cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);

	cc->cpages = f2fs_kzalloc(sbi, sizeof(struct page *) *
					cc->nr_cpages, GFP_NOFS);
	if (!cc->cpages) {
		ret = -ENOMEM;
		goto destroy_compress_ctx;
	}

	for (i = 0; i < cc->nr_cpages; i++) {
		cc->cpages[i] = f2fs_grab_page();
		if (!cc->cpages[i]) {
			ret = -ENOMEM;
			goto out_free_cpages;
		}
	}

	cc->rbuf = vmap(cc->rpages, cc->cluster_size, VM_MAP, PAGE_KERNEL_RO);
	if (!cc->rbuf) {
		ret = -ENOMEM;
		goto out_free_cpages;
	}

	cc->cbuf = vmap(cc->cpages, cc->nr_cpages, VM_MAP, PAGE_KERNEL);
	if (!cc->cbuf) {
		ret = -ENOMEM;
		goto out_vunmap_rbuf;
	}

	ret = cops->compress_pages(cc);
	if (ret)
		goto out_vunmap_cbuf;

	max_len = PAGE_SIZE * (cc->cluster_size - 1) - COMPRESS_HEADER_SIZE;

	if (cc->clen > max_len) {
		ret = -EAGAIN;
		goto out_vunmap_cbuf;
	}

	cc->cbuf->clen = cpu_to_le32(cc->clen);

	for (i = 0; i < COMPRESS_DATA_RESERVED_SIZE; i++)
		cc->cbuf->reserved[i] = cpu_to_le32(0);

	nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);

	/* zero out any unused part of the last page */
	memset(&cc->cbuf->cdata[cc->clen], 0,
	       (nr_cpages * PAGE_SIZE) - (cc->clen + COMPRESS_HEADER_SIZE));

	vunmap(cc->cbuf);
	vunmap(cc->rbuf);

	/* release compressed pages beyond the actual result size */
	for (i = nr_cpages; i < cc->nr_cpages; i++) {
		f2fs_put_compressed_page(cc->cpages[i]);
		cc->cpages[i] = NULL;
	}

	cc->nr_cpages = nr_cpages;

	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
							cc->clen, ret);
	return 0;

out_vunmap_cbuf:
	vunmap(cc->cbuf);
out_vunmap_rbuf:
	vunmap(cc->rbuf);
out_free_cpages:
	for (i = 0; i < cc->nr_cpages; i++) {
		if (cc->cpages[i])
			f2fs_put_compressed_page(cc->cpages[i]);
	}
	kfree(cc->cpages);
	cc->cpages = NULL;
destroy_compress_ctx:
	cops->destroy_compress_ctx(cc);
out:
	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
							cc->clen, ret);
	return ret;
}

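/*
 * Called for each compressed page on read completion; only the last
 * reference holder maps the buffers and decompresses the cluster.
 */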
void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity)
{
	struct decompress_io_ctx *dic =
			(struct decompress_io_ctx *)page_private(page);
	struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
	struct f2fs_inode_info *fi = F2FS_I(dic->inode);
	const struct f2fs_compress_ops *cops =
			f2fs_cops[fi->i_compress_algorithm];
	int ret;

	dec_page_count(sbi, F2FS_RD_DATA);

	if (bio->bi_status || PageError(page))
		dic->failed = true;

	if (refcount_dec_not_one(&dic->ref))
		return;

	trace_f2fs_decompress_pages_start(dic->inode, dic->cluster_idx,
				dic->cluster_size, fi->i_compress_algorithm);

	/* submit partial compressed pages */
	if (dic->failed) {
		ret = -EIO;
		goto out_free_dic;
	}

	dic->rbuf = vmap(dic->tpages, dic->cluster_size, VM_MAP, PAGE_KERNEL);
	if (!dic->rbuf) {
		ret = -ENOMEM;
		goto out_free_dic;
	}

	dic->cbuf = vmap(dic->cpages, dic->nr_cpages, VM_MAP, PAGE_KERNEL_RO);
	if (!dic->cbuf) {
		ret = -ENOMEM;
		goto out_vunmap_rbuf;
	}

	dic->clen = le32_to_cpu(dic->cbuf->clen);
	dic->rlen = PAGE_SIZE << dic->log_cluster_size;

	if (dic->clen > PAGE_SIZE * dic->nr_cpages - COMPRESS_HEADER_SIZE) {
		ret = -EFSCORRUPTED;
		goto out_vunmap_cbuf;
	}

	ret = cops->decompress_pages(dic);

out_vunmap_cbuf:
	vunmap(dic->cbuf);
out_vunmap_rbuf:
	vunmap(dic->rbuf);
out_free_dic:
	if (!verity)
		f2fs_decompress_end_io(dic->rpages, dic->cluster_size,
								ret, false);

	trace_f2fs_decompress_pages_end(dic->inode, dic->cluster_idx,
								ret);
	if (!verity)
		f2fs_free_dic(dic);
}

static bool is_page_in_cluster(struct compress_ctx *cc, pgoff_t index)
{
	if (cc->cluster_idx == NULL_CLUSTER)
		return true;
	return cc->cluster_idx == cluster_idx(cc, index);
}

bool f2fs_cluster_is_empty(struct compress_ctx *cc)
{
	return cc->nr_rpages == 0;
}

static bool f2fs_cluster_is_full(struct compress_ctx *cc)
{
	return cc->cluster_size == cc->nr_rpages;
}

bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index)
{
	if (f2fs_cluster_is_empty(cc))
		return true;
	return is_page_in_cluster(cc, index);
}

static bool __cluster_may_compress(struct compress_ctx *cc)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
	loff_t i_size = i_size_read(cc->inode);
	unsigned nr_pages = DIV_ROUND_UP(i_size, PAGE_SIZE);
	int i;

	for (i = 0; i < cc->cluster_size; i++) {
		struct page *page = cc->rpages[i];

		f2fs_bug_on(sbi, !page);

		if (unlikely(f2fs_cp_error(sbi)))
			return false;
		if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
			return false;

		/* beyond EOF */
		if (page->index >= nr_pages)
			return false;
	}
	return true;
}

/* return # of compressed block addresses */
static int f2fs_compressed_blocks(struct compress_ctx *cc)
{
	struct dnode_of_data dn;
	int ret;

	set_new_dnode(&dn, cc->inode, NULL, NULL, 0);
	ret = f2fs_get_dnode_of_data(&dn, start_idx_of_cluster(cc),
							LOOKUP_NODE);
	if (ret) {
		if (ret == -ENOENT)
			ret = 0;
		goto fail;
	}

	if (dn.data_blkaddr == COMPRESS_ADDR) {
		int i;

		ret = 1;
		for (i = 1; i < cc->cluster_size; i++) {
			block_t blkaddr;

			blkaddr = data_blkaddr(dn.inode,
					dn.node_page, dn.ofs_in_node + i);
			if (blkaddr != NULL_ADDR)
				ret++;
		}
	}
fail:
	f2fs_put_dnode(&dn);
	return ret;
}

int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index)
{
	struct compress_ctx cc = {
		.inode = inode,
		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
		.cluster_size = F2FS_I(inode)->i_cluster_size,
		.cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size,
	};

	return f2fs_compressed_blocks(&cc);
}

static bool cluster_may_compress(struct compress_ctx *cc)
{
	if (!f2fs_compressed_file(cc->inode))
		return false;
	if (f2fs_is_atomic_file(cc->inode))
		return false;
	if (f2fs_is_mmap_file(cc->inode))
		return false;
	if (!f2fs_cluster_is_full(cc))
		return false;
	return __cluster_may_compress(cc);
}

static void set_cluster_writeback(struct compress_ctx *cc)
{
	int i;

	for (i = 0; i < cc->cluster_size; i++) {
		if (cc->rpages[i])
			set_page_writeback(cc->rpages[i]);
	}
}

static void set_cluster_dirty(struct compress_ctx *cc)
{
	int i;

	for (i = 0; i < cc->cluster_size; i++)
		if (cc->rpages[i])
			set_page_dirty(cc->rpages[i]);
}

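/*
 * write_begin on a compressed cluster: every raw page must be cached,
 * locked and uptodate before an in-place rewrite can start, so read the
 * whole cluster first and retry if any page got reclaimed in between.
 */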
static int prepare_compress_overwrite(struct compress_ctx *cc,
		struct page **pagep, pgoff_t index, void **fsdata)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
	struct address_space *mapping = cc->inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	sector_t last_block_in_bio;
	unsigned fgp_flag = FGP_LOCK | FGP_WRITE | FGP_CREAT;
	pgoff_t start_idx = start_idx_of_cluster(cc);
	int i, ret;
	bool prealloc;

retry:
	ret = f2fs_compressed_blocks(cc);
	if (ret <= 0)
		return ret;

	/* compressed case */
	prealloc = (ret < cc->cluster_size);

	ret = f2fs_init_compress_ctx(cc);
	if (ret)
		return ret;

	/* keep page reference to avoid page reclaim */
	for (i = 0; i < cc->cluster_size; i++) {
		page = f2fs_pagecache_get_page(mapping, start_idx + i,
							fgp_flag, GFP_NOFS);
		if (!page) {
			ret = -ENOMEM;
			goto unlock_pages;
		}

		if (PageUptodate(page))
			unlock_page(page);
		else
			f2fs_compress_ctx_add_page(cc, page);
	}

	if (!f2fs_cluster_is_empty(cc)) {
		struct bio *bio = NULL;

		ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
					&last_block_in_bio, false, true);
		f2fs_destroy_compress_ctx(cc);
		if (ret)
			goto release_pages;
		if (bio)
			f2fs_submit_bio(sbi, bio, DATA);

		ret = f2fs_init_compress_ctx(cc);
		if (ret)
			goto release_pages;
	}

	for (i = 0; i < cc->cluster_size; i++) {
		f2fs_bug_on(sbi, cc->rpages[i]);

		page = find_lock_page(mapping, start_idx + i);
		f2fs_bug_on(sbi, !page);

		f2fs_wait_on_page_writeback(page, DATA, true, true);

		f2fs_compress_ctx_add_page(cc, page);
		f2fs_put_page(page, 0);

		if (!PageUptodate(page)) {
			f2fs_unlock_rpages(cc, i + 1);
			f2fs_put_rpages_mapping(cc, mapping, start_idx,
					cc->cluster_size);
			f2fs_destroy_compress_ctx(cc);
			goto retry;
		}
	}

	if (prealloc) {
		__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);

		set_new_dnode(&dn, cc->inode, NULL, NULL, 0);

		for (i = cc->cluster_size - 1; i > 0; i--) {
			ret = f2fs_get_block(&dn, start_idx + i);
			if (ret) {
				i = cc->cluster_size;
				break;
			}

			if (dn.data_blkaddr != NEW_ADDR)
				break;
		}

		__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
	}

	if (likely(!ret)) {
		*fsdata = cc->rpages;
		*pagep = cc->rpages[offset_in_cluster(cc, index)];
		return cc->cluster_size;
	}

unlock_pages:
	f2fs_unlock_rpages(cc, i);
release_pages:
	f2fs_put_rpages_mapping(cc, mapping, start_idx, i);
	f2fs_destroy_compress_ctx(cc);
	return ret;
}

int f2fs_prepare_compress_overwrite(struct inode *inode,
		struct page **pagep, pgoff_t index, void **fsdata)
{
	struct compress_ctx cc = {
		.inode = inode,
		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
		.cluster_size = F2FS_I(inode)->i_cluster_size,
		.cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size,
		.rpages = NULL,
		.nr_rpages = 0,
	};

	return prepare_compress_overwrite(&cc, pagep, index, fsdata);
}

bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
					pgoff_t index, unsigned copied)
{
	struct compress_ctx cc = {
		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
		.cluster_size = F2FS_I(inode)->i_cluster_size,
		.rpages = fsdata,
	};
	bool first_index = (index == cc.rpages[0]->index);

	if (copied)
		set_cluster_dirty(&cc);

	f2fs_put_rpages_wbc(&cc, NULL, false, 1);
	f2fs_destroy_compress_ctx(&cc);

	return first_index;
}

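/*
 * Write back one compressed cluster: slot 0 of the cluster gets the
 * COMPRESS_ADDR marker, compressed pages are written out of place, and
 * block addresses left unused by the smaller result are invalidated.
 */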
static int f2fs_write_compressed_pages(struct compress_ctx *cc,
					int *submitted,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	struct inode *inode = cc->inode;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = cc->inode->i_ino,
		.type = DATA,
		.op = REQ_OP_WRITE,
		.op_flags = wbc_to_write_flags(wbc),
		.old_blkaddr = NEW_ADDR,
		.page = NULL,
		.encrypted_page = NULL,
		.compressed_page = NULL,
		.submitted = false,
		.io_type = io_type,
		.io_wbc = wbc,
		.encrypted = f2fs_encrypted_file(cc->inode),
	};
	struct dnode_of_data dn;
	struct node_info ni;
	struct compress_io_ctx *cic;
	pgoff_t start_idx = start_idx_of_cluster(cc);
	unsigned int last_index = cc->cluster_size - 1;
	loff_t psize;
	int i, err;

	if (!f2fs_trylock_op(sbi))
		return -EAGAIN;

	set_new_dnode(&dn, cc->inode, NULL, NULL, 0);

	err = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
	if (err)
		goto out_unlock_op;

	for (i = 0; i < cc->cluster_size; i++) {
		if (data_blkaddr(dn.inode, dn.node_page,
					dn.ofs_in_node + i) == NULL_ADDR)
			goto out_put_dnode;
	}

	psize = (loff_t)(cc->rpages[last_index]->index + 1) << PAGE_SHIFT;

	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
	if (err)
		goto out_put_dnode;

	fio.version = ni.version;

	cic = f2fs_kzalloc(sbi, sizeof(struct compress_io_ctx), GFP_NOFS);
	if (!cic)
		goto out_put_dnode;

	cic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
	cic->inode = inode;
	refcount_set(&cic->ref, 1);
	cic->rpages = f2fs_kzalloc(sbi, sizeof(struct page *) <<
			cc->log_cluster_size, GFP_NOFS);
	if (!cic->rpages)
		goto out_put_cic;

	cic->nr_rpages = cc->cluster_size;

	for (i = 0; i < cc->nr_cpages; i++) {
		f2fs_set_compressed_page(cc->cpages[i], inode,
					cc->rpages[i + 1]->index,
					cic, i ? &cic->ref : NULL);
		fio.compressed_page = cc->cpages[i];
		if (fio.encrypted) {
			fio.page = cc->rpages[i + 1];
			err = f2fs_encrypt_one_page(&fio);
			if (err)
				goto out_destroy_crypt;
			cc->cpages[i] = fio.encrypted_page;
		}
	}

	set_cluster_writeback(cc);

	for (i = 0; i < cc->cluster_size; i++)
		cic->rpages[i] = cc->rpages[i];

	for (i = 0; i < cc->cluster_size; i++, dn.ofs_in_node++) {
		block_t blkaddr;

		blkaddr = f2fs_data_blkaddr(&dn);
		fio.page = cc->rpages[i];
		fio.old_blkaddr = blkaddr;

		/* cluster header */
		if (i == 0) {
			if (blkaddr == COMPRESS_ADDR)
				fio.compr_blocks++;
			if (__is_valid_data_blkaddr(blkaddr))
				f2fs_invalidate_blocks(sbi, blkaddr);
			f2fs_update_data_blkaddr(&dn, COMPRESS_ADDR);
			goto unlock_continue;
		}

		if (fio.compr_blocks && __is_valid_data_blkaddr(blkaddr))
			fio.compr_blocks++;

		if (i > cc->nr_cpages) {
			if (__is_valid_data_blkaddr(blkaddr)) {
				f2fs_invalidate_blocks(sbi, blkaddr);
				f2fs_update_data_blkaddr(&dn, NEW_ADDR);
			}
			goto unlock_continue;
		}

		f2fs_bug_on(fio.sbi, blkaddr == NULL_ADDR);

		if (fio.encrypted)
			fio.encrypted_page = cc->cpages[i - 1];
		else
			fio.compressed_page = cc->cpages[i - 1];

		cc->cpages[i - 1] = NULL;
		f2fs_outplace_write_data(&dn, &fio);
		(*submitted)++;
unlock_continue:
		inode_dec_dirty_pages(cc->inode);
		unlock_page(fio.page);
	}

	if (fio.compr_blocks)
		f2fs_i_compr_blocks_update(inode, fio.compr_blocks - 1, false);
	f2fs_i_compr_blocks_update(inode, cc->nr_cpages, true);

	set_inode_flag(cc->inode, FI_APPEND_WRITE);
	if (cc->cluster_idx == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);

	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);

	spin_lock(&fi->i_size_lock);
	if (fi->last_disk_size < psize)
		fi->last_disk_size = psize;
	spin_unlock(&fi->i_size_lock);

	f2fs_put_rpages(cc);
	f2fs_destroy_compress_ctx(cc);
	return 0;

out_destroy_crypt:
	kfree(cic->rpages);

	for (--i; i >= 0; i--)
		fscrypt_finalize_bounce_page(&cc->cpages[i]);
	for (i = 0; i < cc->nr_cpages; i++) {
		if (!cc->cpages[i])
			continue;
		f2fs_put_page(cc->cpages[i], 1);
	}
out_put_cic:
	kfree(cic);
out_put_dnode:
	f2fs_put_dnode(&dn);
out_unlock_op:
	f2fs_unlock_op(sbi);
	return -EAGAIN;
}

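/*
 * Write completion for a compressed page; the last completer ends
 * writeback on all raw pages of the cluster.
 */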
void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct compress_io_ctx *cic =
			(struct compress_io_ctx *)page_private(page);
	int i;

	if (unlikely(bio->bi_status))
		mapping_set_error(cic->inode->i_mapping, -EIO);

	f2fs_put_compressed_page(page);

	dec_page_count(sbi, F2FS_WB_DATA);

	if (refcount_dec_not_one(&cic->ref))
		return;

	for (i = 0; i < cic->nr_rpages; i++) {
		WARN_ON(!cic->rpages[i]);
		clear_cold_data(cic->rpages[i]);
		end_page_writeback(cic->rpages[i]);
	}

	kfree(cic->rpages);
	kfree(cic);
}

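/*
 * Fallback: write the cluster's dirty pages uncompressed through the
 * regular single-page write path.
 */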
static int f2fs_write_raw_pages(struct compress_ctx *cc,
					int *submitted,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	struct address_space *mapping = cc->inode->i_mapping;
	int _submitted, compr_blocks, ret;
	int i = -1, err = 0;

	compr_blocks = f2fs_compressed_blocks(cc);
	if (compr_blocks < 0) {
		err = compr_blocks;
		goto out_err;
	}

	for (i = 0; i < cc->cluster_size; i++) {
		if (!cc->rpages[i])
			continue;
retry_write:
		if (cc->rpages[i]->mapping != mapping) {
			unlock_page(cc->rpages[i]);
			continue;
		}

		BUG_ON(!PageLocked(cc->rpages[i]));

		ret = f2fs_write_single_data_page(cc->rpages[i], &_submitted,
						NULL, NULL, wbc, io_type,
						compr_blocks);
		if (ret) {
			if (ret == AOP_WRITEPAGE_ACTIVATE) {
				unlock_page(cc->rpages[i]);
				ret = 0;
			} else if (ret == -EAGAIN) {
				ret = 0;
				cond_resched();
				congestion_wait(BLK_RW_ASYNC,
						DEFAULT_IO_TIMEOUT);
				lock_page(cc->rpages[i]);
				clear_page_dirty_for_io(cc->rpages[i]);
				goto retry_write;
			}
			err = ret;
			goto out_err;
		}

		*submitted += _submitted;
	}
	return 0;

out_err:
	/* TODO: revoke partially updated block addresses */
	BUG_ON(compr_blocks);
	for (++i; i < cc->cluster_size; i++) {
		if (!cc->rpages[i])
			continue;
		redirty_page_for_writepage(wbc, cc->rpages[i]);
		unlock_page(cc->rpages[i]);
	}
	return err;
}

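/*
 * Cluster writeback entry point: try the compressed path first and fall
 * back to raw page writes if the cluster cannot (or fails to) compress.
 */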
int f2fs_write_multi_pages(struct compress_ctx *cc,
					int *submitted,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	struct f2fs_inode_info *fi = F2FS_I(cc->inode);
	const struct f2fs_compress_ops *cops =
			f2fs_cops[fi->i_compress_algorithm];
	int err;

	*submitted = 0;
	if (cluster_may_compress(cc)) {
		err = f2fs_compress_pages(cc);
		if (err == -EAGAIN) {
			goto write;
		} else if (err) {
			f2fs_put_rpages_wbc(cc, wbc, true, 1);
			goto destroy_out;
		}

		err = f2fs_write_compressed_pages(cc, submitted,
							wbc, io_type);
		cops->destroy_compress_ctx(cc);
		if (!err)
			return 0;
		f2fs_bug_on(F2FS_I_SB(cc->inode), err != -EAGAIN);
	}
write:
	f2fs_bug_on(F2FS_I_SB(cc->inode), *submitted);

	err = f2fs_write_raw_pages(cc, submitted, wbc, io_type);
	f2fs_put_rpages_wbc(cc, wbc, false, 0);
destroy_out:
	f2fs_destroy_compress_ctx(cc);
	return err;
}

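/*
 * Allocate the decompression context for a cluster read. tpages[] is the
 * decompression target: it aliases the caller's raw pages where present
 * and uses temporary pages for the holes.
 */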
struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
	struct decompress_io_ctx *dic;
	pgoff_t start_idx = start_idx_of_cluster(cc);
	int i;

	dic = f2fs_kzalloc(sbi, sizeof(struct decompress_io_ctx), GFP_NOFS);
	if (!dic)
		return ERR_PTR(-ENOMEM);

	dic->rpages = f2fs_kzalloc(sbi, sizeof(struct page *) <<
			cc->log_cluster_size, GFP_NOFS);
	if (!dic->rpages) {
		kfree(dic);
		return ERR_PTR(-ENOMEM);
	}

	dic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
	dic->inode = cc->inode;
	refcount_set(&dic->ref, 1);
	dic->cluster_idx = cc->cluster_idx;
	dic->cluster_size = cc->cluster_size;
	dic->log_cluster_size = cc->log_cluster_size;
	dic->nr_cpages = cc->nr_cpages;
	dic->failed = false;

	for (i = 0; i < dic->cluster_size; i++)
		dic->rpages[i] = cc->rpages[i];
	dic->nr_rpages = cc->cluster_size;

	dic->cpages = f2fs_kzalloc(sbi, sizeof(struct page *) *
					dic->nr_cpages, GFP_NOFS);
	if (!dic->cpages)
		goto out_free;

	for (i = 0; i < dic->nr_cpages; i++) {
		struct page *page;

		page = f2fs_grab_page();
		if (!page)
			goto out_free;

		f2fs_set_compressed_page(page, cc->inode,
					start_idx + i + 1,
					dic, i ? &dic->ref : NULL);
		dic->cpages[i] = page;
	}

	dic->tpages = f2fs_kzalloc(sbi, sizeof(struct page *) *
					dic->cluster_size, GFP_NOFS);
	if (!dic->tpages)
		goto out_free;

	for (i = 0; i < dic->cluster_size; i++) {
		if (cc->rpages[i])
			continue;

		dic->tpages[i] = f2fs_grab_page();
		if (!dic->tpages[i])
			goto out_free;
	}

	for (i = 0; i < dic->cluster_size; i++) {
		if (dic->tpages[i])
			continue;
		dic->tpages[i] = cc->rpages[i];
	}

	return dic;

out_free:
	f2fs_free_dic(dic);
	return ERR_PTR(-ENOMEM);
}

void f2fs_free_dic(struct decompress_io_ctx *dic)
{
	int i;

	if (dic->tpages) {
		for (i = 0; i < dic->cluster_size; i++) {
			if (dic->rpages[i])
				continue;
			/* may be called on a partially built dic */
			if (!dic->tpages[i])
				continue;
			unlock_page(dic->tpages[i]);
			put_page(dic->tpages[i]);
		}
		kfree(dic->tpages);
	}

	if (dic->cpages) {
		for (i = 0; i < dic->nr_cpages; i++) {
			if (!dic->cpages[i])
				continue;
			f2fs_put_compressed_page(dic->cpages[i]);
		}
		kfree(dic->cpages);
	}

	kfree(dic->rpages);
	kfree(dic);
}

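/*
 * Finish a cluster read: mark raw pages uptodate (after optional
 * fs-verity verification) or errored, then unlock them.
 */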
void f2fs_decompress_end_io(struct page **rpages,
			unsigned int cluster_size, bool err, bool verity)
{
	int i;

	for (i = 0; i < cluster_size; i++) {
		struct page *rpage = rpages[i];

		if (!rpage)
			continue;

		if (err || PageError(rpage)) {
			ClearPageUptodate(rpage);
			ClearPageError(rpage);
		} else {
			if (!verity || fsverity_verify_page(rpage))
				SetPageUptodate(rpage);
			else
				SetPageError(rpage);
		}
		unlock_page(rpage);
	}
}