fs/f2fs/compress.c [linux-block.git]
4c8ff709
CY
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * f2fs compress support
4 *
5 * Copyright (c) 2019 Chao Yu <chao@kernel.org>
6 */
7
8#include <linux/fs.h>
9#include <linux/f2fs_fs.h>
e41d12f5 10#include <linux/moduleparam.h>
4c8ff709
CY
11#include <linux/writeback.h>
12#include <linux/backing-dev.h>
13#include <linux/lzo.h>
14#include <linux/lz4.h>
50cfa66f 15#include <linux/zstd.h>
6ce19aff 16#include <linux/pagevec.h>
4c8ff709
CY
17
18#include "f2fs.h"
19#include "node.h"
6ce19aff 20#include "segment.h"
4c8ff709
CY
21#include <trace/events/f2fs.h>
22
c68d6c88
CY
23static struct kmem_cache *cic_entry_slab;
24static struct kmem_cache *dic_entry_slab;
25
31083031
CY
26static void *page_array_alloc(struct inode *inode, int nr)
27{
28 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
29 unsigned int size = sizeof(struct page *) * nr;
30
31 if (likely(size <= sbi->page_array_slab_size))
32410577
CY
32 return f2fs_kmem_cache_alloc(sbi->page_array_slab,
33 GFP_F2FS_ZERO, false, F2FS_I_SB(inode));
31083031
CY
34 return f2fs_kzalloc(sbi, size, GFP_NOFS);
35}
36
37static void page_array_free(struct inode *inode, void *pages, int nr)
38{
39 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
40 unsigned int size = sizeof(struct page *) * nr;
41
42 if (!pages)
43 return;
44
45 if (likely(size <= sbi->page_array_slab_size))
46 kmem_cache_free(sbi->page_array_slab, pages);
47 else
48 kfree(pages);
49}
50
4c8ff709
CY
51struct f2fs_compress_ops {
52 int (*init_compress_ctx)(struct compress_ctx *cc);
53 void (*destroy_compress_ctx)(struct compress_ctx *cc);
54 int (*compress_pages)(struct compress_ctx *cc);
23b1faaa
CY
55 int (*init_decompress_ctx)(struct decompress_io_ctx *dic);
56 void (*destroy_decompress_ctx)(struct decompress_io_ctx *dic);
4c8ff709
CY
57 int (*decompress_pages)(struct decompress_io_ctx *dic);
58};
59
60static unsigned int offset_in_cluster(struct compress_ctx *cc, pgoff_t index)
61{
62 return index & (cc->cluster_size - 1);
63}
64
65static pgoff_t cluster_idx(struct compress_ctx *cc, pgoff_t index)
66{
67 return index >> cc->log_cluster_size;
68}
69
70static pgoff_t start_idx_of_cluster(struct compress_ctx *cc)
71{
72 return cc->cluster_idx << cc->log_cluster_size;
73}
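/*
 * Editorial note (illustrative example, not part of the original source):
 * with log_cluster_size == 2 a cluster covers 1 << 2 == 4 pages, so for
 * page index 9 the helpers above yield
 *   offset_in_cluster()    -> 9 & (4 - 1) == 1
 *   cluster_idx()          -> 9 >> 2      == 2
 *   start_idx_of_cluster() -> 2 << 2      == 8
 */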
74
75bool f2fs_is_compressed_page(struct page *page)
76{
77 if (!PagePrivate(page))
78 return false;
79 if (!page_private(page))
80 return false;
b763f3be 81 if (page_private_nonpointer(page))
4c8ff709 82 return false;
29b993c7 83
4c8ff709
CY
84 f2fs_bug_on(F2FS_M_SB(page->mapping),
85 *((u32 *)page_private(page)) != F2FS_COMPRESSED_PAGE_MAGIC);
86 return true;
87}
88
89static void f2fs_set_compressed_page(struct page *page,
887347a0 90 struct inode *inode, pgoff_t index, void *data)
4c8ff709 91{
b763f3be 92 attach_page_private(page, (void *)data);
4c8ff709
CY
93
94 /* i_crypto_info and iv index */
95 page->index = index;
96 page->mapping = inode->i_mapping;
4c8ff709
CY
97}
98
4c8ff709
CY
99static void f2fs_drop_rpages(struct compress_ctx *cc, int len, bool unlock)
100{
101 int i;
102
103 for (i = 0; i < len; i++) {
104 if (!cc->rpages[i])
105 continue;
106 if (unlock)
107 unlock_page(cc->rpages[i]);
108 else
109 put_page(cc->rpages[i]);
110 }
111}
112
113static void f2fs_put_rpages(struct compress_ctx *cc)
114{
115 f2fs_drop_rpages(cc, cc->cluster_size, false);
116}
117
118static void f2fs_unlock_rpages(struct compress_ctx *cc, int len)
119{
120 f2fs_drop_rpages(cc, len, true);
121}
122
4c8ff709
CY
123static void f2fs_put_rpages_wbc(struct compress_ctx *cc,
124 struct writeback_control *wbc, bool redirty, int unlock)
125{
126 unsigned int i;
127
128 for (i = 0; i < cc->cluster_size; i++) {
129 if (!cc->rpages[i])
130 continue;
131 if (redirty)
132 redirty_page_for_writepage(wbc, cc->rpages[i]);
133 f2fs_put_page(cc->rpages[i], unlock);
134 }
135}
136
137struct page *f2fs_compress_control_page(struct page *page)
138{
139 return ((struct compress_io_ctx *)page_private(page))->rpages[0];
140}
141
142int f2fs_init_compress_ctx(struct compress_ctx *cc)
143{
adfc6943 144 if (cc->rpages)
4c8ff709
CY
145 return 0;
146
31083031 147 cc->rpages = page_array_alloc(cc->inode, cc->cluster_size);
4c8ff709
CY
148 return cc->rpages ? 0 : -ENOMEM;
149}
150
8bfbfb0d 151void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse)
4c8ff709 152{
31083031 153 page_array_free(cc->inode, cc->rpages, cc->cluster_size);
4c8ff709
CY
154 cc->rpages = NULL;
155 cc->nr_rpages = 0;
156 cc->nr_cpages = 0;
3271d7eb 157 cc->valid_nr_cpages = 0;
8bfbfb0d
CY
158 if (!reuse)
159 cc->cluster_idx = NULL_CLUSTER;
4c8ff709
CY
160}
161
162void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page)
163{
164 unsigned int cluster_ofs;
165
166 if (!f2fs_cluster_can_merge_page(cc, page->index))
167 f2fs_bug_on(F2FS_I_SB(cc->inode), 1);
168
169 cluster_ofs = offset_in_cluster(cc, page->index);
170 cc->rpages[cluster_ofs] = page;
171 cc->nr_rpages++;
172 cc->cluster_idx = cluster_idx(cc, page->index);
173}
174
175#ifdef CONFIG_F2FS_FS_LZO
176static int lzo_init_compress_ctx(struct compress_ctx *cc)
177{
178 cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
179 LZO1X_MEM_COMPRESS, GFP_NOFS);
180 if (!cc->private)
181 return -ENOMEM;
182
183 cc->clen = lzo1x_worst_compress(PAGE_SIZE << cc->log_cluster_size);
184 return 0;
185}
186
187static void lzo_destroy_compress_ctx(struct compress_ctx *cc)
188{
189 kvfree(cc->private);
190 cc->private = NULL;
191}
192
193static int lzo_compress_pages(struct compress_ctx *cc)
194{
195 int ret;
196
197 ret = lzo1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
198 &cc->clen, cc->private);
199 if (ret != LZO_E_OK) {
200 printk_ratelimited("%sF2FS-fs (%s): lzo compress failed, ret:%d\n",
201 KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
202 return -EIO;
203 }
204 return 0;
205}
206
207static int lzo_decompress_pages(struct decompress_io_ctx *dic)
208{
209 int ret;
210
211 ret = lzo1x_decompress_safe(dic->cbuf->cdata, dic->clen,
212 dic->rbuf, &dic->rlen);
213 if (ret != LZO_E_OK) {
214 printk_ratelimited("%sF2FS-fs (%s): lzo decompress failed, ret:%d\n",
215 KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
216 return -EIO;
217 }
218
219 if (dic->rlen != PAGE_SIZE << dic->log_cluster_size) {
220 printk_ratelimited("%sF2FS-fs (%s): lzo invalid rlen:%zu, "
221 "expected:%lu\n", KERN_ERR,
222 F2FS_I_SB(dic->inode)->sb->s_id,
223 dic->rlen,
224 PAGE_SIZE << dic->log_cluster_size);
225 return -EIO;
226 }
227 return 0;
228}
229
230static const struct f2fs_compress_ops f2fs_lzo_ops = {
231 .init_compress_ctx = lzo_init_compress_ctx,
232 .destroy_compress_ctx = lzo_destroy_compress_ctx,
233 .compress_pages = lzo_compress_pages,
234 .decompress_pages = lzo_decompress_pages,
235};
236#endif
237
238#ifdef CONFIG_F2FS_FS_LZ4
239static int lz4_init_compress_ctx(struct compress_ctx *cc)
240{
3fde13f8
CY
241 unsigned int size = LZ4_MEM_COMPRESS;
242
243#ifdef CONFIG_F2FS_FS_LZ4HC
b90e5086 244 if (F2FS_I(cc->inode)->i_compress_level)
3fde13f8
CY
245 size = LZ4HC_MEM_COMPRESS;
246#endif
247
248 cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode), size, GFP_NOFS);
4c8ff709
CY
249 if (!cc->private)
250 return -ENOMEM;
251
f6644143
CY
 252 /*
 253 * we do not set cc->clen to LZ4_compressBound(inputsize) to cover
 254 * the worst compression case, because the lz4 compressor handles a
 255 * limited output budget properly on its own.
 256 */
257 cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
4c8ff709
CY
258 return 0;
259}
260
261static void lz4_destroy_compress_ctx(struct compress_ctx *cc)
262{
263 kvfree(cc->private);
264 cc->private = NULL;
265}
266
3094e557 267static int lz4_compress_pages(struct compress_ctx *cc)
3fde13f8 268{
3094e557 269 int len = -EINVAL;
b90e5086 270 unsigned char level = F2FS_I(cc->inode)->i_compress_level;
3fde13f8 271
3094e557 272 if (!level)
3fde13f8
CY
273 len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
274 cc->clen, cc->private);
3fde13f8 275#ifdef CONFIG_F2FS_FS_LZ4HC
3094e557
YL
276 else
277 len = LZ4_compress_HC(cc->rbuf, cc->cbuf->cdata, cc->rlen,
278 cc->clen, level, cc->private);
3fde13f8 279#endif
3094e557
YL
280 if (len < 0)
281 return len;
f6644143
CY
282 if (!len)
283 return -EAGAIN;
284
4c8ff709
CY
285 cc->clen = len;
286 return 0;
287}
288
289static int lz4_decompress_pages(struct decompress_io_ctx *dic)
290{
291 int ret;
292
293 ret = LZ4_decompress_safe(dic->cbuf->cdata, dic->rbuf,
294 dic->clen, dic->rlen);
295 if (ret < 0) {
296 printk_ratelimited("%sF2FS-fs (%s): lz4 decompress failed, ret:%d\n",
297 KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
298 return -EIO;
299 }
300
301 if (ret != PAGE_SIZE << dic->log_cluster_size) {
d284af43 302 printk_ratelimited("%sF2FS-fs (%s): lz4 invalid ret:%d, "
4c8ff709 303 "expected:%lu\n", KERN_ERR,
d284af43 304 F2FS_I_SB(dic->inode)->sb->s_id, ret,
4c8ff709
CY
305 PAGE_SIZE << dic->log_cluster_size);
306 return -EIO;
307 }
308 return 0;
309}
310
311static const struct f2fs_compress_ops f2fs_lz4_ops = {
312 .init_compress_ctx = lz4_init_compress_ctx,
313 .destroy_compress_ctx = lz4_destroy_compress_ctx,
314 .compress_pages = lz4_compress_pages,
315 .decompress_pages = lz4_decompress_pages,
316};
317#endif
318
50cfa66f
CY
319#ifdef CONFIG_F2FS_FS_ZSTD
320#define F2FS_ZSTD_DEFAULT_CLEVEL 1
321
322static int zstd_init_compress_ctx(struct compress_ctx *cc)
323{
cf30f6a5
NT
324 zstd_parameters params;
325 zstd_cstream *stream;
50cfa66f
CY
326 void *workspace;
327 unsigned int workspace_size;
b90e5086 328 unsigned char level = F2FS_I(cc->inode)->i_compress_level;
3fde13f8
CY
329
330 if (!level)
331 level = F2FS_ZSTD_DEFAULT_CLEVEL;
50cfa66f 332
4ff23a65 333 params = zstd_get_params(level, cc->rlen);
cf30f6a5 334 workspace_size = zstd_cstream_workspace_bound(&params.cParams);
50cfa66f
CY
335
336 workspace = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
337 workspace_size, GFP_NOFS);
338 if (!workspace)
339 return -ENOMEM;
340
cf30f6a5 341 stream = zstd_init_cstream(&params, 0, workspace, workspace_size);
50cfa66f 342 if (!stream) {
cf30f6a5 343 printk_ratelimited("%sF2FS-fs (%s): %s zstd_init_cstream failed\n",
50cfa66f
CY
344 KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
345 __func__);
346 kvfree(workspace);
347 return -EIO;
348 }
349
350 cc->private = workspace;
351 cc->private2 = stream;
352
353 cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
354 return 0;
355}
356
357static void zstd_destroy_compress_ctx(struct compress_ctx *cc)
358{
359 kvfree(cc->private);
360 cc->private = NULL;
361 cc->private2 = NULL;
362}
363
364static int zstd_compress_pages(struct compress_ctx *cc)
365{
cf30f6a5
NT
366 zstd_cstream *stream = cc->private2;
367 zstd_in_buffer inbuf;
368 zstd_out_buffer outbuf;
50cfa66f
CY
369 int src_size = cc->rlen;
370 int dst_size = src_size - PAGE_SIZE - COMPRESS_HEADER_SIZE;
371 int ret;
372
373 inbuf.pos = 0;
374 inbuf.src = cc->rbuf;
375 inbuf.size = src_size;
376
377 outbuf.pos = 0;
378 outbuf.dst = cc->cbuf->cdata;
379 outbuf.size = dst_size;
380
cf30f6a5
NT
381 ret = zstd_compress_stream(stream, &outbuf, &inbuf);
382 if (zstd_is_error(ret)) {
383 printk_ratelimited("%sF2FS-fs (%s): %s zstd_compress_stream failed, ret: %d\n",
50cfa66f 384 KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
cf30f6a5 385 __func__, zstd_get_error_code(ret));
50cfa66f
CY
386 return -EIO;
387 }
388
cf30f6a5
NT
389 ret = zstd_end_stream(stream, &outbuf);
390 if (zstd_is_error(ret)) {
391 printk_ratelimited("%sF2FS-fs (%s): %s zstd_end_stream returned %d\n",
50cfa66f 392 KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
cf30f6a5 393 __func__, zstd_get_error_code(ret));
50cfa66f
CY
394 return -EIO;
395 }
396
1454c978
CY
 397 /*
 398 * compressed data remains in the intermediate buffer because there
 399 * is no more space left in cbuf.cdata
 400 */
401 if (ret)
402 return -EAGAIN;
403
50cfa66f
CY
404 cc->clen = outbuf.pos;
405 return 0;
406}
407
408static int zstd_init_decompress_ctx(struct decompress_io_ctx *dic)
409{
cf30f6a5 410 zstd_dstream *stream;
50cfa66f
CY
411 void *workspace;
412 unsigned int workspace_size;
0e2b7385
CY
413 unsigned int max_window_size =
414 MAX_COMPRESS_WINDOW_SIZE(dic->log_cluster_size);
50cfa66f 415
cf30f6a5 416 workspace_size = zstd_dstream_workspace_bound(max_window_size);
50cfa66f
CY
417
418 workspace = f2fs_kvmalloc(F2FS_I_SB(dic->inode),
419 workspace_size, GFP_NOFS);
420 if (!workspace)
421 return -ENOMEM;
422
cf30f6a5 423 stream = zstd_init_dstream(max_window_size, workspace, workspace_size);
50cfa66f 424 if (!stream) {
cf30f6a5 425 printk_ratelimited("%sF2FS-fs (%s): %s zstd_init_dstream failed\n",
50cfa66f
CY
426 KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
427 __func__);
428 kvfree(workspace);
429 return -EIO;
430 }
431
432 dic->private = workspace;
433 dic->private2 = stream;
434
435 return 0;
436}
437
438static void zstd_destroy_decompress_ctx(struct decompress_io_ctx *dic)
439{
440 kvfree(dic->private);
441 dic->private = NULL;
442 dic->private2 = NULL;
443}
444
445static int zstd_decompress_pages(struct decompress_io_ctx *dic)
446{
cf30f6a5
NT
447 zstd_dstream *stream = dic->private2;
448 zstd_in_buffer inbuf;
449 zstd_out_buffer outbuf;
50cfa66f
CY
450 int ret;
451
452 inbuf.pos = 0;
453 inbuf.src = dic->cbuf->cdata;
454 inbuf.size = dic->clen;
455
456 outbuf.pos = 0;
457 outbuf.dst = dic->rbuf;
458 outbuf.size = dic->rlen;
459
cf30f6a5
NT
460 ret = zstd_decompress_stream(stream, &outbuf, &inbuf);
461 if (zstd_is_error(ret)) {
462 printk_ratelimited("%sF2FS-fs (%s): %s zstd_decompress_stream failed, ret: %d\n",
50cfa66f 463 KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
cf30f6a5 464 __func__, zstd_get_error_code(ret));
50cfa66f
CY
465 return -EIO;
466 }
467
468 if (dic->rlen != outbuf.pos) {
469 printk_ratelimited("%sF2FS-fs (%s): %s ZSTD invalid rlen:%zu, "
470 "expected:%lu\n", KERN_ERR,
471 F2FS_I_SB(dic->inode)->sb->s_id,
472 __func__, dic->rlen,
473 PAGE_SIZE << dic->log_cluster_size);
474 return -EIO;
475 }
476
477 return 0;
478}
479
480static const struct f2fs_compress_ops f2fs_zstd_ops = {
481 .init_compress_ctx = zstd_init_compress_ctx,
482 .destroy_compress_ctx = zstd_destroy_compress_ctx,
483 .compress_pages = zstd_compress_pages,
484 .init_decompress_ctx = zstd_init_decompress_ctx,
485 .destroy_decompress_ctx = zstd_destroy_decompress_ctx,
486 .decompress_pages = zstd_decompress_pages,
487};
488#endif
489
6d92b201
CY
490#ifdef CONFIG_F2FS_FS_LZO
491#ifdef CONFIG_F2FS_FS_LZORLE
492static int lzorle_compress_pages(struct compress_ctx *cc)
493{
494 int ret;
495
496 ret = lzorle1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
497 &cc->clen, cc->private);
498 if (ret != LZO_E_OK) {
499 printk_ratelimited("%sF2FS-fs (%s): lzo-rle compress failed, ret:%d\n",
500 KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
501 return -EIO;
502 }
503 return 0;
504}
505
506static const struct f2fs_compress_ops f2fs_lzorle_ops = {
507 .init_compress_ctx = lzo_init_compress_ctx,
508 .destroy_compress_ctx = lzo_destroy_compress_ctx,
509 .compress_pages = lzorle_compress_pages,
510 .decompress_pages = lzo_decompress_pages,
511};
512#endif
513#endif
514
4c8ff709
CY
515static const struct f2fs_compress_ops *f2fs_cops[COMPRESS_MAX] = {
516#ifdef CONFIG_F2FS_FS_LZO
517 &f2fs_lzo_ops,
518#else
519 NULL,
520#endif
521#ifdef CONFIG_F2FS_FS_LZ4
522 &f2fs_lz4_ops,
523#else
524 NULL,
525#endif
50cfa66f
CY
526#ifdef CONFIG_F2FS_FS_ZSTD
527 &f2fs_zstd_ops,
528#else
529 NULL,
530#endif
6d92b201
CY
531#if defined(CONFIG_F2FS_FS_LZO) && defined(CONFIG_F2FS_FS_LZORLE)
532 &f2fs_lzorle_ops,
533#else
534 NULL,
535#endif
4c8ff709
CY
536};
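/*
 * Editorial note: the table above is indexed by the per-inode
 * i_compress_algorithm value (see f2fs_is_compress_backend_ready()
 * below); a NULL slot means the corresponding algorithm was not built
 * into the kernel, so compression with it is reported as unavailable.
 */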
537
538bool f2fs_is_compress_backend_ready(struct inode *inode)
539{
540 if (!f2fs_compressed_file(inode))
541 return true;
542 return f2fs_cops[F2FS_I(inode)->i_compress_algorithm];
543}
544
99bbe307 545static mempool_t *compress_page_pool;
5e6bbde9
CY
546static int num_compress_pages = 512;
547module_param(num_compress_pages, uint, 0444);
548MODULE_PARM_DESC(num_compress_pages,
549 "Number of intermediate compress pages to preallocate");
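/*
 * Editorial usage note: with permissions 0444 the value is exposed
 * read-only at /sys/module/f2fs/parameters/num_compress_pages and can
 * only be changed at load time, e.g. "modprobe f2fs
 * num_compress_pages=1024" (or "f2fs.num_compress_pages=1024" on the
 * kernel command line when f2fs is built in).
 */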
550
a1357a91 551int __init f2fs_init_compress_mempool(void)
5e6bbde9
CY
552{
553 compress_page_pool = mempool_create_page_pool(num_compress_pages, 0);
870af777 554 return compress_page_pool ? 0 : -ENOMEM;
5e6bbde9
CY
555}
556
557void f2fs_destroy_compress_mempool(void)
558{
559 mempool_destroy(compress_page_pool);
560}
561
562static struct page *f2fs_compress_alloc_page(void)
4c8ff709
CY
563{
564 struct page *page;
565
5e6bbde9 566 page = mempool_alloc(compress_page_pool, GFP_NOFS);
4c8ff709 567 lock_page(page);
5e6bbde9 568
4c8ff709
CY
569 return page;
570}
571
5e6bbde9
CY
572static void f2fs_compress_free_page(struct page *page)
573{
574 if (!page)
575 return;
b763f3be 576 detach_page_private(page);
5e6bbde9
CY
577 page->mapping = NULL;
578 unlock_page(page);
579 mempool_free(page, compress_page_pool);
580}
581
6fcaebac
DJ
582#define MAX_VMAP_RETRIES 3
583
584static void *f2fs_vmap(struct page **pages, unsigned int count)
585{
586 int i;
587 void *buf = NULL;
588
589 for (i = 0; i < MAX_VMAP_RETRIES; i++) {
590 buf = vm_map_ram(pages, count, -1);
591 if (buf)
592 break;
593 vm_unmap_aliases();
594 }
595 return buf;
596}
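/*
 * Editorial note: vm_map_ram() can fail transiently when the vmap
 * address space is fragmented; vm_unmap_aliases() purges lazily freed
 * vmap areas, so retrying up to MAX_VMAP_RETRIES times gives the
 * mapping a chance to succeed before giving up.
 */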
597
4c8ff709
CY
598static int f2fs_compress_pages(struct compress_ctx *cc)
599{
4c8ff709
CY
600 struct f2fs_inode_info *fi = F2FS_I(cc->inode);
601 const struct f2fs_compress_ops *cops =
602 f2fs_cops[fi->i_compress_algorithm];
31083031 603 unsigned int max_len, new_nr_cpages;
b28f047b 604 u32 chksum = 0;
4c8ff709
CY
605 int i, ret;
606
607 trace_f2fs_compress_pages_start(cc->inode, cc->cluster_idx,
608 cc->cluster_size, fi->i_compress_algorithm);
609
23b1faaa
CY
610 if (cops->init_compress_ctx) {
611 ret = cops->init_compress_ctx(cc);
612 if (ret)
613 goto out;
614 }
4c8ff709
CY
615
616 max_len = COMPRESS_HEADER_SIZE + cc->clen;
617 cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);
3271d7eb 618 cc->valid_nr_cpages = cc->nr_cpages;
4c8ff709 619
31083031 620 cc->cpages = page_array_alloc(cc->inode, cc->nr_cpages);
4c8ff709
CY
621 if (!cc->cpages) {
622 ret = -ENOMEM;
623 goto destroy_compress_ctx;
624 }
625
626 for (i = 0; i < cc->nr_cpages; i++) {
5e6bbde9 627 cc->cpages[i] = f2fs_compress_alloc_page();
4c8ff709
CY
628 if (!cc->cpages[i]) {
629 ret = -ENOMEM;
630 goto out_free_cpages;
631 }
632 }
633
6fcaebac 634 cc->rbuf = f2fs_vmap(cc->rpages, cc->cluster_size);
4c8ff709
CY
635 if (!cc->rbuf) {
636 ret = -ENOMEM;
637 goto out_free_cpages;
638 }
639
6fcaebac 640 cc->cbuf = f2fs_vmap(cc->cpages, cc->nr_cpages);
4c8ff709
CY
641 if (!cc->cbuf) {
642 ret = -ENOMEM;
643 goto out_vunmap_rbuf;
644 }
645
646 ret = cops->compress_pages(cc);
647 if (ret)
648 goto out_vunmap_cbuf;
649
650 max_len = PAGE_SIZE * (cc->cluster_size - 1) - COMPRESS_HEADER_SIZE;
651
652 if (cc->clen > max_len) {
653 ret = -EAGAIN;
654 goto out_vunmap_cbuf;
655 }
656
657 cc->cbuf->clen = cpu_to_le32(cc->clen);
4c8ff709 658
447286eb 659 if (fi->i_compress_flag & BIT(COMPRESS_CHKSUM))
b28f047b
CY
660 chksum = f2fs_crc32(F2FS_I_SB(cc->inode),
661 cc->cbuf->cdata, cc->clen);
662 cc->cbuf->chksum = cpu_to_le32(chksum);
663
4c8ff709
CY
664 for (i = 0; i < COMPRESS_DATA_RESERVED_SIZE; i++)
665 cc->cbuf->reserved[i] = cpu_to_le32(0);
666
31083031
CY
667 new_nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);
668
7fa6d598
EB
669 /* zero out any unused part of the last page */
670 memset(&cc->cbuf->cdata[cc->clen], 0,
31083031
CY
671 (new_nr_cpages * PAGE_SIZE) -
672 (cc->clen + COMPRESS_HEADER_SIZE));
7fa6d598 673
6fcaebac
DJ
674 vm_unmap_ram(cc->cbuf, cc->nr_cpages);
675 vm_unmap_ram(cc->rbuf, cc->cluster_size);
4c8ff709 676
ebaaec35 677 for (i = new_nr_cpages; i < cc->nr_cpages; i++) {
5e6bbde9 678 f2fs_compress_free_page(cc->cpages[i]);
4c8ff709
CY
679 cc->cpages[i] = NULL;
680 }
681
23b1faaa
CY
682 if (cops->destroy_compress_ctx)
683 cops->destroy_compress_ctx(cc);
09ff4801 684
3271d7eb 685 cc->valid_nr_cpages = new_nr_cpages;
4c8ff709
CY
686
687 trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
688 cc->clen, ret);
689 return 0;
690
691out_vunmap_cbuf:
6fcaebac 692 vm_unmap_ram(cc->cbuf, cc->nr_cpages);
4c8ff709 693out_vunmap_rbuf:
6fcaebac 694 vm_unmap_ram(cc->rbuf, cc->cluster_size);
4c8ff709
CY
695out_free_cpages:
696 for (i = 0; i < cc->nr_cpages; i++) {
697 if (cc->cpages[i])
5e6bbde9 698 f2fs_compress_free_page(cc->cpages[i]);
4c8ff709 699 }
31083031 700 page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
4c8ff709
CY
701 cc->cpages = NULL;
702destroy_compress_ctx:
23b1faaa
CY
703 if (cops->destroy_compress_ctx)
704 cops->destroy_compress_ctx(cc);
4c8ff709
CY
705out:
706 trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
707 cc->clen, ret);
708 return ret;
709}
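/*
 * Editorial note (layout inferred from the fields used above): cc->cbuf
 * holds the on-disk form of the compressed cluster, i.e. a small header
 * (clen, chksum and the reserved words, COMPRESS_HEADER_SIZE bytes in
 * total) followed by cdata[], the cc->clen bytes of compressed payload;
 * any space left in the last compressed page is zeroed by the memset
 * above before the surplus cpages are freed.
 */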
710
bff139b4
DJ
711static int f2fs_prepare_decomp_mem(struct decompress_io_ctx *dic,
712 bool pre_alloc);
713static void f2fs_release_decomp_mem(struct decompress_io_ctx *dic,
714 bool bypass_destroy_callback, bool pre_alloc);
715
716void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task)
4c8ff709 717{
4c8ff709 718 struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
7f59b277 719 struct f2fs_inode_info *fi = F2FS_I(dic->inode);
4c8ff709
CY
720 const struct f2fs_compress_ops *cops =
721 f2fs_cops[fi->i_compress_algorithm];
bff139b4 722 bool bypass_callback = false;
4c8ff709
CY
723 int ret;
724
4c8ff709
CY
725 trace_f2fs_decompress_pages_start(dic->inode, dic->cluster_idx,
726 dic->cluster_size, fi->i_compress_algorithm);
727
4c8ff709
CY
728 if (dic->failed) {
729 ret = -EIO;
7f59b277 730 goto out_end_io;
4c8ff709
CY
731 }
732
bff139b4
DJ
733 ret = f2fs_prepare_decomp_mem(dic, false);
734 if (ret) {
735 bypass_callback = true;
736 goto out_release;
4c8ff709
CY
737 }
738
739 dic->clen = le32_to_cpu(dic->cbuf->clen);
740 dic->rlen = PAGE_SIZE << dic->log_cluster_size;
741
742 if (dic->clen > PAGE_SIZE * dic->nr_cpages - COMPRESS_HEADER_SIZE) {
743 ret = -EFSCORRUPTED;
1aa161e4
JK
744
745 /* Avoid f2fs_commit_super in irq context */
746 if (in_task)
747 f2fs_save_errors(sbi, ERROR_FAIL_DECOMPRESSION);
748 else
749 f2fs_handle_error(sbi, ERROR_FAIL_DECOMPRESSION);
bff139b4 750 goto out_release;
4c8ff709
CY
751 }
752
753 ret = cops->decompress_pages(dic);
754
447286eb 755 if (!ret && (fi->i_compress_flag & BIT(COMPRESS_CHKSUM))) {
b28f047b
CY
756 u32 provided = le32_to_cpu(dic->cbuf->chksum);
757 u32 calculated = f2fs_crc32(sbi, dic->cbuf->cdata, dic->clen);
758
759 if (provided != calculated) {
760 if (!is_inode_flag_set(dic->inode, FI_COMPRESS_CORRUPT)) {
761 set_inode_flag(dic->inode, FI_COMPRESS_CORRUPT);
762 printk_ratelimited(
763 "%sF2FS-fs (%s): checksum invalid, nid = %lu, %x vs %x",
764 KERN_INFO, sbi->sb->s_id, dic->inode->i_ino,
765 provided, calculated);
766 }
767 set_sbi_flag(sbi, SBI_NEED_FSCK);
b28f047b
CY
768 }
769 }
770
bff139b4
DJ
771out_release:
772 f2fs_release_decomp_mem(dic, bypass_callback, false);
773
7f59b277 774out_end_io:
4c8ff709
CY
775 trace_f2fs_decompress_pages_end(dic->inode, dic->cluster_idx,
776 dic->clen, ret);
bff139b4 777 f2fs_decompress_end_io(dic, ret, in_task);
7f59b277
EB
778}
779
780/*
781 * This is called when a page of a compressed cluster has been read from disk
782 * (or failed to be read from disk). It checks whether this page was the last
783 * page being waited on in the cluster, and if so, it decompresses the cluster
784 * (or in the case of a failure, cleans up without actually decompressing).
785 */
6ce19aff 786void f2fs_end_read_compressed_page(struct page *page, bool failed,
bff139b4 787 block_t blkaddr, bool in_task)
7f59b277
EB
788{
789 struct decompress_io_ctx *dic =
790 (struct decompress_io_ctx *)page_private(page);
791 struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
792
793 dec_page_count(sbi, F2FS_RD_DATA);
794
795 if (failed)
796 WRITE_ONCE(dic->failed, true);
bff139b4 797 else if (blkaddr && in_task)
6ce19aff
CY
798 f2fs_cache_compressed_page(sbi, page,
799 dic->inode->i_ino, blkaddr);
7f59b277
EB
800
801 if (atomic_dec_and_test(&dic->remaining_pages))
bff139b4 802 f2fs_decompress_cluster(dic, in_task);
4c8ff709
CY
803}
804
805static bool is_page_in_cluster(struct compress_ctx *cc, pgoff_t index)
806{
807 if (cc->cluster_idx == NULL_CLUSTER)
808 return true;
809 return cc->cluster_idx == cluster_idx(cc, index);
810}
811
812bool f2fs_cluster_is_empty(struct compress_ctx *cc)
813{
814 return cc->nr_rpages == 0;
815}
816
817static bool f2fs_cluster_is_full(struct compress_ctx *cc)
818{
819 return cc->cluster_size == cc->nr_rpages;
820}
821
822bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index)
823{
824 if (f2fs_cluster_is_empty(cc))
825 return true;
826 return is_page_in_cluster(cc, index);
827}
828
01fc4b9a 829bool f2fs_all_cluster_page_ready(struct compress_ctx *cc, struct page **pages,
4f8219f8 830 int index, int nr_pages, bool uptodate)
b368cc5e 831{
01fc4b9a 832 unsigned long pgidx = pages[index]->index;
4f8219f8 833 int i = uptodate ? 0 : 1;
b368cc5e 834
4f8219f8
FC
 835 /*
 836 * when uptodate is true, also check whether every page in the
 837 * cluster is uptodate.
 838 */
839 if (uptodate && (pgidx % cc->cluster_size))
b368cc5e
FC
840 return false;
841
4f8219f8
FC
842 if (nr_pages - index < cc->cluster_size)
843 return false;
b368cc5e 844
4f8219f8 845 for (; i < cc->cluster_size; i++) {
01fc4b9a 846 if (pages[index + i]->index != pgidx + i)
b368cc5e 847 return false;
01fc4b9a 848 if (uptodate && !PageUptodate(pages[index + i]))
b368cc5e
FC
849 return false;
850 }
851
852 return true;
853}
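/*
 * Editorial note (illustrative example): with cluster_size == 4 and
 * pages[index]->index == 8, the cluster counts as "ready" only if
 * pages[index..index + 3] map to page indexes 8..11 and, when @uptodate
 * is set, all of them are already uptodate.
 */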
854
5db479f0 855static bool cluster_has_invalid_data(struct compress_ctx *cc)
4c8ff709 856{
4c8ff709
CY
857 loff_t i_size = i_size_read(cc->inode);
858 unsigned nr_pages = DIV_ROUND_UP(i_size, PAGE_SIZE);
859 int i;
860
861 for (i = 0; i < cc->cluster_size; i++) {
862 struct page *page = cc->rpages[i];
863
8af85f71 864 f2fs_bug_on(F2FS_I_SB(cc->inode), !page);
4c8ff709
CY
865
866 /* beyond EOF */
867 if (page->index >= nr_pages)
5db479f0 868 return true;
4c8ff709 869 }
5db479f0 870 return false;
4c8ff709
CY
871}
872
bbe1da7e
CY
873bool f2fs_sanity_check_cluster(struct dnode_of_data *dn)
874{
875 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
876 unsigned int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
877 bool compressed = dn->data_blkaddr == COMPRESS_ADDR;
878 int cluster_end = 0;
879 int i;
880 char *reason = "";
881
882 if (!compressed)
883 return false;
884
885 /* [..., COMPR_ADDR, ...] */
886 if (dn->ofs_in_node % cluster_size) {
887 reason = "[*|C|*|*]";
888 goto out;
889 }
890
891 for (i = 1; i < cluster_size; i++) {
892 block_t blkaddr = data_blkaddr(dn->inode, dn->node_page,
893 dn->ofs_in_node + i);
894
895 /* [COMPR_ADDR, ..., COMPR_ADDR] */
896 if (blkaddr == COMPRESS_ADDR) {
897 reason = "[C|*|C|*]";
898 goto out;
899 }
9df6d6f9
ZQ
900 if (!__is_valid_data_blkaddr(blkaddr)) {
901 if (!cluster_end)
902 cluster_end = i;
903 continue;
904 }
905 /* [COMPR_ADDR, NULL_ADDR or NEW_ADDR, valid_blkaddr] */
906 if (cluster_end) {
907 reason = "[C|N|N|V]";
908 goto out;
bbe1da7e
CY
909 }
910 }
911 return false;
912out:
913 f2fs_warn(sbi, "access invalid cluster, ino:%lu, nid:%u, ofs_in_node:%u, reason:%s",
914 dn->inode->i_ino, dn->nid, dn->ofs_in_node, reason);
915 set_sbi_flag(sbi, SBI_NEED_FSCK);
916 return true;
917}
918
91f0fb69
CY
919static int __f2fs_cluster_blocks(struct inode *inode,
920 unsigned int cluster_idx, bool compr)
4c8ff709
CY
921{
922 struct dnode_of_data dn;
91f0fb69
CY
923 unsigned int cluster_size = F2FS_I(inode)->i_cluster_size;
924 unsigned int start_idx = cluster_idx <<
925 F2FS_I(inode)->i_log_cluster_size;
4c8ff709
CY
926 int ret;
927
91f0fb69
CY
928 set_new_dnode(&dn, inode, NULL, NULL, 0);
929 ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
4c8ff709
CY
930 if (ret) {
931 if (ret == -ENOENT)
932 ret = 0;
933 goto fail;
934 }
935
bbe1da7e
CY
936 if (f2fs_sanity_check_cluster(&dn)) {
937 ret = -EFSCORRUPTED;
95fa90c9 938 f2fs_handle_error(F2FS_I_SB(inode), ERROR_CORRUPTED_CLUSTER);
bbe1da7e
CY
939 goto fail;
940 }
941
4c8ff709
CY
942 if (dn.data_blkaddr == COMPRESS_ADDR) {
943 int i;
944
945 ret = 1;
91f0fb69 946 for (i = 1; i < cluster_size; i++) {
4c8ff709
CY
947 block_t blkaddr;
948
a2ced1ce 949 blkaddr = data_blkaddr(dn.inode,
4c8ff709 950 dn.node_page, dn.ofs_in_node + i);
1a67cbe1
CY
951 if (compr) {
952 if (__is_valid_data_blkaddr(blkaddr))
953 ret++;
954 } else {
955 if (blkaddr != NULL_ADDR)
956 ret++;
957 }
4c8ff709 958 }
8f1d4983
CY
959
960 f2fs_bug_on(F2FS_I_SB(inode),
c6140415
JK
961 !compr && ret != cluster_size &&
962 !is_inode_flag_set(inode, FI_COMPRESS_RELEASED));
4c8ff709
CY
963 }
964fail:
965 f2fs_put_dnode(&dn);
966 return ret;
967}
968
1a67cbe1
CY
969/* return # of compressed blocks in compressed cluster */
970static int f2fs_compressed_blocks(struct compress_ctx *cc)
971{
91f0fb69 972 return __f2fs_cluster_blocks(cc->inode, cc->cluster_idx, true);
1a67cbe1
CY
973}
974
975/* return # of valid blocks in compressed cluster */
4c8ff709
CY
976int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index)
977{
91f0fb69
CY
978 return __f2fs_cluster_blocks(inode,
979 index >> F2FS_I(inode)->i_log_cluster_size,
980 false);
4c8ff709
CY
981}
982
983static bool cluster_may_compress(struct compress_ctx *cc)
984{
602a16d5 985 if (!f2fs_need_compress_data(cc->inode))
4c8ff709
CY
986 return false;
987 if (f2fs_is_atomic_file(cc->inode))
988 return false;
4c8ff709
CY
989 if (!f2fs_cluster_is_full(cc))
990 return false;
dc35d73a
CY
991 if (unlikely(f2fs_cp_error(F2FS_I_SB(cc->inode))))
992 return false;
5db479f0 993 return !cluster_has_invalid_data(cc);
4c8ff709
CY
994}
995
996static void set_cluster_writeback(struct compress_ctx *cc)
997{
998 int i;
999
1000 for (i = 0; i < cc->cluster_size; i++) {
1001 if (cc->rpages[i])
1002 set_page_writeback(cc->rpages[i]);
1003 }
1004}
1005
1006static void set_cluster_dirty(struct compress_ctx *cc)
1007{
1008 int i;
1009
1010 for (i = 0; i < cc->cluster_size; i++)
1011 if (cc->rpages[i])
1012 set_page_dirty(cc->rpages[i]);
1013}
1014
1015static int prepare_compress_overwrite(struct compress_ctx *cc,
1016 struct page **pagep, pgoff_t index, void **fsdata)
1017{
1018 struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
1019 struct address_space *mapping = cc->inode->i_mapping;
1020 struct page *page;
4c8ff709
CY
1021 sector_t last_block_in_bio;
1022 unsigned fgp_flag = FGP_LOCK | FGP_WRITE | FGP_CREAT;
1023 pgoff_t start_idx = start_idx_of_cluster(cc);
1024 int i, ret;
4c8ff709
CY
1025
1026retry:
91f0fb69 1027 ret = f2fs_is_compressed_cluster(cc->inode, start_idx);
4c8ff709
CY
1028 if (ret <= 0)
1029 return ret;
1030
4c8ff709
CY
1031 ret = f2fs_init_compress_ctx(cc);
1032 if (ret)
1033 return ret;
1034
1035 /* keep page reference to avoid page reclaim */
1036 for (i = 0; i < cc->cluster_size; i++) {
1037 page = f2fs_pagecache_get_page(mapping, start_idx + i,
1038 fgp_flag, GFP_NOFS);
1039 if (!page) {
1040 ret = -ENOMEM;
1041 goto unlock_pages;
1042 }
1043
1044 if (PageUptodate(page))
a949dc5f 1045 f2fs_put_page(page, 1);
4c8ff709
CY
1046 else
1047 f2fs_compress_ctx_add_page(cc, page);
1048 }
1049
1050 if (!f2fs_cluster_is_empty(cc)) {
1051 struct bio *bio = NULL;
1052
1053 ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
0683728a 1054 &last_block_in_bio, false, true);
a949dc5f 1055 f2fs_put_rpages(cc);
8bfbfb0d 1056 f2fs_destroy_compress_ctx(cc, true);
4c8ff709 1057 if (ret)
a949dc5f 1058 goto out;
4c8ff709 1059 if (bio)
bc29835a 1060 f2fs_submit_read_bio(sbi, bio, DATA);
4c8ff709
CY
1061
1062 ret = f2fs_init_compress_ctx(cc);
1063 if (ret)
a949dc5f 1064 goto out;
4c8ff709
CY
1065 }
1066
1067 for (i = 0; i < cc->cluster_size; i++) {
1068 f2fs_bug_on(sbi, cc->rpages[i]);
1069
1070 page = find_lock_page(mapping, start_idx + i);
a949dc5f
CY
1071 if (!page) {
1072 /* page can be truncated */
1073 goto release_and_retry;
1074 }
4c8ff709
CY
1075
1076 f2fs_wait_on_page_writeback(page, DATA, true, true);
4c8ff709 1077 f2fs_compress_ctx_add_page(cc, page);
4c8ff709
CY
1078
1079 if (!PageUptodate(page)) {
a949dc5f
CY
1080release_and_retry:
1081 f2fs_put_rpages(cc);
4c8ff709 1082 f2fs_unlock_rpages(cc, i + 1);
8bfbfb0d 1083 f2fs_destroy_compress_ctx(cc, true);
4c8ff709
CY
1084 goto retry;
1085 }
1086 }
1087
4c8ff709
CY
1088 if (likely(!ret)) {
1089 *fsdata = cc->rpages;
1090 *pagep = cc->rpages[offset_in_cluster(cc, index)];
1091 return cc->cluster_size;
1092 }
1093
1094unlock_pages:
a949dc5f 1095 f2fs_put_rpages(cc);
4c8ff709 1096 f2fs_unlock_rpages(cc, i);
8bfbfb0d 1097 f2fs_destroy_compress_ctx(cc, true);
a949dc5f 1098out:
4c8ff709
CY
1099 return ret;
1100}
1101
1102int f2fs_prepare_compress_overwrite(struct inode *inode,
1103 struct page **pagep, pgoff_t index, void **fsdata)
1104{
1105 struct compress_ctx cc = {
1106 .inode = inode,
1107 .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
1108 .cluster_size = F2FS_I(inode)->i_cluster_size,
1109 .cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size,
1110 .rpages = NULL,
1111 .nr_rpages = 0,
1112 };
1113
1114 return prepare_compress_overwrite(&cc, pagep, index, fsdata);
1115}
1116
1117bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
1118 pgoff_t index, unsigned copied)
1119
1120{
1121 struct compress_ctx cc = {
31083031 1122 .inode = inode,
4c8ff709
CY
1123 .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
1124 .cluster_size = F2FS_I(inode)->i_cluster_size,
1125 .rpages = fsdata,
1126 };
1127 bool first_index = (index == cc.rpages[0]->index);
1128
1129 if (copied)
1130 set_cluster_dirty(&cc);
1131
1132 f2fs_put_rpages_wbc(&cc, NULL, false, 1);
8bfbfb0d 1133 f2fs_destroy_compress_ctx(&cc, false);
4c8ff709
CY
1134
1135 return first_index;
1136}
1137
3265d3db
CY
1138int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock)
1139{
1140 void *fsdata = NULL;
1141 struct page *pagep;
1142 int log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
1143 pgoff_t start_idx = from >> (PAGE_SHIFT + log_cluster_size) <<
1144 log_cluster_size;
1145 int err;
1146
1147 err = f2fs_is_compressed_cluster(inode, start_idx);
1148 if (err < 0)
1149 return err;
1150
1151 /* truncate normal cluster */
1152 if (!err)
1153 return f2fs_do_truncate_blocks(inode, from, lock);
1154
1155 /* truncate compressed cluster */
1156 err = f2fs_prepare_compress_overwrite(inode, &pagep,
1157 start_idx, &fsdata);
1158
1159 /* should not be a normal cluster */
1160 f2fs_bug_on(F2FS_I_SB(inode), err == 0);
1161
1162 if (err <= 0)
1163 return err;
1164
1165 if (err > 0) {
1166 struct page **rpages = fsdata;
1167 int cluster_size = F2FS_I(inode)->i_cluster_size;
1168 int i;
1169
1170 for (i = cluster_size - 1; i >= 0; i--) {
1171 loff_t start = rpages[i]->index << PAGE_SHIFT;
1172
1173 if (from <= start) {
1174 zero_user_segment(rpages[i], 0, PAGE_SIZE);
1175 } else {
1176 zero_user_segment(rpages[i], from - start,
1177 PAGE_SIZE);
1178 break;
1179 }
1180 }
1181
1182 f2fs_compress_write_end(inode, fsdata, start_idx, true);
1183 }
1184 return 0;
1185}
1186
4c8ff709
CY
1187static int f2fs_write_compressed_pages(struct compress_ctx *cc,
1188 int *submitted,
1189 struct writeback_control *wbc,
1190 enum iostat_type io_type)
1191{
1192 struct inode *inode = cc->inode;
1193 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1194 struct f2fs_inode_info *fi = F2FS_I(inode);
1195 struct f2fs_io_info fio = {
1196 .sbi = sbi,
1197 .ino = cc->inode->i_ino,
1198 .type = DATA,
1199 .op = REQ_OP_WRITE,
1200 .op_flags = wbc_to_write_flags(wbc),
1201 .old_blkaddr = NEW_ADDR,
1202 .page = NULL,
1203 .encrypted_page = NULL,
1204 .compressed_page = NULL,
2eae077e 1205 .submitted = 0,
4c8ff709
CY
1206 .io_type = io_type,
1207 .io_wbc = wbc,
2eae077e
CY
1208 .encrypted = fscrypt_inode_uses_fs_layer_crypto(cc->inode) ?
1209 1 : 0,
4c8ff709
CY
1210 };
1211 struct dnode_of_data dn;
1212 struct node_info ni;
1213 struct compress_io_ctx *cic;
1214 pgoff_t start_idx = start_idx_of_cluster(cc);
1215 unsigned int last_index = cc->cluster_size - 1;
1216 loff_t psize;
1217 int i, err;
1218
146949de 1219 /* bypass data pages so that the kworker jobs can proceed */
ee68d271
CY
1220 if (unlikely(f2fs_cp_error(sbi))) {
1221 mapping_set_error(cc->rpages[0]->mapping, -EIO);
1222 goto out_free;
1223 }
1224
79963d96
CY
1225 if (IS_NOQUOTA(inode)) {
 1226 /*
 1227 * We need to wait for node_write to avoid block allocation during
 1228 * checkpoint. This can only happen for quota writes, which could
 1229 * otherwise hit the discard race condition below.
 1230 */
e4544b63 1231 f2fs_down_read(&sbi->node_write);
79963d96 1232 } else if (!f2fs_trylock_op(sbi)) {
31083031 1233 goto out_free;
79963d96 1234 }
4c8ff709 1235
df77fbd8 1236 set_new_dnode(&dn, cc->inode, NULL, NULL, 0);
4c8ff709
CY
1237
1238 err = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
1239 if (err)
1240 goto out_unlock_op;
1241
1242 for (i = 0; i < cc->cluster_size; i++) {
a2ced1ce 1243 if (data_blkaddr(dn.inode, dn.node_page,
4c8ff709
CY
1244 dn.ofs_in_node + i) == NULL_ADDR)
1245 goto out_put_dnode;
1246 }
1247
1248 psize = (loff_t)(cc->rpages[last_index]->index + 1) << PAGE_SHIFT;
1249
a9419b63 1250 err = f2fs_get_node_info(fio.sbi, dn.nid, &ni, false);
4c8ff709
CY
1251 if (err)
1252 goto out_put_dnode;
1253
1254 fio.version = ni.version;
1255
32410577 1256 cic = f2fs_kmem_cache_alloc(cic_entry_slab, GFP_F2FS_ZERO, false, sbi);
4c8ff709
CY
1257 if (!cic)
1258 goto out_put_dnode;
1259
1260 cic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
1261 cic->inode = inode;
3271d7eb 1262 atomic_set(&cic->pending_pages, cc->valid_nr_cpages);
31083031 1263 cic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
4c8ff709
CY
1264 if (!cic->rpages)
1265 goto out_put_cic;
1266
1267 cic->nr_rpages = cc->cluster_size;
1268
3271d7eb 1269 for (i = 0; i < cc->valid_nr_cpages; i++) {
4c8ff709 1270 f2fs_set_compressed_page(cc->cpages[i], inode,
887347a0 1271 cc->rpages[i + 1]->index, cic);
4c8ff709 1272 fio.compressed_page = cc->cpages[i];
f567adb0
CY
1273
1274 fio.old_blkaddr = data_blkaddr(dn.inode, dn.node_page,
1275 dn.ofs_in_node + i + 1);
1276
1277 /* wait for GCed page writeback via META_MAPPING */
1278 f2fs_wait_on_block_writeback(inode, fio.old_blkaddr);
1279
4c8ff709
CY
1280 if (fio.encrypted) {
1281 fio.page = cc->rpages[i + 1];
1282 err = f2fs_encrypt_one_page(&fio);
1283 if (err)
1284 goto out_destroy_crypt;
1285 cc->cpages[i] = fio.encrypted_page;
1286 }
1287 }
1288
1289 set_cluster_writeback(cc);
1290
1291 for (i = 0; i < cc->cluster_size; i++)
1292 cic->rpages[i] = cc->rpages[i];
1293
1294 for (i = 0; i < cc->cluster_size; i++, dn.ofs_in_node++) {
1295 block_t blkaddr;
1296
a2ced1ce 1297 blkaddr = f2fs_data_blkaddr(&dn);
95978caa 1298 fio.page = cc->rpages[i];
4c8ff709
CY
1299 fio.old_blkaddr = blkaddr;
1300
1301 /* cluster header */
1302 if (i == 0) {
1303 if (blkaddr == COMPRESS_ADDR)
1304 fio.compr_blocks++;
1305 if (__is_valid_data_blkaddr(blkaddr))
1306 f2fs_invalidate_blocks(sbi, blkaddr);
1307 f2fs_update_data_blkaddr(&dn, COMPRESS_ADDR);
1308 goto unlock_continue;
1309 }
1310
1311 if (fio.compr_blocks && __is_valid_data_blkaddr(blkaddr))
1312 fio.compr_blocks++;
1313
3271d7eb 1314 if (i > cc->valid_nr_cpages) {
4c8ff709
CY
1315 if (__is_valid_data_blkaddr(blkaddr)) {
1316 f2fs_invalidate_blocks(sbi, blkaddr);
1317 f2fs_update_data_blkaddr(&dn, NEW_ADDR);
1318 }
1319 goto unlock_continue;
1320 }
1321
1322 f2fs_bug_on(fio.sbi, blkaddr == NULL_ADDR);
1323
1324 if (fio.encrypted)
1325 fio.encrypted_page = cc->cpages[i - 1];
1326 else
1327 fio.compressed_page = cc->cpages[i - 1];
1328
1329 cc->cpages[i - 1] = NULL;
1330 f2fs_outplace_write_data(&dn, &fio);
1331 (*submitted)++;
1332unlock_continue:
1333 inode_dec_dirty_pages(cc->inode);
1334 unlock_page(fio.page);
1335 }
1336
1337 if (fio.compr_blocks)
1338 f2fs_i_compr_blocks_update(inode, fio.compr_blocks - 1, false);
3271d7eb
FC
1339 f2fs_i_compr_blocks_update(inode, cc->valid_nr_cpages, true);
1340 add_compr_block_stat(inode, cc->valid_nr_cpages);
4c8ff709
CY
1341
1342 set_inode_flag(cc->inode, FI_APPEND_WRITE);
1343 if (cc->cluster_idx == 0)
1344 set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
1345
1346 f2fs_put_dnode(&dn);
79963d96 1347 if (IS_NOQUOTA(inode))
e4544b63 1348 f2fs_up_read(&sbi->node_write);
79963d96 1349 else
435cbab9 1350 f2fs_unlock_op(sbi);
4c8ff709 1351
c10c9820 1352 spin_lock(&fi->i_size_lock);
4c8ff709
CY
1353 if (fi->last_disk_size < psize)
1354 fi->last_disk_size = psize;
c10c9820 1355 spin_unlock(&fi->i_size_lock);
4c8ff709
CY
1356
1357 f2fs_put_rpages(cc);
31083031
CY
1358 page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
1359 cc->cpages = NULL;
8bfbfb0d 1360 f2fs_destroy_compress_ctx(cc, false);
4c8ff709
CY
1361 return 0;
1362
1363out_destroy_crypt:
31083031 1364 page_array_free(cc->inode, cic->rpages, cc->cluster_size);
4c8ff709
CY
1365
1366 for (--i; i >= 0; i--)
1367 fscrypt_finalize_bounce_page(&cc->cpages[i]);
4c8ff709 1368out_put_cic:
c68d6c88 1369 kmem_cache_free(cic_entry_slab, cic);
4c8ff709
CY
1370out_put_dnode:
1371 f2fs_put_dnode(&dn);
1372out_unlock_op:
79963d96 1373 if (IS_NOQUOTA(inode))
e4544b63 1374 f2fs_up_read(&sbi->node_write);
79963d96 1375 else
435cbab9 1376 f2fs_unlock_op(sbi);
31083031 1377out_free:
3271d7eb 1378 for (i = 0; i < cc->valid_nr_cpages; i++) {
827f0284
JK
1379 f2fs_compress_free_page(cc->cpages[i]);
1380 cc->cpages[i] = NULL;
1381 }
31083031
CY
1382 page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
1383 cc->cpages = NULL;
4c8ff709
CY
1384 return -EAGAIN;
1385}
1386
1387void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
1388{
1389 struct f2fs_sb_info *sbi = bio->bi_private;
1390 struct compress_io_ctx *cic =
1391 (struct compress_io_ctx *)page_private(page);
1392 int i;
1393
1394 if (unlikely(bio->bi_status))
1395 mapping_set_error(cic->inode->i_mapping, -EIO);
1396
5e6bbde9 1397 f2fs_compress_free_page(page);
4c8ff709
CY
1398
1399 dec_page_count(sbi, F2FS_WB_DATA);
1400
e6c3948d 1401 if (atomic_dec_return(&cic->pending_pages))
4c8ff709
CY
1402 return;
1403
1404 for (i = 0; i < cic->nr_rpages; i++) {
1405 WARN_ON(!cic->rpages[i]);
b763f3be 1406 clear_page_private_gcing(cic->rpages[i]);
4c8ff709
CY
1407 end_page_writeback(cic->rpages[i]);
1408 }
1409
31083031 1410 page_array_free(cic->inode, cic->rpages, cic->nr_rpages);
c68d6c88 1411 kmem_cache_free(cic_entry_slab, cic);
4c8ff709
CY
1412}
1413
1414static int f2fs_write_raw_pages(struct compress_ctx *cc,
1415 int *submitted,
1416 struct writeback_control *wbc,
1417 enum iostat_type io_type)
1418{
1419 struct address_space *mapping = cc->inode->i_mapping;
7377e853 1420 int _submitted, compr_blocks, ret, i;
4c8ff709
CY
1421
1422 compr_blocks = f2fs_compressed_blocks(cc);
7377e853
HJK
1423
1424 for (i = 0; i < cc->cluster_size; i++) {
1425 if (!cc->rpages[i])
1426 continue;
1427
1428 redirty_page_for_writepage(wbc, cc->rpages[i]);
1429 unlock_page(cc->rpages[i]);
4c8ff709
CY
1430 }
1431
7377e853
HJK
1432 if (compr_blocks < 0)
1433 return compr_blocks;
1434
4c8ff709
CY
1435 for (i = 0; i < cc->cluster_size; i++) {
1436 if (!cc->rpages[i])
1437 continue;
1438retry_write:
7377e853
HJK
1439 lock_page(cc->rpages[i]);
1440
4c8ff709 1441 if (cc->rpages[i]->mapping != mapping) {
7377e853 1442continue_unlock:
4c8ff709
CY
1443 unlock_page(cc->rpages[i]);
1444 continue;
1445 }
1446
7377e853
HJK
1447 if (!PageDirty(cc->rpages[i]))
1448 goto continue_unlock;
1449
babedcba
YL
1450 if (PageWriteback(cc->rpages[i])) {
1451 if (wbc->sync_mode == WB_SYNC_NONE)
1452 goto continue_unlock;
1453 f2fs_wait_on_page_writeback(cc->rpages[i], DATA, true, true);
1454 }
1455
7377e853
HJK
1456 if (!clear_page_dirty_for_io(cc->rpages[i]))
1457 goto continue_unlock;
4c8ff709
CY
1458
1459 ret = f2fs_write_single_data_page(cc->rpages[i], &_submitted,
1460 NULL, NULL, wbc, io_type,
3afae09f 1461 compr_blocks, false);
4c8ff709
CY
1462 if (ret) {
1463 if (ret == AOP_WRITEPAGE_ACTIVATE) {
1464 unlock_page(cc->rpages[i]);
1465 ret = 0;
1466 } else if (ret == -EAGAIN) {
466357dc
CY
 1467 /*
 1468 * for quota files, just leave the remaining pages redirtied to
 1469 * avoid a deadlock caused by a cluster update racing with a
 1470 * foreground operation.
 1471 */
7377e853
HJK
1472 if (IS_NOQUOTA(cc->inode))
1473 return 0;
4c8ff709 1474 ret = 0;
a64239d0 1475 f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
4c8ff709
CY
1476 goto retry_write;
1477 }
7377e853 1478 return ret;
4c8ff709
CY
1479 }
1480
1481 *submitted += _submitted;
1482 }
3afae09f
CY
1483
1484 f2fs_balance_fs(F2FS_M_SB(mapping), true);
1485
4c8ff709 1486 return 0;
4c8ff709
CY
1487}
1488
1489int f2fs_write_multi_pages(struct compress_ctx *cc,
1490 int *submitted,
1491 struct writeback_control *wbc,
1492 enum iostat_type io_type)
1493{
4c8ff709
CY
1494 int err;
1495
1496 *submitted = 0;
1497 if (cluster_may_compress(cc)) {
1498 err = f2fs_compress_pages(cc);
1499 if (err == -EAGAIN) {
09631cf3 1500 add_compr_block_stat(cc->inode, cc->cluster_size);
4c8ff709
CY
1501 goto write;
1502 } else if (err) {
1503 f2fs_put_rpages_wbc(cc, wbc, true, 1);
1504 goto destroy_out;
1505 }
1506
1507 err = f2fs_write_compressed_pages(cc, submitted,
1508 wbc, io_type);
4c8ff709
CY
1509 if (!err)
1510 return 0;
1511 f2fs_bug_on(F2FS_I_SB(cc->inode), err != -EAGAIN);
1512 }
1513write:
1514 f2fs_bug_on(F2FS_I_SB(cc->inode), *submitted);
1515
1516 err = f2fs_write_raw_pages(cc, submitted, wbc, io_type);
1517 f2fs_put_rpages_wbc(cc, wbc, false, 0);
1518destroy_out:
8bfbfb0d 1519 f2fs_destroy_compress_ctx(cc, false);
4c8ff709
CY
1520 return err;
1521}
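/*
 * Editorial note: f2fs_write_multi_pages() first tries the compressed
 * path; -EAGAIN from f2fs_compress_pages() (the cluster did not shrink
 * by at least one page) or from f2fs_write_compressed_pages() makes it
 * fall back to writing the raw pages via f2fs_write_raw_pages().
 */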
1522
bff139b4
DJ
1523static inline bool allow_memalloc_for_decomp(struct f2fs_sb_info *sbi,
1524 bool pre_alloc)
1525{
1526 return pre_alloc ^ f2fs_low_mem_mode(sbi);
1527}
1528
1529static int f2fs_prepare_decomp_mem(struct decompress_io_ctx *dic,
1530 bool pre_alloc)
1531{
1532 const struct f2fs_compress_ops *cops =
1533 f2fs_cops[F2FS_I(dic->inode)->i_compress_algorithm];
1534 int i;
1535
1536 if (!allow_memalloc_for_decomp(F2FS_I_SB(dic->inode), pre_alloc))
1537 return 0;
1538
1539 dic->tpages = page_array_alloc(dic->inode, dic->cluster_size);
1540 if (!dic->tpages)
1541 return -ENOMEM;
1542
1543 for (i = 0; i < dic->cluster_size; i++) {
1544 if (dic->rpages[i]) {
1545 dic->tpages[i] = dic->rpages[i];
1546 continue;
1547 }
1548
1549 dic->tpages[i] = f2fs_compress_alloc_page();
1550 if (!dic->tpages[i])
1551 return -ENOMEM;
1552 }
1553
1554 dic->rbuf = f2fs_vmap(dic->tpages, dic->cluster_size);
1555 if (!dic->rbuf)
1556 return -ENOMEM;
1557
1558 dic->cbuf = f2fs_vmap(dic->cpages, dic->nr_cpages);
1559 if (!dic->cbuf)
1560 return -ENOMEM;
1561
8140654e
ZQ
1562 if (cops->init_decompress_ctx)
1563 return cops->init_decompress_ctx(dic);
bff139b4
DJ
1564
1565 return 0;
1566}
1567
1568static void f2fs_release_decomp_mem(struct decompress_io_ctx *dic,
1569 bool bypass_destroy_callback, bool pre_alloc)
1570{
1571 const struct f2fs_compress_ops *cops =
1572 f2fs_cops[F2FS_I(dic->inode)->i_compress_algorithm];
1573
1574 if (!allow_memalloc_for_decomp(F2FS_I_SB(dic->inode), pre_alloc))
1575 return;
1576
1577 if (!bypass_destroy_callback && cops->destroy_decompress_ctx)
1578 cops->destroy_decompress_ctx(dic);
1579
1580 if (dic->cbuf)
1581 vm_unmap_ram(dic->cbuf, dic->nr_cpages);
1582
1583 if (dic->rbuf)
1584 vm_unmap_ram(dic->rbuf, dic->cluster_size);
1585}
1586
1587static void f2fs_free_dic(struct decompress_io_ctx *dic,
1588 bool bypass_destroy_callback);
7f59b277 1589
4c8ff709
CY
1590struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
1591{
4c8ff709
CY
1592 struct decompress_io_ctx *dic;
1593 pgoff_t start_idx = start_idx_of_cluster(cc);
bff139b4
DJ
1594 struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
1595 int i, ret;
4c8ff709 1596
bff139b4 1597 dic = f2fs_kmem_cache_alloc(dic_entry_slab, GFP_F2FS_ZERO, false, sbi);
4c8ff709
CY
1598 if (!dic)
1599 return ERR_PTR(-ENOMEM);
1600
31083031 1601 dic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
4c8ff709 1602 if (!dic->rpages) {
c68d6c88 1603 kmem_cache_free(dic_entry_slab, dic);
4c8ff709
CY
1604 return ERR_PTR(-ENOMEM);
1605 }
1606
1607 dic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
1608 dic->inode = cc->inode;
7f59b277 1609 atomic_set(&dic->remaining_pages, cc->nr_cpages);
4c8ff709
CY
1610 dic->cluster_idx = cc->cluster_idx;
1611 dic->cluster_size = cc->cluster_size;
1612 dic->log_cluster_size = cc->log_cluster_size;
1613 dic->nr_cpages = cc->nr_cpages;
7f59b277 1614 refcount_set(&dic->refcnt, 1);
4c8ff709 1615 dic->failed = false;
7f59b277 1616 dic->need_verity = f2fs_need_verity(cc->inode, start_idx);
4c8ff709
CY
1617
1618 for (i = 0; i < dic->cluster_size; i++)
1619 dic->rpages[i] = cc->rpages[i];
1620 dic->nr_rpages = cc->cluster_size;
1621
31083031 1622 dic->cpages = page_array_alloc(dic->inode, dic->nr_cpages);
bff139b4
DJ
1623 if (!dic->cpages) {
1624 ret = -ENOMEM;
4c8ff709 1625 goto out_free;
bff139b4 1626 }
4c8ff709
CY
1627
1628 for (i = 0; i < dic->nr_cpages; i++) {
1629 struct page *page;
1630
5e6bbde9 1631 page = f2fs_compress_alloc_page();
bff139b4
DJ
1632 if (!page) {
1633 ret = -ENOMEM;
4c8ff709 1634 goto out_free;
bff139b4 1635 }
4c8ff709
CY
1636
1637 f2fs_set_compressed_page(page, cc->inode,
887347a0 1638 start_idx + i + 1, dic);
4c8ff709
CY
1639 dic->cpages[i] = page;
1640 }
1641
bff139b4
DJ
1642 ret = f2fs_prepare_decomp_mem(dic, true);
1643 if (ret)
1644 goto out_free;
1645
4c8ff709
CY
1646 return dic;
1647
1648out_free:
bff139b4
DJ
1649 f2fs_free_dic(dic, true);
1650 return ERR_PTR(ret);
4c8ff709
CY
1651}
1652
bff139b4
DJ
1653static void f2fs_free_dic(struct decompress_io_ctx *dic,
1654 bool bypass_destroy_callback)
4c8ff709
CY
1655{
1656 int i;
1657
bff139b4
DJ
1658 f2fs_release_decomp_mem(dic, bypass_destroy_callback, true);
1659
4c8ff709
CY
1660 if (dic->tpages) {
1661 for (i = 0; i < dic->cluster_size; i++) {
1662 if (dic->rpages[i])
1663 continue;
8908e753
CY
1664 if (!dic->tpages[i])
1665 continue;
5e6bbde9 1666 f2fs_compress_free_page(dic->tpages[i]);
4c8ff709 1667 }
31083031 1668 page_array_free(dic->inode, dic->tpages, dic->cluster_size);
4c8ff709
CY
1669 }
1670
1671 if (dic->cpages) {
1672 for (i = 0; i < dic->nr_cpages; i++) {
1673 if (!dic->cpages[i])
1674 continue;
5e6bbde9 1675 f2fs_compress_free_page(dic->cpages[i]);
4c8ff709 1676 }
31083031 1677 page_array_free(dic->inode, dic->cpages, dic->nr_cpages);
4c8ff709
CY
1678 }
1679
31083031 1680 page_array_free(dic->inode, dic->rpages, dic->nr_rpages);
c68d6c88 1681 kmem_cache_free(dic_entry_slab, dic);
4c8ff709
CY
1682}
1683
bff139b4 1684static void f2fs_late_free_dic(struct work_struct *work)
7f59b277 1685{
bff139b4
DJ
1686 struct decompress_io_ctx *dic =
1687 container_of(work, struct decompress_io_ctx, free_work);
1688
1689 f2fs_free_dic(dic, false);
1690}
1691
1692static void f2fs_put_dic(struct decompress_io_ctx *dic, bool in_task)
7f59b277 1693{
bff139b4
DJ
1694 if (refcount_dec_and_test(&dic->refcnt)) {
1695 if (in_task) {
1696 f2fs_free_dic(dic, false);
1697 } else {
1698 INIT_WORK(&dic->free_work, f2fs_late_free_dic);
1699 queue_work(F2FS_I_SB(dic->inode)->post_read_wq,
1700 &dic->free_work);
1701 }
1702 }
7f59b277
EB
1703}
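/*
 * Editorial note: when the final reference is dropped outside task
 * context (in_task == false), freeing is deferred to post_read_wq
 * because f2fs_free_dic() ends up in vm_unmap_ram() (via
 * f2fs_release_decomp_mem()), which may sleep and therefore must not
 * run in interrupt context.
 */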
1704
98dc08ba 1705static void f2fs_verify_cluster(struct work_struct *work)
4c8ff709 1706{
98dc08ba
EB
1707 struct decompress_io_ctx *dic =
1708 container_of(work, struct decompress_io_ctx, verity_work);
4c8ff709
CY
1709 int i;
1710
98dc08ba 1711 /* Verify, update, and unlock the decompressed pages. */
7f59b277
EB
1712 for (i = 0; i < dic->cluster_size; i++) {
1713 struct page *rpage = dic->rpages[i];
4c8ff709
CY
1714
1715 if (!rpage)
1716 continue;
1717
98dc08ba 1718 if (fsverity_verify_page(rpage))
23c51bed 1719 SetPageUptodate(rpage);
98dc08ba
EB
1720 else
1721 ClearPageUptodate(rpage);
4c8ff709
CY
1722 unlock_page(rpage);
1723 }
7f59b277 1724
98dc08ba 1725 f2fs_put_dic(dic, true);
7f59b277
EB
1726}
1727
1728/*
1729 * This is called when a compressed cluster has been decompressed
1730 * (or failed to be read and/or decompressed).
1731 */
bff139b4
DJ
1732void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed,
1733 bool in_task)
7f59b277 1734{
98dc08ba
EB
1735 int i;
1736
7f59b277
EB
1737 if (!failed && dic->need_verity) {
1738 /*
1739 * Note that to avoid deadlocks, the verity work can't be done
1740 * on the decompression workqueue. This is because verifying
1741 * the data pages can involve reading metadata pages from the
1742 * file, and these metadata pages may be compressed.
1743 */
1744 INIT_WORK(&dic->verity_work, f2fs_verify_cluster);
1745 fsverity_enqueue_verify_work(&dic->verity_work);
98dc08ba
EB
1746 return;
1747 }
1748
1749 /* Update and unlock the cluster's pagecache pages. */
1750 for (i = 0; i < dic->cluster_size; i++) {
1751 struct page *rpage = dic->rpages[i];
1752
1753 if (!rpage)
1754 continue;
1755
1756 if (failed)
1757 ClearPageUptodate(rpage);
1758 else
1759 SetPageUptodate(rpage);
1760 unlock_page(rpage);
7f59b277 1761 }
98dc08ba
EB
1762
1763 /*
1764 * Release the reference to the decompress_io_ctx that was being held
1765 * for I/O completion.
1766 */
1767 f2fs_put_dic(dic, in_task);
7f59b277
EB
1768}
1769
1770/*
1771 * Put a reference to a compressed page's decompress_io_ctx.
1772 *
1773 * This is called when the page is no longer needed and can be freed.
1774 */
bff139b4 1775void f2fs_put_page_dic(struct page *page, bool in_task)
7f59b277
EB
1776{
1777 struct decompress_io_ctx *dic =
1778 (struct decompress_io_ctx *)page_private(page);
1779
bff139b4 1780 f2fs_put_dic(dic, in_task);
4c8ff709 1781}
31083031 1782
94afd6d6
CY
 1783/*
 1784 * check whether cluster blocks are contiguous, and add an extent cache
 1785 * entry only if the cluster blocks are logically and physically contiguous.
 1786 */
1787unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn)
1788{
1789 bool compressed = f2fs_data_blkaddr(dn) == COMPRESS_ADDR;
1790 int i = compressed ? 1 : 0;
1791 block_t first_blkaddr = data_blkaddr(dn->inode, dn->node_page,
1792 dn->ofs_in_node + i);
1793
1794 for (i += 1; i < F2FS_I(dn->inode)->i_cluster_size; i++) {
1795 block_t blkaddr = data_blkaddr(dn->inode, dn->node_page,
1796 dn->ofs_in_node + i);
1797
1798 if (!__is_valid_data_blkaddr(blkaddr))
1799 break;
1800 if (first_blkaddr + i - (compressed ? 1 : 0) != blkaddr)
1801 return 0;
1802 }
1803
1804 return compressed ? i - 1 : i;
1805}
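/*
 * Editorial note (illustrative example): for a compressed 4-block
 * cluster mapped to [COMPRESS_ADDR, 100, 101, 102] the data blocks are
 * physically contiguous and the function returns 3; for
 * [COMPRESS_ADDR, 100, 105, ...] it returns 0, so no extent cache
 * entry should be added.
 */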
1806
6ce19aff 1807const struct address_space_operations f2fs_compress_aops = {
c26cd045 1808 .release_folio = f2fs_release_folio,
91503996 1809 .invalidate_folio = f2fs_invalidate_folio,
f35474ec 1810 .migrate_folio = filemap_migrate_folio,
6ce19aff
CY
1811};
1812
1813struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi)
1814{
1815 return sbi->compress_inode->i_mapping;
1816}
1817
1818void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, block_t blkaddr)
1819{
1820 if (!sbi->compress_inode)
1821 return;
1822 invalidate_mapping_pages(COMPRESS_MAPPING(sbi), blkaddr, blkaddr);
1823}
1824
1825void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
1826 nid_t ino, block_t blkaddr)
1827{
1828 struct page *cpage;
1829 int ret;
1830
1831 if (!test_opt(sbi, COMPRESS_CACHE))
1832 return;
1833
1834 if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE_READ))
1835 return;
1836
1837 if (!f2fs_available_free_memory(sbi, COMPRESS_PAGE))
1838 return;
1839
1840 cpage = find_get_page(COMPRESS_MAPPING(sbi), blkaddr);
1841 if (cpage) {
1842 f2fs_put_page(cpage, 0);
1843 return;
1844 }
1845
1846 cpage = alloc_page(__GFP_NOWARN | __GFP_IO);
1847 if (!cpage)
1848 return;
1849
1850 ret = add_to_page_cache_lru(cpage, COMPRESS_MAPPING(sbi),
1851 blkaddr, GFP_NOFS);
1852 if (ret) {
1853 f2fs_put_page(cpage, 0);
1854 return;
1855 }
1856
1857 set_page_private_data(cpage, ino);
1858
1859 if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE_READ))
1860 goto out;
1861
1862 memcpy(page_address(cpage), page_address(page), PAGE_SIZE);
1863 SetPageUptodate(cpage);
1864out:
1865 f2fs_put_page(cpage, 1);
1866}
1867
1868bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
1869 block_t blkaddr)
1870{
1871 struct page *cpage;
1872 bool hitted = false;
1873
1874 if (!test_opt(sbi, COMPRESS_CACHE))
1875 return false;
1876
1877 cpage = f2fs_pagecache_get_page(COMPRESS_MAPPING(sbi),
1878 blkaddr, FGP_LOCK | FGP_NOWAIT, GFP_NOFS);
1879 if (cpage) {
1880 if (PageUptodate(cpage)) {
1881 atomic_inc(&sbi->compress_page_hit);
1882 memcpy(page_address(page),
1883 page_address(cpage), PAGE_SIZE);
1884 hitted = true;
1885 }
1886 f2fs_put_page(cpage, 1);
1887 }
1888
1889 return hitted;
1890}
1891
1892void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino)
1893{
173cdf2c 1894 struct address_space *mapping = COMPRESS_MAPPING(sbi);
bbfe4f66 1895 struct folio_batch fbatch;
6ce19aff
CY
1896 pgoff_t index = 0;
1897 pgoff_t end = MAX_BLKADDR(sbi);
1898
1899 if (!mapping->nrpages)
1900 return;
1901
bbfe4f66 1902 folio_batch_init(&fbatch);
6ce19aff
CY
1903
1904 do {
bbfe4f66 1905 unsigned int nr, i;
6ce19aff 1906
bbfe4f66
MWO
1907 nr = filemap_get_folios(mapping, &index, end - 1, &fbatch);
1908 if (!nr)
6ce19aff
CY
1909 break;
1910
bbfe4f66
MWO
1911 for (i = 0; i < nr; i++) {
1912 struct folio *folio = fbatch.folios[i];
6ce19aff 1913
bbfe4f66
MWO
1914 folio_lock(folio);
1915 if (folio->mapping != mapping) {
1916 folio_unlock(folio);
6ce19aff
CY
1917 continue;
1918 }
1919
bbfe4f66
MWO
1920 if (ino != get_page_private_data(&folio->page)) {
1921 folio_unlock(folio);
6ce19aff
CY
1922 continue;
1923 }
1924
bbfe4f66
MWO
1925 generic_error_remove_page(mapping, &folio->page);
1926 folio_unlock(folio);
6ce19aff 1927 }
bbfe4f66 1928 folio_batch_release(&fbatch);
6ce19aff
CY
1929 cond_resched();
1930 } while (index < end);
1931}
1932
1933int f2fs_init_compress_inode(struct f2fs_sb_info *sbi)
1934{
1935 struct inode *inode;
1936
1937 if (!test_opt(sbi, COMPRESS_CACHE))
1938 return 0;
1939
1940 inode = f2fs_iget(sbi->sb, F2FS_COMPRESS_INO(sbi));
1941 if (IS_ERR(inode))
1942 return PTR_ERR(inode);
1943 sbi->compress_inode = inode;
1944
1945 sbi->compress_percent = COMPRESS_PERCENT;
1946 sbi->compress_watermark = COMPRESS_WATERMARK;
1947
1948 atomic_set(&sbi->compress_page_hit, 0);
1949
1950 return 0;
1951}
1952
1953void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi)
1954{
1955 if (!sbi->compress_inode)
1956 return;
1957 iput(sbi->compress_inode);
1958 sbi->compress_inode = NULL;
1959}
1960
31083031
CY
1961int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi)
1962{
1963 dev_t dev = sbi->sb->s_bdev->bd_dev;
1964 char slab_name[32];
1965
29be7ec3
CY
1966 if (!f2fs_sb_has_compression(sbi))
1967 return 0;
1968
31083031
CY
1969 sprintf(slab_name, "f2fs_page_array_entry-%u:%u", MAJOR(dev), MINOR(dev));
1970
1971 sbi->page_array_slab_size = sizeof(struct page *) <<
1972 F2FS_OPTION(sbi).compress_log_size;
1973
1974 sbi->page_array_slab = f2fs_kmem_cache_create(slab_name,
1975 sbi->page_array_slab_size);
870af777 1976 return sbi->page_array_slab ? 0 : -ENOMEM;
31083031
CY
1977}
1978
1979void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi)
1980{
1981 kmem_cache_destroy(sbi->page_array_slab);
1982}
c68d6c88 1983
870af777 1984int __init f2fs_init_compress_cache(void)
c68d6c88
CY
1985{
1986 cic_entry_slab = f2fs_kmem_cache_create("f2fs_cic_entry",
1987 sizeof(struct compress_io_ctx));
1988 if (!cic_entry_slab)
1989 return -ENOMEM;
c68d6c88
CY
1990 dic_entry_slab = f2fs_kmem_cache_create("f2fs_dic_entry",
1991 sizeof(struct decompress_io_ctx));
1992 if (!dic_entry_slab)
c68d6c88
CY
1993 goto free_cic;
1994 return 0;
1995free_cic:
870af777 1996 kmem_cache_destroy(cic_entry_slab);
c68d6c88
CY
1997 return -ENOMEM;
1998}
1999
2000void f2fs_destroy_compress_cache(void)
2001{
870af777
YL
2002 kmem_cache_destroy(dic_entry_slab);
2003 kmem_cache_destroy(cic_entry_slab);
c68d6c88 2004}