// SPDX-License-Identifier: GPL-2.0
/*
 * f2fs compress support
 *
 * Copyright (c) 2019 Chao Yu <chao@kernel.org>
 */

#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/lzo.h>
#include <linux/lz4.h>
#include <linux/zstd.h>

#include "f2fs.h"
#include "node.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *cic_entry_slab;
static struct kmem_cache *dic_entry_slab;

static void *page_array_alloc(struct inode *inode, int nr)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned int size = sizeof(struct page *) * nr;

	if (likely(size <= sbi->page_array_slab_size))
		return kmem_cache_zalloc(sbi->page_array_slab, GFP_NOFS);
	return f2fs_kzalloc(sbi, size, GFP_NOFS);
}

static void page_array_free(struct inode *inode, void *pages, int nr)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned int size = sizeof(struct page *) * nr;

	if (!pages)
		return;

	if (likely(size <= sbi->page_array_slab_size))
		kmem_cache_free(sbi->page_array_slab, pages);
	else
		kfree(pages);
}

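/*
 * Editor's note: page_array_slab_size is sized for one full cluster's
 * worth of page pointers (see f2fs_init_page_array_cache() below), so
 * the common cluster-sized allocations above are served from the
 * per-filesystem slab and only oversized requests fall back to
 * f2fs_kzalloc().
 */
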
struct f2fs_compress_ops {
	int (*init_compress_ctx)(struct compress_ctx *cc);
	void (*destroy_compress_ctx)(struct compress_ctx *cc);
	int (*compress_pages)(struct compress_ctx *cc);
	int (*init_decompress_ctx)(struct decompress_io_ctx *dic);
	void (*destroy_decompress_ctx)(struct decompress_io_ctx *dic);
	int (*decompress_pages)(struct decompress_io_ctx *dic);
};

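/*
 * Editor's note: each algorithm below fills in one of these ops tables.
 * The init/destroy hooks are optional (in this file only zstd provides
 * the decompress-side pair), so callers always test the function
 * pointers for NULL before invoking them.
 */
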
static unsigned int offset_in_cluster(struct compress_ctx *cc, pgoff_t index)
{
	return index & (cc->cluster_size - 1);
}

static pgoff_t cluster_idx(struct compress_ctx *cc, pgoff_t index)
{
	return index >> cc->log_cluster_size;
}

static pgoff_t start_idx_of_cluster(struct compress_ctx *cc)
{
	return cc->cluster_idx << cc->log_cluster_size;
}

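/*
 * Editor's example: with log_cluster_size == 2 (cluster_size == 4), page
 * index 7 lands in cluster 1 (7 >> 2) at offset 3 (7 & 3), and
 * start_idx_of_cluster() for that cluster yields page index 4 (1 << 2).
 */
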
bool f2fs_is_compressed_page(struct page *page)
{
	if (!PagePrivate(page))
		return false;
	if (!page_private(page))
		return false;
	if (IS_ATOMIC_WRITTEN_PAGE(page) || IS_DUMMY_WRITTEN_PAGE(page))
		return false;

	f2fs_bug_on(F2FS_M_SB(page->mapping),
		*((u32 *)page_private(page)) != F2FS_COMPRESSED_PAGE_MAGIC);
	return true;
}

static void f2fs_set_compressed_page(struct page *page,
		struct inode *inode, pgoff_t index, void *data)
{
	SetPagePrivate(page);
	set_page_private(page, (unsigned long)data);

	/* i_crypto_info and iv index */
	page->index = index;
	page->mapping = inode->i_mapping;
}

static void f2fs_drop_rpages(struct compress_ctx *cc, int len, bool unlock)
{
	int i;

	for (i = 0; i < len; i++) {
		if (!cc->rpages[i])
			continue;
		if (unlock)
			unlock_page(cc->rpages[i]);
		else
			put_page(cc->rpages[i]);
	}
}

static void f2fs_put_rpages(struct compress_ctx *cc)
{
	f2fs_drop_rpages(cc, cc->cluster_size, false);
}

static void f2fs_unlock_rpages(struct compress_ctx *cc, int len)
{
	f2fs_drop_rpages(cc, len, true);
}

static void f2fs_put_rpages_wbc(struct compress_ctx *cc,
		struct writeback_control *wbc, bool redirty, int unlock)
{
	unsigned int i;

	for (i = 0; i < cc->cluster_size; i++) {
		if (!cc->rpages[i])
			continue;
		if (redirty)
			redirty_page_for_writepage(wbc, cc->rpages[i]);
		f2fs_put_page(cc->rpages[i], unlock);
	}
}

struct page *f2fs_compress_control_page(struct page *page)
{
	return ((struct compress_io_ctx *)page_private(page))->rpages[0];
}

int f2fs_init_compress_ctx(struct compress_ctx *cc)
{
	if (cc->rpages)
		return 0;

	cc->rpages = page_array_alloc(cc->inode, cc->cluster_size);
	return cc->rpages ? 0 : -ENOMEM;
}

void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse)
{
	page_array_free(cc->inode, cc->rpages, cc->cluster_size);
	cc->rpages = NULL;
	cc->nr_rpages = 0;
	cc->nr_cpages = 0;
	if (!reuse)
		cc->cluster_idx = NULL_CLUSTER;
}

void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page)
{
	unsigned int cluster_ofs;

	if (!f2fs_cluster_can_merge_page(cc, page->index))
		f2fs_bug_on(F2FS_I_SB(cc->inode), 1);

	cluster_ofs = offset_in_cluster(cc, page->index);
	cc->rpages[cluster_ofs] = page;
	cc->nr_rpages++;
	cc->cluster_idx = cluster_idx(cc, page->index);
}

#ifdef CONFIG_F2FS_FS_LZO
static int lzo_init_compress_ctx(struct compress_ctx *cc)
{
	cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
				LZO1X_MEM_COMPRESS, GFP_NOFS);
	if (!cc->private)
		return -ENOMEM;

	cc->clen = lzo1x_worst_compress(PAGE_SIZE << cc->log_cluster_size);
	return 0;
}

static void lzo_destroy_compress_ctx(struct compress_ctx *cc)
{
	kvfree(cc->private);
	cc->private = NULL;
}

static int lzo_compress_pages(struct compress_ctx *cc)
{
	int ret;

	ret = lzo1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
					&cc->clen, cc->private);
	if (ret != LZO_E_OK) {
		printk_ratelimited("%sF2FS-fs (%s): lzo compress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
		return -EIO;
	}
	return 0;
}

static int lzo_decompress_pages(struct decompress_io_ctx *dic)
{
	int ret;

	ret = lzo1x_decompress_safe(dic->cbuf->cdata, dic->clen,
						dic->rbuf, &dic->rlen);
	if (ret != LZO_E_OK) {
		printk_ratelimited("%sF2FS-fs (%s): lzo decompress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
		return -EIO;
	}

	if (dic->rlen != PAGE_SIZE << dic->log_cluster_size) {
		printk_ratelimited("%sF2FS-fs (%s): lzo invalid rlen:%zu, "
					"expected:%lu\n", KERN_ERR,
					F2FS_I_SB(dic->inode)->sb->s_id,
					dic->rlen,
					PAGE_SIZE << dic->log_cluster_size);
		return -EIO;
	}
	return 0;
}

static const struct f2fs_compress_ops f2fs_lzo_ops = {
	.init_compress_ctx = lzo_init_compress_ctx,
	.destroy_compress_ctx = lzo_destroy_compress_ctx,
	.compress_pages = lzo_compress_pages,
	.decompress_pages = lzo_decompress_pages,
};
#endif

#ifdef CONFIG_F2FS_FS_LZ4
static int lz4_init_compress_ctx(struct compress_ctx *cc)
{
	unsigned int size = LZ4_MEM_COMPRESS;

#ifdef CONFIG_F2FS_FS_LZ4HC
	if (F2FS_I(cc->inode)->i_compress_flag >> COMPRESS_LEVEL_OFFSET)
		size = LZ4HC_MEM_COMPRESS;
#endif

	cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode), size, GFP_NOFS);
	if (!cc->private)
		return -ENOMEM;

	/*
	 * we do not change cc->clen to LZ4_compressBound(inputsize) to
	 * adapt worst compress case, because lz4 compressor can handle
	 * output budget properly.
	 */
	cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
	return 0;
}

static void lz4_destroy_compress_ctx(struct compress_ctx *cc)
{
	kvfree(cc->private);
	cc->private = NULL;
}

#ifdef CONFIG_F2FS_FS_LZ4HC
static int lz4hc_compress_pages(struct compress_ctx *cc)
{
	unsigned char level = F2FS_I(cc->inode)->i_compress_flag >>
						COMPRESS_LEVEL_OFFSET;
	int len;

	if (level)
		len = LZ4_compress_HC(cc->rbuf, cc->cbuf->cdata, cc->rlen,
					cc->clen, level, cc->private);
	else
		len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
						cc->clen, cc->private);
	if (!len)
		return -EAGAIN;

	cc->clen = len;
	return 0;
}
#endif

static int lz4_compress_pages(struct compress_ctx *cc)
{
	int len;

#ifdef CONFIG_F2FS_FS_LZ4HC
	return lz4hc_compress_pages(cc);
#endif
	len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
						cc->clen, cc->private);
	if (!len)
		return -EAGAIN;

	cc->clen = len;
	return 0;
}

static int lz4_decompress_pages(struct decompress_io_ctx *dic)
{
	int ret;

	ret = LZ4_decompress_safe(dic->cbuf->cdata, dic->rbuf,
						dic->clen, dic->rlen);
	if (ret < 0) {
		printk_ratelimited("%sF2FS-fs (%s): lz4 decompress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
		return -EIO;
	}

	if (ret != PAGE_SIZE << dic->log_cluster_size) {
		printk_ratelimited("%sF2FS-fs (%s): lz4 invalid rlen:%zu, "
					"expected:%lu\n", KERN_ERR,
					F2FS_I_SB(dic->inode)->sb->s_id,
					dic->rlen,
					PAGE_SIZE << dic->log_cluster_size);
		return -EIO;
	}
	return 0;
}

static const struct f2fs_compress_ops f2fs_lz4_ops = {
	.init_compress_ctx = lz4_init_compress_ctx,
	.destroy_compress_ctx = lz4_destroy_compress_ctx,
	.compress_pages = lz4_compress_pages,
	.decompress_pages = lz4_decompress_pages,
};
#endif

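/*
 * Editor's note on the output budget above: capping cc->clen at
 * rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE means a compressed cluster is
 * only accepted if it saves at least one full page after accounting for
 * the header; otherwise the compressor exhausts its budget and the
 * cluster is written uncompressed via the -EAGAIN path.
 */
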
#ifdef CONFIG_F2FS_FS_ZSTD
#define F2FS_ZSTD_DEFAULT_CLEVEL	1

static int zstd_init_compress_ctx(struct compress_ctx *cc)
{
	ZSTD_parameters params;
	ZSTD_CStream *stream;
	void *workspace;
	unsigned int workspace_size;
	unsigned char level = F2FS_I(cc->inode)->i_compress_flag >>
						COMPRESS_LEVEL_OFFSET;

	if (!level)
		level = F2FS_ZSTD_DEFAULT_CLEVEL;

	params = ZSTD_getParams(level, cc->rlen, 0);
	workspace_size = ZSTD_CStreamWorkspaceBound(params.cParams);

	workspace = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
					workspace_size, GFP_NOFS);
	if (!workspace)
		return -ENOMEM;

	stream = ZSTD_initCStream(params, 0, workspace, workspace_size);
	if (!stream) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_initCStream failed\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
				__func__);
		kvfree(workspace);
		return -EIO;
	}

	cc->private = workspace;
	cc->private2 = stream;

	cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
	return 0;
}

static void zstd_destroy_compress_ctx(struct compress_ctx *cc)
{
	kvfree(cc->private);
	cc->private = NULL;
	cc->private2 = NULL;
}

static int zstd_compress_pages(struct compress_ctx *cc)
{
	ZSTD_CStream *stream = cc->private2;
	ZSTD_inBuffer inbuf;
	ZSTD_outBuffer outbuf;
	int src_size = cc->rlen;
	int dst_size = src_size - PAGE_SIZE - COMPRESS_HEADER_SIZE;
	int ret;

	inbuf.pos = 0;
	inbuf.src = cc->rbuf;
	inbuf.size = src_size;

	outbuf.pos = 0;
	outbuf.dst = cc->cbuf->cdata;
	outbuf.size = dst_size;

	ret = ZSTD_compressStream(stream, &outbuf, &inbuf);
	if (ZSTD_isError(ret)) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_compressStream failed, ret: %d\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
				__func__, ZSTD_getErrorCode(ret));
		return -EIO;
	}

	ret = ZSTD_endStream(stream, &outbuf);
	if (ZSTD_isError(ret)) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_endStream returned %d\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
				__func__, ZSTD_getErrorCode(ret));
		return -EIO;
	}

	/*
	 * there is compressed data remained in intermediate buffer due to
	 * no more space in cbuf.cdata
	 */
	if (ret)
		return -EAGAIN;

	cc->clen = outbuf.pos;
	return 0;
}

static int zstd_init_decompress_ctx(struct decompress_io_ctx *dic)
{
	ZSTD_DStream *stream;
	void *workspace;
	unsigned int workspace_size;
	unsigned int max_window_size =
			MAX_COMPRESS_WINDOW_SIZE(dic->log_cluster_size);

	workspace_size = ZSTD_DStreamWorkspaceBound(max_window_size);

	workspace = f2fs_kvmalloc(F2FS_I_SB(dic->inode),
					workspace_size, GFP_NOFS);
	if (!workspace)
		return -ENOMEM;

	stream = ZSTD_initDStream(max_window_size, workspace, workspace_size);
	if (!stream) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_initDStream failed\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
				__func__);
		kvfree(workspace);
		return -EIO;
	}

	dic->private = workspace;
	dic->private2 = stream;

	return 0;
}

static void zstd_destroy_decompress_ctx(struct decompress_io_ctx *dic)
{
	kvfree(dic->private);
	dic->private = NULL;
	dic->private2 = NULL;
}

static int zstd_decompress_pages(struct decompress_io_ctx *dic)
{
	ZSTD_DStream *stream = dic->private2;
	ZSTD_inBuffer inbuf;
	ZSTD_outBuffer outbuf;
	int ret;

	inbuf.pos = 0;
	inbuf.src = dic->cbuf->cdata;
	inbuf.size = dic->clen;

	outbuf.pos = 0;
	outbuf.dst = dic->rbuf;
	outbuf.size = dic->rlen;

	ret = ZSTD_decompressStream(stream, &outbuf, &inbuf);
	if (ZSTD_isError(ret)) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_decompressStream failed, ret: %d\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
				__func__, ZSTD_getErrorCode(ret));
		return -EIO;
	}

	if (dic->rlen != outbuf.pos) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD invalid rlen:%zu, "
					"expected:%lu\n", KERN_ERR,
					F2FS_I_SB(dic->inode)->sb->s_id,
					__func__, dic->rlen,
					PAGE_SIZE << dic->log_cluster_size);
		return -EIO;
	}

	return 0;
}

static const struct f2fs_compress_ops f2fs_zstd_ops = {
	.init_compress_ctx = zstd_init_compress_ctx,
	.destroy_compress_ctx = zstd_destroy_compress_ctx,
	.compress_pages = zstd_compress_pages,
	.init_decompress_ctx = zstd_init_decompress_ctx,
	.destroy_decompress_ctx = zstd_destroy_decompress_ctx,
	.decompress_pages = zstd_decompress_pages,
};
#endif

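/*
 * Editor's note: unlike lzo/lz4, zstd keeps per-I/O stream state, so it
 * supplies init/destroy hooks for the decompress side as well. The
 * workspace is bounded up front (ZSTD_CStreamWorkspaceBound /
 * ZSTD_DStreamWorkspaceBound), so the compression library itself never
 * allocates during the actual transform.
 */
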
#ifdef CONFIG_F2FS_FS_LZO
#ifdef CONFIG_F2FS_FS_LZORLE
static int lzorle_compress_pages(struct compress_ctx *cc)
{
	int ret;

	ret = lzorle1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
					&cc->clen, cc->private);
	if (ret != LZO_E_OK) {
		printk_ratelimited("%sF2FS-fs (%s): lzo-rle compress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
		return -EIO;
	}
	return 0;
}

static const struct f2fs_compress_ops f2fs_lzorle_ops = {
	.init_compress_ctx = lzo_init_compress_ctx,
	.destroy_compress_ctx = lzo_destroy_compress_ctx,
	.compress_pages = lzorle_compress_pages,
	.decompress_pages = lzo_decompress_pages,
};
#endif
#endif

static const struct f2fs_compress_ops *f2fs_cops[COMPRESS_MAX] = {
#ifdef CONFIG_F2FS_FS_LZO
	&f2fs_lzo_ops,
#else
	NULL,
#endif
#ifdef CONFIG_F2FS_FS_LZ4
	&f2fs_lz4_ops,
#else
	NULL,
#endif
#ifdef CONFIG_F2FS_FS_ZSTD
	&f2fs_zstd_ops,
#else
	NULL,
#endif
#if defined(CONFIG_F2FS_FS_LZO) && defined(CONFIG_F2FS_FS_LZORLE)
	&f2fs_lzorle_ops,
#else
	NULL,
#endif
};

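/*
 * Editor's note (assuming the enum order in f2fs.h: COMPRESS_LZO,
 * COMPRESS_LZ4, COMPRESS_ZSTD, COMPRESS_LZORLE): this array is indexed
 * by the on-disk algorithm id in i_compress_algorithm, so a NULL slot is
 * how f2fs_is_compress_backend_ready() detects an algorithm that was not
 * compiled in.
 */
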
bool f2fs_is_compress_backend_ready(struct inode *inode)
{
	if (!f2fs_compressed_file(inode))
		return true;
	return f2fs_cops[F2FS_I(inode)->i_compress_algorithm];
}

static mempool_t *compress_page_pool;
static int num_compress_pages = 512;
module_param(num_compress_pages, uint, 0444);
MODULE_PARM_DESC(num_compress_pages,
		"Number of intermediate compress pages to preallocate");

int f2fs_init_compress_mempool(void)
{
	compress_page_pool = mempool_create_page_pool(num_compress_pages, 0);
	if (!compress_page_pool)
		return -ENOMEM;

	return 0;
}

void f2fs_destroy_compress_mempool(void)
{
	mempool_destroy(compress_page_pool);
}

static struct page *f2fs_compress_alloc_page(void)
{
	struct page *page;

	page = mempool_alloc(compress_page_pool, GFP_NOFS);
	lock_page(page);

	return page;
}

static void f2fs_compress_free_page(struct page *page)
{
	if (!page)
		return;
	set_page_private(page, (unsigned long)NULL);
	ClearPagePrivate(page);
	page->mapping = NULL;
	unlock_page(page);
	mempool_free(page, compress_page_pool);
}

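/*
 * Editor's note: intermediate pages come from a mempool rather than the
 * page allocator so compressed writeback cannot deadlock on memory
 * reclaim; mempool_alloc() with a blocking GFP mask such as GFP_NOFS may
 * sleep but does not return NULL, which is why f2fs_compress_alloc_page()
 * has no error path.
 */
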
#define MAX_VMAP_RETRIES	3

static void *f2fs_vmap(struct page **pages, unsigned int count)
{
	int i;
	void *buf = NULL;

	for (i = 0; i < MAX_VMAP_RETRIES; i++) {
		buf = vm_map_ram(pages, count, -1);
		if (buf)
			break;
		vm_unmap_aliases();
	}
	return buf;
}

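/*
 * Editor's note: vm_map_ram() can fail transiently when the vmalloc area
 * is fragmented by lazily-freed mappings; vm_unmap_aliases() flushes the
 * lazy unmaps, so a few retries usually succeed.
 */
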
static int f2fs_compress_pages(struct compress_ctx *cc)
{
	struct f2fs_inode_info *fi = F2FS_I(cc->inode);
	const struct f2fs_compress_ops *cops =
				f2fs_cops[fi->i_compress_algorithm];
	unsigned int max_len, new_nr_cpages;
	struct page **new_cpages;
	u32 chksum = 0;
	int i, ret;

	trace_f2fs_compress_pages_start(cc->inode, cc->cluster_idx,
				cc->cluster_size, fi->i_compress_algorithm);

	if (cops->init_compress_ctx) {
		ret = cops->init_compress_ctx(cc);
		if (ret)
			goto out;
	}

	max_len = COMPRESS_HEADER_SIZE + cc->clen;
	cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);

	cc->cpages = page_array_alloc(cc->inode, cc->nr_cpages);
	if (!cc->cpages) {
		ret = -ENOMEM;
		goto destroy_compress_ctx;
	}

	for (i = 0; i < cc->nr_cpages; i++) {
		cc->cpages[i] = f2fs_compress_alloc_page();
		if (!cc->cpages[i]) {
			ret = -ENOMEM;
			goto out_free_cpages;
		}
	}

	cc->rbuf = f2fs_vmap(cc->rpages, cc->cluster_size);
	if (!cc->rbuf) {
		ret = -ENOMEM;
		goto out_free_cpages;
	}

	cc->cbuf = f2fs_vmap(cc->cpages, cc->nr_cpages);
	if (!cc->cbuf) {
		ret = -ENOMEM;
		goto out_vunmap_rbuf;
	}

	ret = cops->compress_pages(cc);
	if (ret)
		goto out_vunmap_cbuf;

	max_len = PAGE_SIZE * (cc->cluster_size - 1) - COMPRESS_HEADER_SIZE;

	if (cc->clen > max_len) {
		ret = -EAGAIN;
		goto out_vunmap_cbuf;
	}

	cc->cbuf->clen = cpu_to_le32(cc->clen);

	if (fi->i_compress_flag & 1 << COMPRESS_CHKSUM)
		chksum = f2fs_crc32(F2FS_I_SB(cc->inode),
					cc->cbuf->cdata, cc->clen);
	cc->cbuf->chksum = cpu_to_le32(chksum);

	for (i = 0; i < COMPRESS_DATA_RESERVED_SIZE; i++)
		cc->cbuf->reserved[i] = cpu_to_le32(0);

	new_nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);

	/* Now we're going to cut unnecessary tail pages */
	new_cpages = page_array_alloc(cc->inode, new_nr_cpages);
	if (!new_cpages) {
		ret = -ENOMEM;
		goto out_vunmap_cbuf;
	}

	/* zero out any unused part of the last page */
	memset(&cc->cbuf->cdata[cc->clen], 0,
			(new_nr_cpages * PAGE_SIZE) -
			(cc->clen + COMPRESS_HEADER_SIZE));

	vm_unmap_ram(cc->cbuf, cc->nr_cpages);
	vm_unmap_ram(cc->rbuf, cc->cluster_size);

	for (i = 0; i < cc->nr_cpages; i++) {
		if (i < new_nr_cpages) {
			new_cpages[i] = cc->cpages[i];
			continue;
		}
		f2fs_compress_free_page(cc->cpages[i]);
		cc->cpages[i] = NULL;
	}

	if (cops->destroy_compress_ctx)
		cops->destroy_compress_ctx(cc);

	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
	cc->cpages = new_cpages;
	cc->nr_cpages = new_nr_cpages;

	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
							cc->clen, ret);
	return 0;

out_vunmap_cbuf:
	vm_unmap_ram(cc->cbuf, cc->nr_cpages);
out_vunmap_rbuf:
	vm_unmap_ram(cc->rbuf, cc->cluster_size);
out_free_cpages:
	for (i = 0; i < cc->nr_cpages; i++) {
		if (cc->cpages[i])
			f2fs_compress_free_page(cc->cpages[i]);
	}
	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
	cc->cpages = NULL;
destroy_compress_ctx:
	if (cops->destroy_compress_ctx)
		cops->destroy_compress_ctx(cc);
out:
	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
							cc->clen, ret);
	return ret;
}

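/*
 * Editor's summary of the flow above: the raw cluster (rpages) and the
 * scratch pages (cpages) are stitched into contiguous buffers with
 * f2fs_vmap(), the algorithm runs once over the whole cluster, and the
 * result is kept only if header plus payload fit in cluster_size - 1
 * pages; the unused tail scratch pages are then trimmed and returned to
 * the mempool.
 */
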
static void f2fs_decompress_cluster(struct decompress_io_ctx *dic)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
	struct f2fs_inode_info *fi = F2FS_I(dic->inode);
	const struct f2fs_compress_ops *cops =
			f2fs_cops[fi->i_compress_algorithm];
	int ret;
	int i;

	trace_f2fs_decompress_pages_start(dic->inode, dic->cluster_idx,
				dic->cluster_size, fi->i_compress_algorithm);

	if (dic->failed) {
		ret = -EIO;
		goto out_end_io;
	}

	dic->tpages = page_array_alloc(dic->inode, dic->cluster_size);
	if (!dic->tpages) {
		ret = -ENOMEM;
		goto out_end_io;
	}

	for (i = 0; i < dic->cluster_size; i++) {
		if (dic->rpages[i]) {
			dic->tpages[i] = dic->rpages[i];
			continue;
		}

		dic->tpages[i] = f2fs_compress_alloc_page();
		if (!dic->tpages[i]) {
			ret = -ENOMEM;
			goto out_end_io;
		}
	}

	if (cops->init_decompress_ctx) {
		ret = cops->init_decompress_ctx(dic);
		if (ret)
			goto out_end_io;
	}

	dic->rbuf = f2fs_vmap(dic->tpages, dic->cluster_size);
	if (!dic->rbuf) {
		ret = -ENOMEM;
		goto out_destroy_decompress_ctx;
	}

	dic->cbuf = f2fs_vmap(dic->cpages, dic->nr_cpages);
	if (!dic->cbuf) {
		ret = -ENOMEM;
		goto out_vunmap_rbuf;
	}

	dic->clen = le32_to_cpu(dic->cbuf->clen);
	dic->rlen = PAGE_SIZE << dic->log_cluster_size;

	if (dic->clen > PAGE_SIZE * dic->nr_cpages - COMPRESS_HEADER_SIZE) {
		ret = -EFSCORRUPTED;
		goto out_vunmap_cbuf;
	}

	ret = cops->decompress_pages(dic);

	if (!ret && (fi->i_compress_flag & 1 << COMPRESS_CHKSUM)) {
		u32 provided = le32_to_cpu(dic->cbuf->chksum);
		u32 calculated = f2fs_crc32(sbi, dic->cbuf->cdata, dic->clen);

		if (provided != calculated) {
			if (!is_inode_flag_set(dic->inode, FI_COMPRESS_CORRUPT)) {
				set_inode_flag(dic->inode, FI_COMPRESS_CORRUPT);
				printk_ratelimited(
					"%sF2FS-fs (%s): checksum invalid, nid = %lu, %x vs %x",
					KERN_INFO, sbi->sb->s_id, dic->inode->i_ino,
					provided, calculated);
			}
			set_sbi_flag(sbi, SBI_NEED_FSCK);
		}
	}

out_vunmap_cbuf:
	vm_unmap_ram(dic->cbuf, dic->nr_cpages);
out_vunmap_rbuf:
	vm_unmap_ram(dic->rbuf, dic->cluster_size);
out_destroy_decompress_ctx:
	if (cops->destroy_decompress_ctx)
		cops->destroy_decompress_ctx(dic);
out_end_io:
	trace_f2fs_decompress_pages_end(dic->inode, dic->cluster_idx,
							dic->clen, ret);
	f2fs_decompress_end_io(dic, ret);
}

/*
 * This is called when a page of a compressed cluster has been read from disk
 * (or failed to be read from disk). It checks whether this page was the last
 * page being waited on in the cluster, and if so, it decompresses the cluster
 * (or in the case of a failure, cleans up without actually decompressing).
 */
void f2fs_end_read_compressed_page(struct page *page, bool failed)
{
	struct decompress_io_ctx *dic =
			(struct decompress_io_ctx *)page_private(page);
	struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);

	dec_page_count(sbi, F2FS_RD_DATA);

	if (failed)
		WRITE_ONCE(dic->failed, true);

	if (atomic_dec_and_test(&dic->remaining_pages))
		f2fs_decompress_cluster(dic);
}

static bool is_page_in_cluster(struct compress_ctx *cc, pgoff_t index)
{
	if (cc->cluster_idx == NULL_CLUSTER)
		return true;
	return cc->cluster_idx == cluster_idx(cc, index);
}

bool f2fs_cluster_is_empty(struct compress_ctx *cc)
{
	return cc->nr_rpages == 0;
}

static bool f2fs_cluster_is_full(struct compress_ctx *cc)
{
	return cc->cluster_size == cc->nr_rpages;
}

bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index)
{
	if (f2fs_cluster_is_empty(cc))
		return true;
	return is_page_in_cluster(cc, index);
}

static bool __cluster_may_compress(struct compress_ctx *cc)
{
	loff_t i_size = i_size_read(cc->inode);
	unsigned nr_pages = DIV_ROUND_UP(i_size, PAGE_SIZE);
	int i;

	for (i = 0; i < cc->cluster_size; i++) {
		struct page *page = cc->rpages[i];

		f2fs_bug_on(F2FS_I_SB(cc->inode), !page);

		/* beyond EOF */
		if (page->index >= nr_pages)
			return false;
	}
	return true;
}

static int __f2fs_cluster_blocks(struct compress_ctx *cc, bool compr)
{
	struct dnode_of_data dn;
	int ret;

	set_new_dnode(&dn, cc->inode, NULL, NULL, 0);
	ret = f2fs_get_dnode_of_data(&dn, start_idx_of_cluster(cc),
							LOOKUP_NODE);
	if (ret) {
		if (ret == -ENOENT)
			ret = 0;
		goto fail;
	}

	if (dn.data_blkaddr == COMPRESS_ADDR) {
		int i;

		ret = 1;
		for (i = 1; i < cc->cluster_size; i++) {
			block_t blkaddr;

			blkaddr = data_blkaddr(dn.inode,
					dn.node_page, dn.ofs_in_node + i);
			if (compr) {
				if (__is_valid_data_blkaddr(blkaddr))
					ret++;
			} else {
				if (blkaddr != NULL_ADDR)
					ret++;
			}
		}
	}
fail:
	f2fs_put_dnode(&dn);
	return ret;
}

/* return # of compressed blocks in compressed cluster */
static int f2fs_compressed_blocks(struct compress_ctx *cc)
{
	return __f2fs_cluster_blocks(cc, true);
}

/* return # of valid blocks in compressed cluster */
static int f2fs_cluster_blocks(struct compress_ctx *cc)
{
	return __f2fs_cluster_blocks(cc, false);
}

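/*
 * Editor's note: a compressed cluster is recognized by COMPRESS_ADDR in
 * its first block slot, which is why both helpers count that slot as 1;
 * a return of 0 therefore means "not a compressed cluster" (or a hole),
 * and callers such as f2fs_is_compressed_cluster() rely on that.
 */
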
int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index)
{
	struct compress_ctx cc = {
		.inode = inode,
		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
		.cluster_size = F2FS_I(inode)->i_cluster_size,
		.cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size,
	};

	return f2fs_cluster_blocks(&cc);
}

static bool cluster_may_compress(struct compress_ctx *cc)
{
	if (!f2fs_need_compress_data(cc->inode))
		return false;
	if (f2fs_is_atomic_file(cc->inode))
		return false;
	if (f2fs_is_mmap_file(cc->inode))
		return false;
	if (!f2fs_cluster_is_full(cc))
		return false;
	if (unlikely(f2fs_cp_error(F2FS_I_SB(cc->inode))))
		return false;
	return __cluster_may_compress(cc);
}

static void set_cluster_writeback(struct compress_ctx *cc)
{
	int i;

	for (i = 0; i < cc->cluster_size; i++) {
		if (cc->rpages[i])
			set_page_writeback(cc->rpages[i]);
	}
}

static void set_cluster_dirty(struct compress_ctx *cc)
{
	int i;

	for (i = 0; i < cc->cluster_size; i++)
		if (cc->rpages[i])
			set_page_dirty(cc->rpages[i]);
}

static int prepare_compress_overwrite(struct compress_ctx *cc,
		struct page **pagep, pgoff_t index, void **fsdata)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
	struct address_space *mapping = cc->inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	sector_t last_block_in_bio;
	unsigned fgp_flag = FGP_LOCK | FGP_WRITE | FGP_CREAT;
	pgoff_t start_idx = start_idx_of_cluster(cc);
	int i, ret;
	bool prealloc;

retry:
	ret = f2fs_cluster_blocks(cc);
	if (ret <= 0)
		return ret;

	/* compressed case */
	prealloc = (ret < cc->cluster_size);

	ret = f2fs_init_compress_ctx(cc);
	if (ret)
		return ret;

	/* keep page reference to avoid page reclaim */
	for (i = 0; i < cc->cluster_size; i++) {
		page = f2fs_pagecache_get_page(mapping, start_idx + i,
				fgp_flag, GFP_NOFS);
		if (!page) {
			ret = -ENOMEM;
			goto unlock_pages;
		}

		if (PageUptodate(page))
			f2fs_put_page(page, 1);
		else
			f2fs_compress_ctx_add_page(cc, page);
	}

	if (!f2fs_cluster_is_empty(cc)) {
		struct bio *bio = NULL;

		ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
					&last_block_in_bio, false, true);
		f2fs_put_rpages(cc);
		f2fs_destroy_compress_ctx(cc, true);
		if (ret)
			goto out;
		if (bio)
			f2fs_submit_bio(sbi, bio, DATA);

		ret = f2fs_init_compress_ctx(cc);
		if (ret)
			goto out;
	}

	for (i = 0; i < cc->cluster_size; i++) {
		f2fs_bug_on(sbi, cc->rpages[i]);

		page = find_lock_page(mapping, start_idx + i);
		if (!page) {
			/* page can be truncated */
			goto release_and_retry;
		}

		f2fs_wait_on_page_writeback(page, DATA, true, true);
		f2fs_compress_ctx_add_page(cc, page);

		if (!PageUptodate(page)) {
release_and_retry:
			f2fs_put_rpages(cc);
			f2fs_unlock_rpages(cc, i + 1);
			f2fs_destroy_compress_ctx(cc, true);
			goto retry;
		}
	}

	if (prealloc) {
		f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);

		set_new_dnode(&dn, cc->inode, NULL, NULL, 0);

		for (i = cc->cluster_size - 1; i > 0; i--) {
			ret = f2fs_get_block(&dn, start_idx + i);
			if (ret) {
				i = cc->cluster_size;
				break;
			}

			if (dn.data_blkaddr != NEW_ADDR)
				break;
		}

		f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
	}

	if (likely(!ret)) {
		*fsdata = cc->rpages;
		*pagep = cc->rpages[offset_in_cluster(cc, index)];
		return cc->cluster_size;
	}

unlock_pages:
	f2fs_put_rpages(cc);
	f2fs_unlock_rpages(cc, i);
	f2fs_destroy_compress_ctx(cc, true);
out:
	return ret;
}

int f2fs_prepare_compress_overwrite(struct inode *inode,
		struct page **pagep, pgoff_t index, void **fsdata)
{
	struct compress_ctx cc = {
		.inode = inode,
		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
		.cluster_size = F2FS_I(inode)->i_cluster_size,
		.cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size,
		.rpages = NULL,
		.nr_rpages = 0,
	};

	return prepare_compress_overwrite(&cc, pagep, index, fsdata);
}

bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
					pgoff_t index, unsigned copied)
{
	struct compress_ctx cc = {
		.inode = inode,
		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
		.cluster_size = F2FS_I(inode)->i_cluster_size,
		.rpages = fsdata,
	};
	bool first_index = (index == cc.rpages[0]->index);

	if (copied)
		set_cluster_dirty(&cc);

	f2fs_put_rpages_wbc(&cc, NULL, false, 1);
	f2fs_destroy_compress_ctx(&cc, false);

	return first_index;
}

int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock)
{
	void *fsdata = NULL;
	struct page *pagep;
	int log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
	pgoff_t start_idx = from >> (PAGE_SHIFT + log_cluster_size) <<
							log_cluster_size;
	int err;

	err = f2fs_is_compressed_cluster(inode, start_idx);
	if (err < 0)
		return err;

	/* truncate normal cluster */
	if (!err)
		return f2fs_do_truncate_blocks(inode, from, lock);

	/* truncate compressed cluster */
	err = f2fs_prepare_compress_overwrite(inode, &pagep,
						start_idx, &fsdata);

	/* should not be a normal cluster */
	f2fs_bug_on(F2FS_I_SB(inode), err == 0);

	if (err <= 0)
		return err;

	if (err > 0) {
		struct page **rpages = fsdata;
		int cluster_size = F2FS_I(inode)->i_cluster_size;
		int i;

		for (i = cluster_size - 1; i >= 0; i--) {
			loff_t start = rpages[i]->index << PAGE_SHIFT;

			if (from <= start) {
				zero_user_segment(rpages[i], 0, PAGE_SIZE);
			} else {
				zero_user_segment(rpages[i], from - start,
						PAGE_SIZE);
				break;
			}
		}

		f2fs_compress_write_end(inode, fsdata, start_idx, true);
	}
	return 0;
}

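/*
 * Editor's example: when truncating into the middle of a compressed
 * cluster, the loop above walks the cluster's pages from the tail, fully
 * zeroing pages wholly past the new EOF and partially zeroing the page
 * that contains it, then rewrites the cluster through the normal
 * compressed-overwrite path so the on-disk cluster stays consistent.
 */
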
static int f2fs_write_compressed_pages(struct compress_ctx *cc,
					int *submitted,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	struct inode *inode = cc->inode;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = cc->inode->i_ino,
		.type = DATA,
		.op = REQ_OP_WRITE,
		.op_flags = wbc_to_write_flags(wbc),
		.old_blkaddr = NEW_ADDR,
		.page = NULL,
		.encrypted_page = NULL,
		.compressed_page = NULL,
		.submitted = false,
		.io_type = io_type,
		.io_wbc = wbc,
		.encrypted = fscrypt_inode_uses_fs_layer_crypto(cc->inode),
	};
	struct dnode_of_data dn;
	struct node_info ni;
	struct compress_io_ctx *cic;
	pgoff_t start_idx = start_idx_of_cluster(cc);
	unsigned int last_index = cc->cluster_size - 1;
	loff_t psize;
	int i, err;

	if (IS_NOQUOTA(inode)) {
		/*
		 * We need to wait for node_write to avoid block allocation during
		 * checkpoint. This can only happen to quota writes which can cause
		 * the below discard race condition.
		 */
		down_read(&sbi->node_write);
	} else if (!f2fs_trylock_op(sbi)) {
		goto out_free;
	}

	set_new_dnode(&dn, cc->inode, NULL, NULL, 0);

	err = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
	if (err)
		goto out_unlock_op;

	for (i = 0; i < cc->cluster_size; i++) {
		if (data_blkaddr(dn.inode, dn.node_page,
					dn.ofs_in_node + i) == NULL_ADDR)
			goto out_put_dnode;
	}

	psize = (loff_t)(cc->rpages[last_index]->index + 1) << PAGE_SHIFT;

	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
	if (err)
		goto out_put_dnode;

	fio.version = ni.version;

	cic = kmem_cache_zalloc(cic_entry_slab, GFP_NOFS);
	if (!cic)
		goto out_put_dnode;

	cic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
	cic->inode = inode;
	atomic_set(&cic->pending_pages, cc->nr_cpages);
	cic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
	if (!cic->rpages)
		goto out_put_cic;

	cic->nr_rpages = cc->cluster_size;

	for (i = 0; i < cc->nr_cpages; i++) {
		f2fs_set_compressed_page(cc->cpages[i], inode,
					cc->rpages[i + 1]->index, cic);
		fio.compressed_page = cc->cpages[i];

		fio.old_blkaddr = data_blkaddr(dn.inode, dn.node_page,
						dn.ofs_in_node + i + 1);

		/* wait for GCed page writeback via META_MAPPING */
		f2fs_wait_on_block_writeback(inode, fio.old_blkaddr);

		if (fio.encrypted) {
			fio.page = cc->rpages[i + 1];
			err = f2fs_encrypt_one_page(&fio);
			if (err)
				goto out_destroy_crypt;
			cc->cpages[i] = fio.encrypted_page;
		}
	}

	set_cluster_writeback(cc);

	for (i = 0; i < cc->cluster_size; i++)
		cic->rpages[i] = cc->rpages[i];

	for (i = 0; i < cc->cluster_size; i++, dn.ofs_in_node++) {
		block_t blkaddr;

		blkaddr = f2fs_data_blkaddr(&dn);
		fio.page = cc->rpages[i];
		fio.old_blkaddr = blkaddr;

		/* cluster header */
		if (i == 0) {
			if (blkaddr == COMPRESS_ADDR)
				fio.compr_blocks++;
			if (__is_valid_data_blkaddr(blkaddr))
				f2fs_invalidate_blocks(sbi, blkaddr);
			f2fs_update_data_blkaddr(&dn, COMPRESS_ADDR);
			goto unlock_continue;
		}

		if (fio.compr_blocks && __is_valid_data_blkaddr(blkaddr))
			fio.compr_blocks++;

		if (i > cc->nr_cpages) {
			if (__is_valid_data_blkaddr(blkaddr)) {
				f2fs_invalidate_blocks(sbi, blkaddr);
				f2fs_update_data_blkaddr(&dn, NEW_ADDR);
			}
			goto unlock_continue;
		}

		f2fs_bug_on(fio.sbi, blkaddr == NULL_ADDR);

		if (fio.encrypted)
			fio.encrypted_page = cc->cpages[i - 1];
		else
			fio.compressed_page = cc->cpages[i - 1];

		cc->cpages[i - 1] = NULL;
		f2fs_outplace_write_data(&dn, &fio);
		(*submitted)++;
unlock_continue:
		inode_dec_dirty_pages(cc->inode);
		unlock_page(fio.page);
	}

	if (fio.compr_blocks)
		f2fs_i_compr_blocks_update(inode, fio.compr_blocks - 1, false);
	f2fs_i_compr_blocks_update(inode, cc->nr_cpages, true);
	add_compr_block_stat(inode, cc->nr_cpages);

	set_inode_flag(cc->inode, FI_APPEND_WRITE);
	if (cc->cluster_idx == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);

	f2fs_put_dnode(&dn);
	if (IS_NOQUOTA(inode))
		up_read(&sbi->node_write);
	else
		f2fs_unlock_op(sbi);

	spin_lock(&fi->i_size_lock);
	if (fi->last_disk_size < psize)
		fi->last_disk_size = psize;
	spin_unlock(&fi->i_size_lock);

	f2fs_put_rpages(cc);
	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
	cc->cpages = NULL;
	f2fs_destroy_compress_ctx(cc, false);
	return 0;

out_destroy_crypt:
	page_array_free(cc->inode, cic->rpages, cc->cluster_size);

	for (--i; i >= 0; i--)
		fscrypt_finalize_bounce_page(&cc->cpages[i]);
	for (i = 0; i < cc->nr_cpages; i++) {
		if (!cc->cpages[i])
			continue;
		f2fs_compress_free_page(cc->cpages[i]);
		cc->cpages[i] = NULL;
	}
out_put_cic:
	kmem_cache_free(cic_entry_slab, cic);
out_put_dnode:
	f2fs_put_dnode(&dn);
out_unlock_op:
	if (IS_NOQUOTA(inode))
		up_read(&sbi->node_write);
	else
		f2fs_unlock_op(sbi);
out_free:
	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
	cc->cpages = NULL;
	return -EAGAIN;
}

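/*
 * Editor's note on the block layout written above: slot 0 of the cluster
 * gets the special COMPRESS_ADDR marker, slots 1..nr_cpages hold the
 * header plus compressed payload, and the remaining slots are released
 * back to NEW_ADDR; those released slots are where the space saving
 * comes from.
 */
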
void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct compress_io_ctx *cic =
			(struct compress_io_ctx *)page_private(page);
	int i;

	if (unlikely(bio->bi_status))
		mapping_set_error(cic->inode->i_mapping, -EIO);

	f2fs_compress_free_page(page);

	dec_page_count(sbi, F2FS_WB_DATA);

	if (atomic_dec_return(&cic->pending_pages))
		return;

	for (i = 0; i < cic->nr_rpages; i++) {
		WARN_ON(!cic->rpages[i]);
		clear_cold_data(cic->rpages[i]);
		end_page_writeback(cic->rpages[i]);
	}

	page_array_free(cic->inode, cic->rpages, cic->nr_rpages);
	kmem_cache_free(cic_entry_slab, cic);
}

static int f2fs_write_raw_pages(struct compress_ctx *cc,
					int *submitted,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	struct address_space *mapping = cc->inode->i_mapping;
	int _submitted, compr_blocks, ret;
	int i = -1, err = 0;

	compr_blocks = f2fs_compressed_blocks(cc);
	if (compr_blocks < 0) {
		err = compr_blocks;
		goto out_err;
	}

	for (i = 0; i < cc->cluster_size; i++) {
		if (!cc->rpages[i])
			continue;
retry_write:
		if (cc->rpages[i]->mapping != mapping) {
			unlock_page(cc->rpages[i]);
			continue;
		}

		BUG_ON(!PageLocked(cc->rpages[i]));

		ret = f2fs_write_single_data_page(cc->rpages[i], &_submitted,
						NULL, NULL, wbc, io_type,
						compr_blocks, false);
		if (ret) {
			if (ret == AOP_WRITEPAGE_ACTIVATE) {
				unlock_page(cc->rpages[i]);
				ret = 0;
			} else if (ret == -EAGAIN) {
				/*
				 * for quota file, just redirty left pages to
				 * avoid deadlock caused by cluster update race
				 * from foreground operation.
				 */
				if (IS_NOQUOTA(cc->inode)) {
					err = 0;
					goto out_err;
				}
				ret = 0;
				cond_resched();
				congestion_wait(BLK_RW_ASYNC,
						DEFAULT_IO_TIMEOUT);
				lock_page(cc->rpages[i]);

				if (!PageDirty(cc->rpages[i])) {
					unlock_page(cc->rpages[i]);
					continue;
				}

				clear_page_dirty_for_io(cc->rpages[i]);
				goto retry_write;
			}
			err = ret;
			goto out_err;
		}

		*submitted += _submitted;
	}

	f2fs_balance_fs(F2FS_M_SB(mapping), true);

	return 0;
out_err:
	for (++i; i < cc->cluster_size; i++) {
		if (!cc->rpages[i])
			continue;
		redirty_page_for_writepage(wbc, cc->rpages[i]);
		unlock_page(cc->rpages[i]);
	}
	return err;
}

int f2fs_write_multi_pages(struct compress_ctx *cc,
					int *submitted,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	int err;

	*submitted = 0;
	if (cluster_may_compress(cc)) {
		err = f2fs_compress_pages(cc);
		if (err == -EAGAIN) {
			goto write;
		} else if (err) {
			f2fs_put_rpages_wbc(cc, wbc, true, 1);
			goto destroy_out;
		}

		err = f2fs_write_compressed_pages(cc, submitted,
							wbc, io_type);
		if (!err)
			return 0;
		f2fs_bug_on(F2FS_I_SB(cc->inode), err != -EAGAIN);
	}
write:
	f2fs_bug_on(F2FS_I_SB(cc->inode), *submitted);

	err = f2fs_write_raw_pages(cc, submitted, wbc, io_type);
	f2fs_put_rpages_wbc(cc, wbc, false, 0);
destroy_out:
	f2fs_destroy_compress_ctx(cc, false);
	return err;
}

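/*
 * Editor's note: -EAGAIN is the "store uncompressed" signal throughout
 * this path; whether compression produced no saving or the compressed
 * write could not get its locks or blocks, the cluster falls through to
 * f2fs_write_raw_pages() and is written as plain data pages.
 */
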
static void f2fs_free_dic(struct decompress_io_ctx *dic);

struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
{
	struct decompress_io_ctx *dic;
	pgoff_t start_idx = start_idx_of_cluster(cc);
	int i;

	dic = kmem_cache_zalloc(dic_entry_slab, GFP_NOFS);
	if (!dic)
		return ERR_PTR(-ENOMEM);

	dic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
	if (!dic->rpages) {
		kmem_cache_free(dic_entry_slab, dic);
		return ERR_PTR(-ENOMEM);
	}

	dic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
	dic->inode = cc->inode;
	atomic_set(&dic->remaining_pages, cc->nr_cpages);
	dic->cluster_idx = cc->cluster_idx;
	dic->cluster_size = cc->cluster_size;
	dic->log_cluster_size = cc->log_cluster_size;
	dic->nr_cpages = cc->nr_cpages;
	refcount_set(&dic->refcnt, 1);
	dic->failed = false;
	dic->need_verity = f2fs_need_verity(cc->inode, start_idx);

	for (i = 0; i < dic->cluster_size; i++)
		dic->rpages[i] = cc->rpages[i];
	dic->nr_rpages = cc->cluster_size;

	dic->cpages = page_array_alloc(dic->inode, dic->nr_cpages);
	if (!dic->cpages)
		goto out_free;

	for (i = 0; i < dic->nr_cpages; i++) {
		struct page *page;

		page = f2fs_compress_alloc_page();
		if (!page)
			goto out_free;

		f2fs_set_compressed_page(page, cc->inode,
					start_idx + i + 1, dic);
		dic->cpages[i] = page;
	}

	return dic;

out_free:
	f2fs_free_dic(dic);
	return ERR_PTR(-ENOMEM);
}

static void f2fs_free_dic(struct decompress_io_ctx *dic)
{
	int i;

	if (dic->tpages) {
		for (i = 0; i < dic->cluster_size; i++) {
			if (dic->rpages[i])
				continue;
			if (!dic->tpages[i])
				continue;
			f2fs_compress_free_page(dic->tpages[i]);
		}
		page_array_free(dic->inode, dic->tpages, dic->cluster_size);
	}

	if (dic->cpages) {
		for (i = 0; i < dic->nr_cpages; i++) {
			if (!dic->cpages[i])
				continue;
			f2fs_compress_free_page(dic->cpages[i]);
		}
		page_array_free(dic->inode, dic->cpages, dic->nr_cpages);
	}

	page_array_free(dic->inode, dic->rpages, dic->nr_rpages);
	kmem_cache_free(dic_entry_slab, dic);
}

static void f2fs_put_dic(struct decompress_io_ctx *dic)
{
	if (refcount_dec_and_test(&dic->refcnt))
		f2fs_free_dic(dic);
}

/*
 * Update and unlock the cluster's pagecache pages, and release the reference to
 * the decompress_io_ctx that was being held for I/O completion.
 */
static void __f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed)
{
	int i;

	for (i = 0; i < dic->cluster_size; i++) {
		struct page *rpage = dic->rpages[i];

		if (!rpage)
			continue;

		/* PG_error was set if verity failed. */
		if (failed || PageError(rpage)) {
			ClearPageUptodate(rpage);
			/* will re-read again later */
			ClearPageError(rpage);
		} else {
			SetPageUptodate(rpage);
		}
		unlock_page(rpage);
	}

	f2fs_put_dic(dic);
}

static void f2fs_verify_cluster(struct work_struct *work)
{
	struct decompress_io_ctx *dic =
		container_of(work, struct decompress_io_ctx, verity_work);
	int i;

	/* Verify the cluster's decompressed pages with fs-verity. */
	for (i = 0; i < dic->cluster_size; i++) {
		struct page *rpage = dic->rpages[i];

		if (rpage && !fsverity_verify_page(rpage))
			SetPageError(rpage);
	}

	__f2fs_decompress_end_io(dic, false);
}

/*
 * This is called when a compressed cluster has been decompressed
 * (or failed to be read and/or decompressed).
 */
void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed)
{
	if (!failed && dic->need_verity) {
		/*
		 * Note that to avoid deadlocks, the verity work can't be done
		 * on the decompression workqueue. This is because verifying
		 * the data pages can involve reading metadata pages from the
		 * file, and these metadata pages may be compressed.
		 */
		INIT_WORK(&dic->verity_work, f2fs_verify_cluster);
		fsverity_enqueue_verify_work(&dic->verity_work);
	} else {
		__f2fs_decompress_end_io(dic, failed);
	}
}

/*
 * Put a reference to a compressed page's decompress_io_ctx.
 *
 * This is called when the page is no longer needed and can be freed.
 */
void f2fs_put_page_dic(struct page *page)
{
	struct decompress_io_ctx *dic =
			(struct decompress_io_ctx *)page_private(page);

	f2fs_put_dic(dic);
}

int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi)
{
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	char slab_name[32];

	sprintf(slab_name, "f2fs_page_array_entry-%u:%u", MAJOR(dev), MINOR(dev));

	sbi->page_array_slab_size = sizeof(struct page *) <<
					F2FS_OPTION(sbi).compress_log_size;

	sbi->page_array_slab = f2fs_kmem_cache_create(slab_name,
					sbi->page_array_slab_size);
	if (!sbi->page_array_slab)
		return -ENOMEM;
	return 0;
}

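/*
 * Editor's example: for a filesystem on /dev/sda2 (dev_t 8:2) mounted
 * with compress_log_size=2, the slab is named "f2fs_page_array_entry-8:2"
 * and each object holds 4 page pointers (sizeof(struct page *) << 2
 * bytes), matching one cluster.
 */
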
void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi)
{
	kmem_cache_destroy(sbi->page_array_slab);
}

static int __init f2fs_init_cic_cache(void)
{
	cic_entry_slab = f2fs_kmem_cache_create("f2fs_cic_entry",
					sizeof(struct compress_io_ctx));
	if (!cic_entry_slab)
		return -ENOMEM;
	return 0;
}

static void f2fs_destroy_cic_cache(void)
{
	kmem_cache_destroy(cic_entry_slab);
}

static int __init f2fs_init_dic_cache(void)
{
	dic_entry_slab = f2fs_kmem_cache_create("f2fs_dic_entry",
					sizeof(struct decompress_io_ctx));
	if (!dic_entry_slab)
		return -ENOMEM;
	return 0;
}

static void f2fs_destroy_dic_cache(void)
{
	kmem_cache_destroy(dic_entry_slab);
}

int __init f2fs_init_compress_cache(void)
{
	int err;

	err = f2fs_init_cic_cache();
	if (err)
		goto out;
	err = f2fs_init_dic_cache();
	if (err)
		goto free_cic;
	return 0;
free_cic:
	f2fs_destroy_cic_cache();
out:
	return -ENOMEM;
}

void f2fs_destroy_compress_cache(void)
{
	f2fs_destroy_dic_cache();
	f2fs_destroy_cic_cache();
}