// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/log2.h>
#include <crypto/hash.h>
#include "misc.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "ordered-data.h"
#include "compression.h"
#include "extent_io.h"
#include "extent_map.h"

static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" };

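/*
 * Map a compression type to its textual name ("" for no compression).
 * Returns NULL for an unrecognized type.
 */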
const char* btrfs_compress_type2str(enum btrfs_compression_type type)
{
	switch (type) {
	case BTRFS_COMPRESS_ZLIB:
	case BTRFS_COMPRESS_LZO:
	case BTRFS_COMPRESS_ZSTD:
	case BTRFS_COMPRESS_NONE:
		return btrfs_compress_types[type];
	default:
		break;
	}

	return NULL;
}

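/*
 * Check whether the first @len bytes of @str begin with the name of a
 * supported compression type.  A trailing level suffix such as ":9" is
 * ignored, only the prefix has to match.
 */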
bool btrfs_compress_is_valid_type(const char *str, size_t len)
{
	int i;

	for (i = 1; i < ARRAY_SIZE(btrfs_compress_types); i++) {
		size_t comp_len = strlen(btrfs_compress_types[i]);

		if (len < comp_len)
			continue;

		if (!strncmp(btrfs_compress_types[i], str, comp_len))
			return true;
	}
	return false;
}

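/*
 * Dispatch to the type-specific compression implementation.  The workspace
 * @ws must have been obtained from get_workspace() for the same @type.
 */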
static int compression_compress_pages(int type, struct list_head *ws,
		struct address_space *mapping, u64 start, struct page **pages,
		unsigned long *out_pages, unsigned long *total_in,
		unsigned long *total_out)
{
	switch (type) {
	case BTRFS_COMPRESS_ZLIB:
		return zlib_compress_pages(ws, mapping, start, pages,
				out_pages, total_in, total_out);
	case BTRFS_COMPRESS_LZO:
		return lzo_compress_pages(ws, mapping, start, pages,
				out_pages, total_in, total_out);
	case BTRFS_COMPRESS_ZSTD:
		return zstd_compress_pages(ws, mapping, start, pages,
				out_pages, total_in, total_out);
	case BTRFS_COMPRESS_NONE:
	default:
		/*
		 * This can happen when compression races with a remount
		 * setting it to 'no compress', while the caller hasn't called
		 * inode_need_compress() to check if we really need to
		 * compress.
		 *
		 * Not a big deal, just inform the caller that we haven't
		 * allocated any pages yet.
		 */
		*out_pages = 0;
		return -E2BIG;
	}
}

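/*
 * The two helpers below dispatch to the type-specific decompression
 * implementations: one for a whole compressed bio, one for a single page.
 * Unlike the compression side, the type has already been validated by the
 * time we get here, so an unknown type is a bug.
 */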
static int compression_decompress_bio(int type, struct list_head *ws,
				      struct compressed_bio *cb)
{
	switch (type) {
	case BTRFS_COMPRESS_ZLIB: return zlib_decompress_bio(ws, cb);
	case BTRFS_COMPRESS_LZO:  return lzo_decompress_bio(ws, cb);
	case BTRFS_COMPRESS_ZSTD: return zstd_decompress_bio(ws, cb);
	case BTRFS_COMPRESS_NONE:
	default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
		BUG();
	}
}

static int compression_decompress(int type, struct list_head *ws,
		unsigned char *data_in, struct page *dest_page,
		unsigned long start_byte, size_t srclen, size_t destlen)
{
	switch (type) {
	case BTRFS_COMPRESS_ZLIB: return zlib_decompress(ws, data_in, dest_page,
						start_byte, srclen, destlen);
	case BTRFS_COMPRESS_LZO:  return lzo_decompress(ws, data_in, dest_page,
						start_byte, srclen, destlen);
	case BTRFS_COMPRESS_ZSTD: return zstd_decompress(ws, data_in, dest_page,
						start_byte, srclen, destlen);
	case BTRFS_COMPRESS_NONE:
	default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
		BUG();
	}
}

static int btrfs_decompress_bio(struct compressed_bio *cb);

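/*
 * Size to allocate for a compressed_bio: the struct itself plus one checksum
 * per sector of compressed data.
 */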
static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
				      unsigned long disk_size)
{
	return sizeof(struct compressed_bio) +
		(DIV_ROUND_UP(disk_size, fs_info->sectorsize)) * fs_info->csum_size;
}

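/*
 * Verify the checksums of the compressed pages against the sums stored in
 * the compressed_bio.  Returns 0 if they match (or if checksumming is
 * disabled for the inode) and -EIO on the first mismatch.
 */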
static int check_compressed_csum(struct btrfs_inode *inode, struct bio *bio,
				 u64 disk_start)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
	const u32 csum_size = fs_info->csum_size;
	const u32 sectorsize = fs_info->sectorsize;
	struct page *page;
	unsigned long i;
	char *kaddr;
	u8 csum[BTRFS_CSUM_SIZE];
	struct compressed_bio *cb = bio->bi_private;
	u8 *cb_sum = cb->sums;

	if (!fs_info->csum_root || (inode->flags & BTRFS_INODE_NODATASUM))
		return 0;

	shash->tfm = fs_info->csum_shash;

	for (i = 0; i < cb->nr_pages; i++) {
		u32 pg_offset;
		u32 bytes_left = PAGE_SIZE;
		page = cb->compressed_pages[i];

		/* Determine the remaining bytes inside the page first */
		if (i == cb->nr_pages - 1)
			bytes_left = cb->compressed_len - i * PAGE_SIZE;

		/* Hash through the page sector by sector */
		for (pg_offset = 0; pg_offset < bytes_left;
		     pg_offset += sectorsize) {
			kaddr = kmap_atomic(page);
			crypto_shash_digest(shash, kaddr + pg_offset,
					    sectorsize, csum);
			kunmap_atomic(kaddr);

			if (memcmp(&csum, cb_sum, csum_size) != 0) {
				btrfs_print_data_csum_error(inode, disk_start,
						csum, cb_sum, cb->mirror_num);
				if (btrfs_io_bio(bio)->device)
					btrfs_dev_stat_inc_and_print(
						btrfs_io_bio(bio)->device,
						BTRFS_DEV_STAT_CORRUPTION_ERRS);
				return -EIO;
			}
			cb_sum += csum_size;
			disk_start += sectorsize;
		}
	}
	return 0;
}

/*
 * When we finish reading compressed pages from the disk, we
 * decompress them and then run the bio end_io routines on the
 * decompressed pages (in the inode address space).
 *
 * This allows the checksumming and other IO error handling routines
 * to work normally.
 *
 * The compressed pages are freed here, and it must be run
 * in process context.
 */
static void end_compressed_bio_read(struct bio *bio)
{
	struct compressed_bio *cb = bio->bi_private;
	struct inode *inode;
	struct page *page;
	unsigned long index;
	unsigned int mirror = btrfs_io_bio(bio)->mirror_num;
	int ret = 0;

	if (bio->bi_status)
		cb->errors = 1;

	/*
	 * If there are more bios still pending for this compressed
	 * extent, just exit.
	 */
	if (!refcount_dec_and_test(&cb->pending_bios))
		goto out;

	/*
	 * Record the correct mirror_num in cb->orig_bio so that
	 * read-repair can work properly.
	 */
	btrfs_io_bio(cb->orig_bio)->mirror_num = mirror;
	cb->mirror_num = mirror;

	/*
	 * Some IO in this cb has failed, just skip checksum as there
	 * is no way it could be correct.
	 */
	if (cb->errors == 1)
		goto csum_failed;

	inode = cb->inode;
	ret = check_compressed_csum(BTRFS_I(inode), bio,
				    bio->bi_iter.bi_sector << 9);
	if (ret)
		goto csum_failed;

	/*
	 * Ok, we're the last bio for this extent, let's start
	 * the decompression.
	 */
	ret = btrfs_decompress_bio(cb);

csum_failed:
	if (ret)
		cb->errors = 1;

	/* release the compressed pages */
	for (index = 0; index < cb->nr_pages; index++) {
		page = cb->compressed_pages[index];
		page->mapping = NULL;
		put_page(page);
	}

	/* do io completion on the original bio */
	if (cb->errors) {
		bio_io_error(cb->orig_bio);
	} else {
		struct bio_vec *bvec;
		struct bvec_iter_all iter_all;

		/*
		 * We have verified the checksum already, set page
		 * checked so the end_io handlers know about it.
		 */
		ASSERT(!bio_flagged(bio, BIO_CLONED));
		bio_for_each_segment_all(bvec, cb->orig_bio, iter_all)
			SetPageChecked(bvec->bv_page);

		bio_endio(cb->orig_bio);
	}

	/* finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
out:
	bio_put(bio);
}

/*
 * Clear the writeback bits on all of the file
 * pages for a compressed write.
 */
static noinline void end_compressed_writeback(struct inode *inode,
					      const struct compressed_bio *cb)
{
	unsigned long index = cb->start >> PAGE_SHIFT;
	unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
	struct page *pages[16];
	unsigned long nr_pages = end_index - index + 1;
	int i;
	int ret;

	if (cb->errors)
		mapping_set_error(inode->i_mapping, -EIO);

	while (nr_pages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
				     min_t(unsigned long,
				     nr_pages, ARRAY_SIZE(pages)), pages);
		if (ret == 0) {
			nr_pages -= 1;
			index += 1;
			continue;
		}
		for (i = 0; i < ret; i++) {
			if (cb->errors)
				SetPageError(pages[i]);
			end_page_writeback(pages[i]);
			put_page(pages[i]);
		}
		nr_pages -= ret;
		index += ret;
	}
	/* the inode may be gone now */
}

/*
 * Do the cleanup once all the compressed pages hit the disk.
 * This will clear writeback on the file pages and free the compressed
 * pages.
 *
 * This also calls the writeback end hooks for the file pages so that
 * metadata and checksums can be updated in the file.
 */
static void end_compressed_bio_write(struct bio *bio)
{
	struct compressed_bio *cb = bio->bi_private;
	struct inode *inode;
	struct page *page;
	unsigned long index;

	if (bio->bi_status)
		cb->errors = 1;

	/*
	 * If there are more bios still pending for this compressed
	 * extent, just exit.
	 */
	if (!refcount_dec_and_test(&cb->pending_bios))
		goto out;

	/*
	 * Ok, we're the last bio for this extent, step one is to
	 * call back into the FS and do all the end_io operations.
	 */
	inode = cb->inode;
	cb->compressed_pages[0]->mapping = cb->inode->i_mapping;
	btrfs_writepage_endio_finish_ordered(cb->compressed_pages[0],
			cb->start, cb->start + cb->len - 1,
			bio->bi_status == BLK_STS_OK);
	cb->compressed_pages[0]->mapping = NULL;

	end_compressed_writeback(inode, cb);
	/* note, our inode could be gone now */

	/*
	 * Release the compressed pages, these came from alloc_page and
	 * are not attached to the inode at all.
	 */
	for (index = 0; index < cb->nr_pages; index++) {
		page = cb->compressed_pages[index];
		page->mapping = NULL;
		put_page(page);
	}

	/* finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
out:
	bio_put(bio);
}

/*
 * Worker function to build and submit bios for previously compressed pages.
 * The corresponding pages in the inode should be marked for writeback
 * and the compressed pages should have a reference on them for dropping
 * when the IO is complete.
 *
 * This also checksums the file bytes and gets things ready for
 * the end io hooks.
 */
blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
				 unsigned long len, u64 disk_start,
				 unsigned long compressed_len,
				 struct page **compressed_pages,
				 unsigned long nr_pages,
				 unsigned int write_flags,
				 struct cgroup_subsys_state *blkcg_css)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct bio *bio = NULL;
	struct compressed_bio *cb;
	unsigned long bytes_left;
	int pg_index = 0;
	struct page *page;
	u64 first_byte = disk_start;
	blk_status_t ret;
	int skip_sum = inode->flags & BTRFS_INODE_NODATASUM;

	WARN_ON(!PAGE_ALIGNED(start));
	cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
	if (!cb)
		return BLK_STS_RESOURCE;
	refcount_set(&cb->pending_bios, 0);
	cb->errors = 0;
	cb->inode = &inode->vfs_inode;
	cb->start = start;
	cb->len = len;
	cb->mirror_num = 0;
	cb->compressed_pages = compressed_pages;
	cb->compressed_len = compressed_len;
	cb->orig_bio = NULL;
	cb->nr_pages = nr_pages;

	bio = btrfs_bio_alloc(first_byte);
	bio->bi_opf = REQ_OP_WRITE | write_flags;
	bio->bi_private = cb;
	bio->bi_end_io = end_compressed_bio_write;

	if (blkcg_css) {
		bio->bi_opf |= REQ_CGROUP_PUNT;
		kthread_associate_blkcg(blkcg_css);
	}
	refcount_set(&cb->pending_bios, 1);

	/* create and submit bios for the compressed pages */
	bytes_left = compressed_len;
	for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
		int submit = 0;

		page = compressed_pages[pg_index];
		page->mapping = inode->vfs_inode.i_mapping;
		if (bio->bi_iter.bi_size)
			submit = btrfs_bio_fits_in_stripe(page, PAGE_SIZE, bio,
							  0);

		page->mapping = NULL;
		if (submit || bio_add_page(bio, page, PAGE_SIZE, 0) <
		    PAGE_SIZE) {
			/*
			 * Inc the count before we submit the bio so
			 * we know the end IO handler won't happen before
			 * we inc the count.  Otherwise, the cb might get
			 * freed before we're done setting it up.
			 */
			refcount_inc(&cb->pending_bios);
			ret = btrfs_bio_wq_end_io(fs_info, bio,
						  BTRFS_WQ_ENDIO_DATA);
			BUG_ON(ret); /* -ENOMEM */

			if (!skip_sum) {
				ret = btrfs_csum_one_bio(inode, bio, start, 1);
				BUG_ON(ret); /* -ENOMEM */
			}

			ret = btrfs_map_bio(fs_info, bio, 0);
			if (ret) {
				bio->bi_status = ret;
				bio_endio(bio);
			}

			bio = btrfs_bio_alloc(first_byte);
			bio->bi_opf = REQ_OP_WRITE | write_flags;
			bio->bi_private = cb;
			bio->bi_end_io = end_compressed_bio_write;
			if (blkcg_css)
				bio->bi_opf |= REQ_CGROUP_PUNT;
			bio_add_page(bio, page, PAGE_SIZE, 0);
		}
		if (bytes_left < PAGE_SIZE) {
			btrfs_info(fs_info,
					"bytes left %lu compress len %lu nr %lu",
			       bytes_left, cb->compressed_len, cb->nr_pages);
		}
		bytes_left -= PAGE_SIZE;
		first_byte += PAGE_SIZE;
		cond_resched();
	}

	ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
	BUG_ON(ret); /* -ENOMEM */

	if (!skip_sum) {
		ret = btrfs_csum_one_bio(inode, bio, start, 1);
		BUG_ON(ret); /* -ENOMEM */
	}

	ret = btrfs_map_bio(fs_info, bio, 0);
	if (ret) {
		bio->bi_status = ret;
		bio_endio(bio);
	}

	if (blkcg_css)
		kthread_associate_blkcg(NULL);

	return 0;
}

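/* Return the file offset of the first byte past the last page in @bio. */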
static u64 bio_end_offset(struct bio *bio)
{
	struct bio_vec *last = bio_last_bvec_all(bio);

	return page_offset(last->bv_page) + last->bv_len + last->bv_offset;
}

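/*
 * Populate the readahead window around a compressed extent: allocate pages
 * for the byte range following cb->orig_bio and add them to the original bio
 * if they map to the same compressed extent on disk.  Stops early after
 * several pages are found already cached, or as soon as a page falls outside
 * this extent.
 */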
static noinline int add_ra_bio_pages(struct inode *inode,
				     u64 compressed_end,
				     struct compressed_bio *cb)
{
	unsigned long end_index;
	unsigned long pg_index;
	u64 last_offset;
	u64 isize = i_size_read(inode);
	int ret;
	struct page *page;
	unsigned long nr_pages = 0;
	struct extent_map *em;
	struct address_space *mapping = inode->i_mapping;
	struct extent_map_tree *em_tree;
	struct extent_io_tree *tree;
	u64 end;
	int misses = 0;

	last_offset = bio_end_offset(cb->orig_bio);
	em_tree = &BTRFS_I(inode)->extent_tree;
	tree = &BTRFS_I(inode)->io_tree;

	if (isize == 0)
		return 0;

	end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;

	while (last_offset < compressed_end) {
		pg_index = last_offset >> PAGE_SHIFT;

		if (pg_index > end_index)
			break;

		page = xa_load(&mapping->i_pages, pg_index);
		if (page && !xa_is_value(page)) {
			misses++;
			if (misses > 4)
				break;
			goto next;
		}

		page = __page_cache_alloc(mapping_gfp_constraint(mapping,
								 ~__GFP_FS));
		if (!page)
			break;

		if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
			put_page(page);
			goto next;
		}

		/*
		 * At this point, we have a locked page in the page cache
		 * for these bytes in the file.  But, we have to make
		 * sure they map to this compressed extent on disk.
		 */
		ret = set_page_extent_mapped(page);
		if (ret < 0) {
			unlock_page(page);
			put_page(page);
			break;
		}

		end = last_offset + PAGE_SIZE - 1;
		lock_extent(tree, last_offset, end);
		read_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, last_offset,
					   PAGE_SIZE);
		read_unlock(&em_tree->lock);

		if (!em || last_offset < em->start ||
		    (last_offset + PAGE_SIZE > extent_map_end(em)) ||
		    (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) {
			free_extent_map(em);
			unlock_extent(tree, last_offset, end);
			unlock_page(page);
			put_page(page);
			break;
		}
		free_extent_map(em);

		if (page->index == end_index) {
			size_t zero_offset = offset_in_page(isize);

			if (zero_offset) {
				int zeros;
				zeros = PAGE_SIZE - zero_offset;
				memzero_page(page, zero_offset, zeros);
				flush_dcache_page(page);
			}
		}

		ret = bio_add_page(cb->orig_bio, page,
				   PAGE_SIZE, 0);

		if (ret == PAGE_SIZE) {
			nr_pages++;
			put_page(page);
		} else {
			unlock_extent(tree, last_offset, end);
			unlock_page(page);
			put_page(page);
			break;
		}
next:
		last_offset += PAGE_SIZE;
	}
	return 0;
}

/*
 * For a compressed read, the bio we get passed has all the inode pages
 * in it.  We don't actually do IO on those pages but allocate new ones
 * to hold the compressed pages on disk.
 *
 * bio->bi_iter.bi_sector points to the compressed extent on disk
 * bio->bi_io_vec points to all of the inode pages
 *
 * After the compressed pages are read, we copy the bytes into the
 * bio we were passed and then call the bio end_io calls.
 */
blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
				 int mirror_num, unsigned long bio_flags)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_map_tree *em_tree;
	struct compressed_bio *cb;
	unsigned long compressed_len;
	unsigned long nr_pages;
	unsigned long pg_index;
	struct page *page;
	struct bio *comp_bio;
	u64 cur_disk_byte = bio->bi_iter.bi_sector << 9;
	u64 em_len;
	u64 em_start;
	struct extent_map *em;
	blk_status_t ret = BLK_STS_RESOURCE;
	int faili = 0;
	u8 *sums;

	em_tree = &BTRFS_I(inode)->extent_tree;

	/* we need the actual starting offset of this extent in the file */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree,
				   page_offset(bio_first_page_all(bio)),
				   fs_info->sectorsize);
	read_unlock(&em_tree->lock);
	if (!em)
		return BLK_STS_IOERR;

	compressed_len = em->block_len;
	cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
	if (!cb)
		goto out;

	refcount_set(&cb->pending_bios, 0);
	cb->errors = 0;
	cb->inode = inode;
	cb->mirror_num = mirror_num;
	sums = cb->sums;

	cb->start = em->orig_start;
	em_len = em->len;
	em_start = em->start;

	free_extent_map(em);
	em = NULL;

	cb->len = bio->bi_iter.bi_size;
	cb->compressed_len = compressed_len;
	cb->compress_type = extent_compress_type(bio_flags);
	cb->orig_bio = bio;

	nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
	cb->compressed_pages = kcalloc(nr_pages, sizeof(struct page *),
				       GFP_NOFS);
	if (!cb->compressed_pages)
		goto fail1;

	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
		cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS |
							    __GFP_HIGHMEM);
		if (!cb->compressed_pages[pg_index]) {
			faili = pg_index - 1;
			ret = BLK_STS_RESOURCE;
			goto fail2;
		}
	}
	faili = nr_pages - 1;
	cb->nr_pages = nr_pages;

	add_ra_bio_pages(inode, em_start + em_len, cb);

	/* include any pages we added in add_ra_bio_pages */
	cb->len = bio->bi_iter.bi_size;

	comp_bio = btrfs_bio_alloc(cur_disk_byte);
	comp_bio->bi_opf = REQ_OP_READ;
	comp_bio->bi_private = cb;
	comp_bio->bi_end_io = end_compressed_bio_read;
	refcount_set(&cb->pending_bios, 1);

	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
		u32 pg_len = PAGE_SIZE;
		int submit = 0;

		/*
		 * To handle subpage case, we need to make sure the bio only
		 * covers the range we need.
		 *
		 * If we're at the last page, truncate the length to only cover
		 * the remaining part.
		 */
		if (pg_index == nr_pages - 1)
			pg_len = min_t(u32, PAGE_SIZE,
					compressed_len - pg_index * PAGE_SIZE);

		page = cb->compressed_pages[pg_index];
		page->mapping = inode->i_mapping;
		page->index = em_start >> PAGE_SHIFT;

		if (comp_bio->bi_iter.bi_size)
			submit = btrfs_bio_fits_in_stripe(page, pg_len,
							  comp_bio, 0);

		page->mapping = NULL;
		if (submit || bio_add_page(comp_bio, page, pg_len, 0) < pg_len) {
			unsigned int nr_sectors;

			ret = btrfs_bio_wq_end_io(fs_info, comp_bio,
						  BTRFS_WQ_ENDIO_DATA);
			BUG_ON(ret); /* -ENOMEM */

			/*
			 * Inc the count before we submit the bio so
			 * we know the end IO handler won't happen before
			 * we inc the count.  Otherwise, the cb might get
			 * freed before we're done setting it up.
			 */
			refcount_inc(&cb->pending_bios);

			ret = btrfs_lookup_bio_sums(inode, comp_bio, sums);
			BUG_ON(ret); /* -ENOMEM */

			nr_sectors = DIV_ROUND_UP(comp_bio->bi_iter.bi_size,
						  fs_info->sectorsize);
			sums += fs_info->csum_size * nr_sectors;

			ret = btrfs_map_bio(fs_info, comp_bio, mirror_num);
			if (ret) {
				comp_bio->bi_status = ret;
				bio_endio(comp_bio);
			}

			comp_bio = btrfs_bio_alloc(cur_disk_byte);
			comp_bio->bi_opf = REQ_OP_READ;
			comp_bio->bi_private = cb;
			comp_bio->bi_end_io = end_compressed_bio_read;

			bio_add_page(comp_bio, page, pg_len, 0);
		}
		cur_disk_byte += pg_len;
	}

	ret = btrfs_bio_wq_end_io(fs_info, comp_bio, BTRFS_WQ_ENDIO_DATA);
	BUG_ON(ret); /* -ENOMEM */

	ret = btrfs_lookup_bio_sums(inode, comp_bio, sums);
	BUG_ON(ret); /* -ENOMEM */

	ret = btrfs_map_bio(fs_info, comp_bio, mirror_num);
	if (ret) {
		comp_bio->bi_status = ret;
		bio_endio(comp_bio);
	}

	return 0;

fail2:
	while (faili >= 0) {
		__free_page(cb->compressed_pages[faili]);
		faili--;
	}

	kfree(cb->compressed_pages);
fail1:
	kfree(cb);
out:
	free_extent_map(em);
	return ret;
}

/*
 * Heuristic uses systematic sampling to collect data from the input data
 * range, the logic can be tuned by the following constants:
 *
 * @SAMPLING_READ_SIZE - how many bytes will be copied from for each sample
 * @SAMPLING_INTERVAL  - range from which the sampled data can be collected
 */
#define SAMPLING_READ_SIZE	(16)
#define SAMPLING_INTERVAL	(256)

/*
 * For statistical analysis of the input data we consider bytes that form a
 * Galois Field of 256 objects.  Each object has an attribute count, ie. how
 * many times the object appeared in the sample.
 */
#define BUCKET_SIZE		(256)

/*
 * The size of the sample is based on a statistical sampling rule of thumb.
 * The common way is to perform sampling tests as long as the number of
 * elements in each cell is at least 5.
 *
 * Instead of 5, we choose 32 to obtain more accurate results.
 * If the data contain the maximum number of symbols, which is 256, we obtain a
 * sample size bound by 8192.
 *
 * For a sample of at most 8KB of data per data range: 16 consecutive bytes
 * from up to 512 locations.
 */
#define MAX_SAMPLE_SIZE		(BTRFS_MAX_UNCOMPRESSED * \
				 SAMPLING_READ_SIZE / SAMPLING_INTERVAL)

struct bucket_item {
	u32 count;
};

struct heuristic_ws {
	/* Partial copy of input data */
	u8 *sample;
	u32 sample_size;
	/* Buckets store counters for each byte value */
	struct bucket_item *bucket;
	/* Sorting buffer */
	struct bucket_item *bucket_b;
	struct list_head list;
};

static struct workspace_manager heuristic_wsm;

static void free_heuristic_ws(struct list_head *ws)
{
	struct heuristic_ws *workspace;

	workspace = list_entry(ws, struct heuristic_ws, list);

	kvfree(workspace->sample);
	kfree(workspace->bucket);
	kfree(workspace->bucket_b);
	kfree(workspace);
}

static struct list_head *alloc_heuristic_ws(unsigned int level)
{
	struct heuristic_ws *ws;

	ws = kzalloc(sizeof(*ws), GFP_KERNEL);
	if (!ws)
		return ERR_PTR(-ENOMEM);

	ws->sample = kvmalloc(MAX_SAMPLE_SIZE, GFP_KERNEL);
	if (!ws->sample)
		goto fail;

	ws->bucket = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket), GFP_KERNEL);
	if (!ws->bucket)
		goto fail;

	ws->bucket_b = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket_b), GFP_KERNEL);
	if (!ws->bucket_b)
		goto fail;

	INIT_LIST_HEAD(&ws->list);
	return &ws->list;
fail:
	free_heuristic_ws(&ws->list);
	return ERR_PTR(-ENOMEM);
}

const struct btrfs_compress_op btrfs_heuristic_compress = {
	.workspace_manager = &heuristic_wsm,
};

static const struct btrfs_compress_op * const btrfs_compress_op[] = {
	/* The heuristic is represented as compression type 0 */
	&btrfs_heuristic_compress,
	&btrfs_zlib_compress,
	&btrfs_lzo_compress,
	&btrfs_zstd_compress,
};

static struct list_head *alloc_workspace(int type, unsigned int level)
{
	switch (type) {
	case BTRFS_COMPRESS_NONE: return alloc_heuristic_ws(level);
	case BTRFS_COMPRESS_ZLIB: return zlib_alloc_workspace(level);
	case BTRFS_COMPRESS_LZO:  return lzo_alloc_workspace(level);
	case BTRFS_COMPRESS_ZSTD: return zstd_alloc_workspace(level);
	default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
		BUG();
	}
}

static void free_workspace(int type, struct list_head *ws)
{
	switch (type) {
	case BTRFS_COMPRESS_NONE: return free_heuristic_ws(ws);
	case BTRFS_COMPRESS_ZLIB: return zlib_free_workspace(ws);
	case BTRFS_COMPRESS_LZO:  return lzo_free_workspace(ws);
	case BTRFS_COMPRESS_ZSTD: return zstd_free_workspace(ws);
	default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
		BUG();
	}
}

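/*
 * Initialize the workspace manager for @type and preallocate one workspace
 * so that compression can always make progress, even under memory pressure.
 */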
static void btrfs_init_workspace_manager(int type)
{
	struct workspace_manager *wsm;
	struct list_head *workspace;

	wsm = btrfs_compress_op[type]->workspace_manager;
	INIT_LIST_HEAD(&wsm->idle_ws);
	spin_lock_init(&wsm->ws_lock);
	atomic_set(&wsm->total_ws, 0);
	init_waitqueue_head(&wsm->ws_wait);

	/*
	 * Preallocate one workspace for each compression type so we can
	 * guarantee forward progress in the worst case
	 */
	workspace = alloc_workspace(type, 0);
	if (IS_ERR(workspace)) {
		pr_warn(
	"BTRFS: cannot preallocate compression workspace, will try later\n");
	} else {
		atomic_set(&wsm->total_ws, 1);
		wsm->free_ws = 1;
		list_add(workspace, &wsm->idle_ws);
	}
}

static void btrfs_cleanup_workspace_manager(int type)
{
	struct workspace_manager *wsman;
	struct list_head *ws;

	wsman = btrfs_compress_op[type]->workspace_manager;
	while (!list_empty(&wsman->idle_ws)) {
		ws = wsman->idle_ws.next;
		list_del(ws);
		free_workspace(type, ws);
		atomic_dec(&wsman->total_ws);
	}
}

/*
 * This finds an available workspace or allocates a new one.
 * If it's not possible to allocate a new one, waits until there's one.
 * Preallocation provides a forward progress guarantee and we do not return
 * errors.
 */
struct list_head *btrfs_get_workspace(int type, unsigned int level)
{
	struct workspace_manager *wsm;
	struct list_head *workspace;
	int cpus = num_online_cpus();
	unsigned nofs_flag;
	struct list_head *idle_ws;
	spinlock_t *ws_lock;
	atomic_t *total_ws;
	wait_queue_head_t *ws_wait;
	int *free_ws;

	wsm = btrfs_compress_op[type]->workspace_manager;
	idle_ws = &wsm->idle_ws;
	ws_lock = &wsm->ws_lock;
	total_ws = &wsm->total_ws;
	ws_wait = &wsm->ws_wait;
	free_ws = &wsm->free_ws;

again:
	spin_lock(ws_lock);
	if (!list_empty(idle_ws)) {
		workspace = idle_ws->next;
		list_del(workspace);
		(*free_ws)--;
		spin_unlock(ws_lock);
		return workspace;
	}
	if (atomic_read(total_ws) > cpus) {
		DEFINE_WAIT(wait);

		spin_unlock(ws_lock);
		prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE);
		if (atomic_read(total_ws) > cpus && !*free_ws)
			schedule();
		finish_wait(ws_wait, &wait);
		goto again;
	}
	atomic_inc(total_ws);
	spin_unlock(ws_lock);

	/*
	 * Allocation helpers call vmalloc that can't use GFP_NOFS, so we have
	 * to turn it off here because we might get called from the restricted
	 * context of btrfs_compress_bio/btrfs_compress_pages
	 */
	nofs_flag = memalloc_nofs_save();
	workspace = alloc_workspace(type, level);
	memalloc_nofs_restore(nofs_flag);

	if (IS_ERR(workspace)) {
		atomic_dec(total_ws);
		wake_up(ws_wait);

		/*
		 * Do not return the error but go back to waiting.  There's a
		 * workspace preallocated for each type and the compression
		 * time is bounded so we get to a workspace eventually.  This
		 * makes our caller's life easier.
		 *
		 * To prevent silent and low-probability deadlocks (when the
		 * initial preallocation fails), check if there are any
		 * workspaces at all.
		 */
		if (atomic_read(total_ws) == 0) {
			static DEFINE_RATELIMIT_STATE(_rs,
					/* once per minute */ 60 * HZ,
					/* no burst */ 1);

			if (__ratelimit(&_rs)) {
				pr_warn("BTRFS: no compression workspaces, low memory, retrying\n");
			}
		}
		goto again;
	}
	return workspace;
}

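/*
 * Type-aware wrapper around btrfs_get_workspace(): zlib and zstd go through
 * their own level-aware helpers, the heuristic and lzo take the common path.
 */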
static struct list_head *get_workspace(int type, int level)
{
	switch (type) {
	case BTRFS_COMPRESS_NONE: return btrfs_get_workspace(type, level);
	case BTRFS_COMPRESS_ZLIB: return zlib_get_workspace(level);
	case BTRFS_COMPRESS_LZO:  return btrfs_get_workspace(type, level);
	case BTRFS_COMPRESS_ZSTD: return zstd_get_workspace(level);
	default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
		BUG();
	}
}

/*
 * Put a workspace struct back on the list or free it if we have enough
 * idle ones sitting around.
 */
void btrfs_put_workspace(int type, struct list_head *ws)
{
	struct workspace_manager *wsm;
	struct list_head *idle_ws;
	spinlock_t *ws_lock;
	atomic_t *total_ws;
	wait_queue_head_t *ws_wait;
	int *free_ws;

	wsm = btrfs_compress_op[type]->workspace_manager;
	idle_ws = &wsm->idle_ws;
	ws_lock = &wsm->ws_lock;
	total_ws = &wsm->total_ws;
	ws_wait = &wsm->ws_wait;
	free_ws = &wsm->free_ws;

	spin_lock(ws_lock);
	if (*free_ws <= num_online_cpus()) {
		list_add(ws, idle_ws);
		(*free_ws)++;
		spin_unlock(ws_lock);
		goto wake;
	}
	spin_unlock(ws_lock);

	free_workspace(type, ws);
	atomic_dec(total_ws);
wake:
	cond_wake_up(ws_wait);
}

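/*
 * Type-aware counterpart of get_workspace().  Only zstd returns workspaces
 * through its own manager, the other types use the common path.
 */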
static void put_workspace(int type, struct list_head *ws)
{
	switch (type) {
	case BTRFS_COMPRESS_NONE: return btrfs_put_workspace(type, ws);
	case BTRFS_COMPRESS_ZLIB: return btrfs_put_workspace(type, ws);
	case BTRFS_COMPRESS_LZO:  return btrfs_put_workspace(type, ws);
	case BTRFS_COMPRESS_ZSTD: return zstd_put_workspace(ws);
	default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
		BUG();
	}
}

/*
 * Adjust @level according to the limits of the compression algorithm or
 * fallback to default.
 */
static unsigned int btrfs_compress_set_level(int type, unsigned level)
{
	const struct btrfs_compress_op *ops = btrfs_compress_op[type];

	if (level == 0)
		level = ops->default_level;
	else
		level = min(level, ops->max_level);

	return level;
}

/*
 * Given an address space and start and length, compress the bytes into @pages
 * that are allocated on demand.
 *
 * @type_level is encoded algorithm and level, where level 0 means whatever
 * default the algorithm chooses and is opaque here;
 * - compression algo are 0-3
 * - the level are bits 4-7
 *
 * @out_pages is an in/out parameter, holds maximum number of pages to allocate
 * and returns number of actually allocated pages
 *
 * @total_in is used to return the number of bytes actually read.  It
 * may be smaller than the input length if we had to exit early because we
 * ran out of room in the pages array or because we crossed the
 * max_out threshold.
 *
 * @total_out is an in/out parameter, must be set to the input length and will
 * be also used to return the total number of compressed bytes
 *
 * @max_out tells us the max number of bytes that we're allowed to
 * stuff into pages
 */
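/*
 * Following the bit layout above, eg. zlib at level 9 is encoded as
 * (9 << 4) | BTRFS_COMPRESS_ZLIB == 0x91.
 */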
int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
			 u64 start, struct page **pages,
			 unsigned long *out_pages,
			 unsigned long *total_in,
			 unsigned long *total_out)
{
	int type = btrfs_compress_type(type_level);
	int level = btrfs_compress_level(type_level);
	struct list_head *workspace;
	int ret;

	level = btrfs_compress_set_level(type, level);
	workspace = get_workspace(type, level);
	ret = compression_compress_pages(type, workspace, mapping, start, pages,
					 out_pages, total_in, total_out);
	put_workspace(type, workspace);
	return ret;
}

/*
 * pages_in is an array of pages with compressed data.
 *
 * disk_start is the starting logical offset of this array in the file
 *
 * orig_bio contains the pages from the file that we want to decompress into
 *
 * srclen is the number of bytes in pages_in
 *
 * The basic idea is that we have a bio that was created by readpages.
 * The pages in the bio are for the uncompressed data, and they may not
 * be contiguous.  They all correspond to the range of bytes covered by
 * the compressed extent.
 */
static int btrfs_decompress_bio(struct compressed_bio *cb)
{
	struct list_head *workspace;
	int ret;
	int type = cb->compress_type;

	workspace = get_workspace(type, 0);
	ret = compression_decompress_bio(type, workspace, cb);
	put_workspace(type, workspace);

	return ret;
}

/*
 * A less complex decompression routine.  Our compressed data fits in a
 * single page, and we want to read a single page out of it.
 * start_byte tells us the offset into the compressed data we're interested in.
 */
int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
		     unsigned long start_byte, size_t srclen, size_t destlen)
{
	struct list_head *workspace;
	int ret;

	workspace = get_workspace(type, 0);
	ret = compression_decompress(type, workspace, data_in, dest_page,
				     start_byte, srclen, destlen);
	put_workspace(type, workspace);

	return ret;
}

void __init btrfs_init_compress(void)
{
	btrfs_init_workspace_manager(BTRFS_COMPRESS_NONE);
	btrfs_init_workspace_manager(BTRFS_COMPRESS_ZLIB);
	btrfs_init_workspace_manager(BTRFS_COMPRESS_LZO);
	zstd_init_workspace_manager();
}

void __cold btrfs_exit_compress(void)
{
	btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_NONE);
	btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_ZLIB);
	btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_LZO);
	zstd_cleanup_workspace_manager();
}

/*
 * Copy uncompressed data from working buffer to pages.
 *
 * buf_start is the byte offset of our working buffer within the
 * uncompressed data.
 *
 * total_out is the last byte of the buffer.
 */
int btrfs_decompress_buf2page(const char *buf, unsigned long buf_start,
			      unsigned long total_out, u64 disk_start,
			      struct bio *bio)
{
	unsigned long buf_offset;
	unsigned long current_buf_start;
	unsigned long start_byte;
	unsigned long prev_start_byte;
	unsigned long working_bytes = total_out - buf_start;
	unsigned long bytes;
	struct bio_vec bvec = bio_iter_iovec(bio, bio->bi_iter);

	/*
	 * start byte is the first byte of the page we're currently
	 * copying into relative to the start of the compressed data.
	 */
	start_byte = page_offset(bvec.bv_page) - disk_start;

	/* we haven't yet hit data corresponding to this page */
	if (total_out <= start_byte)
		return 1;

	/*
	 * the start of the data we care about is offset into
	 * the middle of our working buffer
	 */
	if (total_out > start_byte && buf_start < start_byte) {
		buf_offset = start_byte - buf_start;
		working_bytes -= buf_offset;
	} else {
		buf_offset = 0;
	}
	current_buf_start = buf_start;

	/* copy bytes from the working buffer into the pages */
	while (working_bytes > 0) {
		bytes = min_t(unsigned long, bvec.bv_len,
				PAGE_SIZE - (buf_offset % PAGE_SIZE));
		bytes = min(bytes, working_bytes);

		memcpy_to_page(bvec.bv_page, bvec.bv_offset, buf + buf_offset,
			       bytes);
		flush_dcache_page(bvec.bv_page);

		buf_offset += bytes;
		working_bytes -= bytes;
		current_buf_start += bytes;

		/* check if we need to pick another page */
		bio_advance(bio, bytes);
		if (!bio->bi_iter.bi_size)
			return 0;
		bvec = bio_iter_iovec(bio, bio->bi_iter);
		prev_start_byte = start_byte;
		start_byte = page_offset(bvec.bv_page) - disk_start;

		/*
		 * We need to make sure we're only adjusting
		 * our offset into the compression working buffer when
		 * we're switching pages.  Otherwise we can incorrectly
		 * keep copying when we were actually done.
		 */
		if (start_byte != prev_start_byte) {
			/*
			 * make sure our new page is covered by this
			 * working buffer
			 */
			if (total_out <= start_byte)
				return 1;

			/*
			 * the next page in the biovec might not be adjacent
			 * to the last page, but it might still be found
			 * inside this working buffer.  bump our offset pointer
			 */
			if (total_out > start_byte &&
			    current_buf_start < start_byte) {
				buf_offset = start_byte - buf_start;
				working_bytes = total_out - start_byte;
				current_buf_start = buf_start + buf_offset;
			}
		}
	}

	return 1;
}

/*
 * Shannon Entropy calculation
 *
 * Pure byte distribution analysis fails to determine compressibility of data.
 * Try calculating entropy to estimate the average minimum number of bits
 * needed to encode the sampled data.
 *
 * For convenience, return the percentage of needed bits, instead of the
 * number of bits directly.
 *
 * @ENTROPY_LVL_ACEPTABLE - below that threshold, sample has low byte entropy
 *			    and can be compressible with high probability
 *
 * @ENTROPY_LVL_HIGH - data are not compressible with high probability
 *
 * Use of ilog2() decreases precision, we lower the LVL to 5 to compensate.
 */
#define ENTROPY_LVL_ACEPTABLE		(65)
#define ENTROPY_LVL_HIGH		(80)

/*
 * For increased precision in shannon_entropy calculation,
 * let's do pow(n, M) to save more digits after the decimal point:
 *
 * - maximum int bit length is 64
 * - ilog2(MAX_SAMPLE_SIZE) -> 13
 * - 13 * 4 = 52 < 64 -> M = 4
 *
 * So use pow(n, 4).
 */
static inline u32 ilog2_w(u64 n)
{
	return ilog2(n * n * n * n);
}

static u32 shannon_entropy(struct heuristic_ws *ws)
{
	const u32 entropy_max = 8 * ilog2_w(2);
	u32 entropy_sum = 0;
	u32 p, p_base, sz_base;
	u32 i;

	sz_base = ilog2_w(ws->sample_size);
	for (i = 0; i < BUCKET_SIZE && ws->bucket[i].count > 0; i++) {
		p = ws->bucket[i].count;
		p_base = ilog2_w(p);
		entropy_sum += p * (sz_base - p_base);
	}

	entropy_sum /= ws->sample_size;
	return entropy_sum * 100 / entropy_max;
}

#define RADIX_BASE		4U
#define COUNTERS_SIZE		(1U << RADIX_BASE)

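/*
 * Extract the 4-bit digit of @num at @shift, inverted so that the ascending
 * radix sort below produces a descending order.
 */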
static u8 get4bits(u64 num, int shift)
{
	u8 low4bits;

	num >>= shift;
	/* Reverse order */
	low4bits = (COUNTERS_SIZE - 1) - (num % COUNTERS_SIZE);
	return low4bits;
}

/*
 * Use 4 bits as radix base
 * Use 16 u32 counters for calculating new position in buf array
 *
 * @array     - array that will be sorted
 * @array_buf - buffer array to store sorting results
 *              must be equal in size to @array
 * @num       - array size
 */
static void radix_sort(struct bucket_item *array, struct bucket_item *array_buf,
		       int num)
{
	u64 max_num;
	u64 buf_num;
	u32 counters[COUNTERS_SIZE];
	u32 new_addr;
	u32 addr;
	int bitlen;
	int shift;
	int i;

	/*
	 * Try to avoid useless loop iterations for small numbers stored in big
	 * counters.  Example: 48 33 4 ... in 64bit array
	 */
	max_num = array[0].count;
	for (i = 1; i < num; i++) {
		buf_num = array[i].count;
		if (buf_num > max_num)
			max_num = buf_num;
	}

	buf_num = ilog2(max_num);
	bitlen = ALIGN(buf_num, RADIX_BASE * 2);

	shift = 0;
	while (shift < bitlen) {
		memset(counters, 0, sizeof(counters));

		for (i = 0; i < num; i++) {
			buf_num = array[i].count;
			addr = get4bits(buf_num, shift);
			counters[addr]++;
		}

		for (i = 1; i < COUNTERS_SIZE; i++)
			counters[i] += counters[i - 1];

		for (i = num - 1; i >= 0; i--) {
			buf_num = array[i].count;
			addr = get4bits(buf_num, shift);
			counters[addr]--;
			new_addr = counters[addr];
			array_buf[new_addr] = array[i];
		}

		shift += RADIX_BASE;

		/*
		 * Normal radix expects to move data from a temporary array, to
		 * the main one.  But that requires some CPU time.  Avoid that
		 * by doing another sort iteration to original array instead of
		 * memcpy()
		 */
		memset(counters, 0, sizeof(counters));

		for (i = 0; i < num; i++) {
			buf_num = array_buf[i].count;
			addr = get4bits(buf_num, shift);
			counters[addr]++;
		}

		for (i = 1; i < COUNTERS_SIZE; i++)
			counters[i] += counters[i - 1];

		for (i = num - 1; i >= 0; i--) {
			buf_num = array_buf[i].count;
			addr = get4bits(buf_num, shift);
			counters[addr]--;
			new_addr = counters[addr];
			array[new_addr] = array_buf[i];
		}

		shift += RADIX_BASE;
	}
}

/*
 * Size of the core byte set - how many bytes cover 90% of the sample
 *
 * There are several types of structured binary data that use nearly all byte
 * values.  The distribution can be uniform and counts in all buckets will be
 * nearly the same (eg. encrypted data).  Unlikely to be compressible.
 *
 * Other possibility is normal (Gaussian) distribution, where the data could
 * be potentially compressible, but we have to take a few more steps to decide
 * how much.
 *
 * @BYTE_CORE_SET_LOW  - main part of byte values repeated frequently,
 *			 a compression algorithm can easily fix that
 * @BYTE_CORE_SET_HIGH - data have a uniform distribution and with high
 *			 probability are not compressible
 */
#define BYTE_CORE_SET_LOW		(64)
#define BYTE_CORE_SET_HIGH		(200)

static int byte_core_set_size(struct heuristic_ws *ws)
{
	u32 i;
	u32 coreset_sum = 0;
	const u32 core_set_threshold = ws->sample_size * 90 / 100;
	struct bucket_item *bucket = ws->bucket;

	/* Sort in reverse order */
	radix_sort(ws->bucket, ws->bucket_b, BUCKET_SIZE);

	for (i = 0; i < BYTE_CORE_SET_LOW; i++)
		coreset_sum += bucket[i].count;

	if (coreset_sum > core_set_threshold)
		return i;

	for (; i < BYTE_CORE_SET_HIGH && bucket[i].count > 0; i++) {
		coreset_sum += bucket[i].count;
		if (coreset_sum > core_set_threshold)
			break;
	}

	return i;
}

/*
 * Count byte values in buckets.
 * This heuristic can detect textual data (configs, xml, json, html, etc).
 * Because in most text-like data the byte set is restricted to a limited
 * number of possible characters, and that restriction in most cases makes
 * the data easy to compress.
 *
 * @BYTE_SET_THRESHOLD - consider all data within this byte set size:
 *	less - compressible
 *	more - need additional analysis
 */
#define BYTE_SET_THRESHOLD		(64)

static u32 byte_set_size(const struct heuristic_ws *ws)
{
	u32 i;
	u32 byte_set_size = 0;

	for (i = 0; i < BYTE_SET_THRESHOLD; i++) {
		if (ws->bucket[i].count > 0)
			byte_set_size++;
	}

	/*
	 * Continue collecting count of byte values in buckets.  If the byte
	 * set size is bigger than the threshold, it's pointless to continue,
	 * the detection technique would fail for this type of data.
	 */
	for (; i < BUCKET_SIZE; i++) {
		if (ws->bucket[i].count > 0) {
			byte_set_size++;
			if (byte_set_size > BYTE_SET_THRESHOLD)
				return byte_set_size;
		}
	}

	return byte_set_size;
}

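/*
 * Quick check for trivially compressible data: if the first half of the
 * sample is identical to the second half, the data is likely highly
 * repetitive (eg. zeroed or periodic).
 */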
static bool sample_repeated_patterns(struct heuristic_ws *ws)
{
	const u32 half_of_sample = ws->sample_size / 2;
	const u8 *data = ws->sample;

	return memcmp(&data[0], &data[half_of_sample], half_of_sample) == 0;
}

static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
				     struct heuristic_ws *ws)
{
	struct page *page;
	u64 index, index_end;
	u32 i, curr_sample_pos;
	u8 *in_data;

	/*
	 * Compression handles the input data by chunks of 128KiB
	 * (defined by BTRFS_MAX_UNCOMPRESSED)
	 *
	 * We do the same for the heuristic and loop over the whole range.
	 *
	 * MAX_SAMPLE_SIZE - calculated under assumption that heuristic will
	 * process no more than BTRFS_MAX_UNCOMPRESSED at a time.
	 */
	if (end - start > BTRFS_MAX_UNCOMPRESSED)
		end = start + BTRFS_MAX_UNCOMPRESSED;

	index = start >> PAGE_SHIFT;
	index_end = end >> PAGE_SHIFT;

	/* Don't miss unaligned end */
	if (!IS_ALIGNED(end, PAGE_SIZE))
		index_end++;

	curr_sample_pos = 0;
	while (index < index_end) {
		page = find_get_page(inode->i_mapping, index);
		in_data = kmap_local_page(page);
		/* Handle case where the start is not aligned to PAGE_SIZE */
		i = start % PAGE_SIZE;
		while (i < PAGE_SIZE - SAMPLING_READ_SIZE) {
			/* Don't sample any garbage from the last page */
			if (start > end - SAMPLING_READ_SIZE)
				break;
			memcpy(&ws->sample[curr_sample_pos], &in_data[i],
			       SAMPLING_READ_SIZE);
			i += SAMPLING_INTERVAL;
			start += SAMPLING_INTERVAL;
			curr_sample_pos += SAMPLING_READ_SIZE;
		}
		kunmap_local(in_data);
		put_page(page);

		index++;
	}

	ws->sample_size = curr_sample_pos;
}

/*
 * Compression heuristic.
 *
 * For now it's a naive and optimistic 'return true', we'll extend the logic to
 * quickly (compared to direct compression) detect data characteristics
 * (compressible/incompressible) to avoid wasting CPU time on incompressible
 * data.
 *
 * The following types of analysis can be performed:
 * - detect mostly zero data
 * - detect data with low "byte set" size (text, etc)
 * - detect data with low/high "core byte" set
 *
 * Return non-zero if the compression should be done, 0 otherwise.
 */
int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end)
{
	struct list_head *ws_list = get_workspace(0, 0);
	struct heuristic_ws *ws;
	u32 i;
	u8 byte;
	int ret = 0;

	ws = list_entry(ws_list, struct heuristic_ws, list);

	heuristic_collect_sample(inode, start, end, ws);

	if (sample_repeated_patterns(ws)) {
		ret = 1;
		goto out;
	}

	memset(ws->bucket, 0, sizeof(*ws->bucket)*BUCKET_SIZE);

	for (i = 0; i < ws->sample_size; i++) {
		byte = ws->sample[i];
		ws->bucket[byte].count++;
	}

	i = byte_set_size(ws);
	if (i < BYTE_SET_THRESHOLD) {
		ret = 2;
		goto out;
	}

	i = byte_core_set_size(ws);
	if (i <= BYTE_CORE_SET_LOW) {
		ret = 3;
		goto out;
	}

	if (i >= BYTE_CORE_SET_HIGH) {
		ret = 0;
		goto out;
	}

	i = shannon_entropy(ws);
	if (i <= ENTROPY_LVL_ACEPTABLE) {
		ret = 4;
		goto out;
	}

	/*
	 * For the levels below ENTROPY_LVL_HIGH, additional analysis would be
	 * needed to give green light to compression.
	 *
	 * For now just assume that compression at that level is not worth the
	 * resources because:
	 *
	 * 1. it is possible to defrag the data later
	 *
	 * 2. the data would turn out to be hardly compressible, eg. 150 byte
	 *    values, every bucket has counter at level ~54.  The heuristic
	 *    would be confused.  This can happen when data have some internal
	 *    repeated patterns like "abbacbbc...".  This can be detected by
	 *    analyzing pairs of bytes, which is too costly.
	 */
	if (i < ENTROPY_LVL_HIGH) {
		ret = 5;
		goto out;
	} else {
		ret = 0;
		goto out;
	}

out:
	put_workspace(0, ws_list);
	return ret;
}

/*
 * Convert the compression suffix (eg. after "zlib" starting with ":") to
 * level, an unrecognized string will set the default level.
 */
unsigned int btrfs_compress_str2level(unsigned int type, const char *str)
{
	unsigned int level = 0;
	int ret;

	if (!type)
		return 0;

	if (str[0] == ':') {
		ret = kstrtouint(str + 1, 10, &level);
		if (ret)
			level = 0;
	}

	level = btrfs_compress_set_level(type, level);

	return level;
}