btrfs: replace GPL boilerplate by SPDX -- headers
[linux-2.6-block.git] / fs / btrfs / compression.c
c8b97818
CM
1/*
2 * Copyright (C) 2008 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18
19#include <linux/kernel.h>
20#include <linux/bio.h>
21#include <linux/buffer_head.h>
22#include <linux/file.h>
23#include <linux/fs.h>
24#include <linux/pagemap.h>
25#include <linux/highmem.h>
26#include <linux/time.h>
27#include <linux/init.h>
28#include <linux/string.h>
c8b97818
CM
29#include <linux/backing-dev.h>
30#include <linux/mpage.h>
31#include <linux/swap.h>
32#include <linux/writeback.h>
33#include <linux/bit_spinlock.h>
5a0e3ad6 34#include <linux/slab.h>
fe308533 35#include <linux/sched/mm.h>
19562430 36#include <linux/log2.h>
c8b97818
CM
37#include "ctree.h"
38#include "disk-io.h"
39#include "transaction.h"
40#include "btrfs_inode.h"
41#include "volumes.h"
42#include "ordered-data.h"
c8b97818
CM
43#include "compression.h"
44#include "extent_io.h"
45#include "extent_map.h"
46
e128f9c3
DS
47static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" };
48
49const char* btrfs_compress_type2str(enum btrfs_compression_type type)
50{
51 switch (type) {
52 case BTRFS_COMPRESS_ZLIB:
53 case BTRFS_COMPRESS_LZO:
54 case BTRFS_COMPRESS_ZSTD:
55 case BTRFS_COMPRESS_NONE:
56 return btrfs_compress_types[type];
57 }
58
59 return NULL;
60}
61
8140dc30 62static int btrfs_decompress_bio(struct compressed_bio *cb);
48a3b636 63
2ff7e61e 64static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
d20f7043
CM
65 unsigned long disk_size)
66{
0b246afa 67 u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
6c41761f 68
d20f7043 69 return sizeof(struct compressed_bio) +
0b246afa 70 (DIV_ROUND_UP(disk_size, fs_info->sectorsize)) * csum_size;
d20f7043
CM
71}
72
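/*
 * Editorial note (not part of the original file), illustrating the
 * allocation size above: assuming a 4KiB sectorsize and the 4-byte crc32c
 * checksums btrfs uses here, a 128KiB compressed extent needs
 * DIV_ROUND_UP(131072, 4096) = 32 per-sector checksums, so the
 * compressed_bio is allocated with 32 * 4 = 128 extra bytes that serve as
 * the inline array behind &cb->sums.
 */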
f898ac6a 73static int check_compressed_csum(struct btrfs_inode *inode,
d20f7043
CM
74 struct compressed_bio *cb,
75 u64 disk_start)
76{
77 int ret;
d20f7043
CM
78 struct page *page;
79 unsigned long i;
80 char *kaddr;
81 u32 csum;
82 u32 *cb_sum = &cb->sums;
83
f898ac6a 84 if (inode->flags & BTRFS_INODE_NODATASUM)
d20f7043
CM
85 return 0;
86
87 for (i = 0; i < cb->nr_pages; i++) {
88 page = cb->compressed_pages[i];
89 csum = ~(u32)0;
90
7ac687d9 91 kaddr = kmap_atomic(page);
09cbfeaf 92 csum = btrfs_csum_data(kaddr, csum, PAGE_SIZE);
0b5e3daf 93 btrfs_csum_final(csum, (u8 *)&csum);
7ac687d9 94 kunmap_atomic(kaddr);
d20f7043
CM
95
96 if (csum != *cb_sum) {
f898ac6a 97 btrfs_print_data_csum_error(inode, disk_start, csum,
0970a22e 98 *cb_sum, cb->mirror_num);
d20f7043
CM
99 ret = -EIO;
100 goto fail;
101 }
102 cb_sum++;
103
104 }
105 ret = 0;
106fail:
107 return ret;
108}
109
c8b97818
CM
110/* when we finish reading compressed pages from the disk, we
111 * decompress them and then run the bio end_io routines on the
112 * decompressed pages (in the inode address space).
113 *
114 * This allows the checksumming and other IO error handling routines
115 * to work normally
116 *
117 * The compressed pages are freed here, and it must be run
118 * in process context
119 */
4246a0b6 120static void end_compressed_bio_read(struct bio *bio)
c8b97818 121{
c8b97818
CM
122 struct compressed_bio *cb = bio->bi_private;
123 struct inode *inode;
124 struct page *page;
125 unsigned long index;
cf1167d5 126 unsigned int mirror = btrfs_io_bio(bio)->mirror_num;
e6311f24 127 int ret = 0;
c8b97818 128
4e4cbee9 129 if (bio->bi_status)
c8b97818
CM
130 cb->errors = 1;
131
132 /* if there are more bios still pending for this compressed
133 * extent, just exit
134 */
a50299ae 135 if (!refcount_dec_and_test(&cb->pending_bios))
c8b97818
CM
136 goto out;
137
cf1167d5
LB
138 /*
139 * Record the correct mirror_num in cb->orig_bio so that
140 * read-repair can work properly.
141 */
142 ASSERT(btrfs_io_bio(cb->orig_bio));
143 btrfs_io_bio(cb->orig_bio)->mirror_num = mirror;
144 cb->mirror_num = mirror;
145
e6311f24
LB
146 /*
147 * Some IO in this cb have failed, just skip checksum as there
148 * is no way it could be correct.
149 */
150 if (cb->errors == 1)
151 goto csum_failed;
152
d20f7043 153 inode = cb->inode;
f898ac6a 154 ret = check_compressed_csum(BTRFS_I(inode), cb,
4f024f37 155 (u64)bio->bi_iter.bi_sector << 9);
d20f7043
CM
156 if (ret)
157 goto csum_failed;
158
c8b97818
CM
159 /* ok, we're the last bio for this extent, let's start
160 * the decompression.
161 */
8140dc30
AJ
162 ret = btrfs_decompress_bio(cb);
163
d20f7043 164csum_failed:
c8b97818
CM
165 if (ret)
166 cb->errors = 1;
167
168 /* release the compressed pages */
169 index = 0;
170 for (index = 0; index < cb->nr_pages; index++) {
171 page = cb->compressed_pages[index];
172 page->mapping = NULL;
09cbfeaf 173 put_page(page);
c8b97818
CM
174 }
175
176 /* do io completion on the original bio */
771ed689 177 if (cb->errors) {
c8b97818 178 bio_io_error(cb->orig_bio);
d20f7043 179 } else {
2c30c71b
KO
180 int i;
181 struct bio_vec *bvec;
d20f7043
CM
182
183 /*
184 * we have verified the checksum already, set page
185 * checked so the end_io handlers know about it
186 */
c09abff8 187 ASSERT(!bio_flagged(bio, BIO_CLONED));
2c30c71b 188 bio_for_each_segment_all(bvec, cb->orig_bio, i)
d20f7043 189 SetPageChecked(bvec->bv_page);
2c30c71b 190
4246a0b6 191 bio_endio(cb->orig_bio);
d20f7043 192 }
c8b97818
CM
193
194 /* finally free the cb struct */
195 kfree(cb->compressed_pages);
196 kfree(cb);
197out:
198 bio_put(bio);
199}
200
201/*
202 * Clear the writeback bits on all of the file
203 * pages for a compressed write
204 */
7bdcefc1
FM
205static noinline void end_compressed_writeback(struct inode *inode,
206 const struct compressed_bio *cb)
c8b97818 207{
09cbfeaf
KS
208 unsigned long index = cb->start >> PAGE_SHIFT;
209 unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
c8b97818
CM
210 struct page *pages[16];
211 unsigned long nr_pages = end_index - index + 1;
212 int i;
213 int ret;
214
7bdcefc1
FM
215 if (cb->errors)
216 mapping_set_error(inode->i_mapping, -EIO);
217
d397712b 218 while (nr_pages > 0) {
c8b97818 219 ret = find_get_pages_contig(inode->i_mapping, index,
5b050f04
CM
220 min_t(unsigned long,
221 nr_pages, ARRAY_SIZE(pages)), pages);
c8b97818
CM
222 if (ret == 0) {
223 nr_pages -= 1;
224 index += 1;
225 continue;
226 }
227 for (i = 0; i < ret; i++) {
7bdcefc1
FM
228 if (cb->errors)
229 SetPageError(pages[i]);
c8b97818 230 end_page_writeback(pages[i]);
09cbfeaf 231 put_page(pages[i]);
c8b97818
CM
232 }
233 nr_pages -= ret;
234 index += ret;
235 }
236 /* the inode may be gone now */
c8b97818
CM
237}
238
239/*
240 * do the cleanup once all the compressed pages hit the disk.
241 * This will clear writeback on the file pages and free the compressed
242 * pages.
243 *
244 * This also calls the writeback end hooks for the file pages so that
245 * metadata and checksums can be updated in the file.
246 */
4246a0b6 247static void end_compressed_bio_write(struct bio *bio)
c8b97818
CM
248{
249 struct extent_io_tree *tree;
250 struct compressed_bio *cb = bio->bi_private;
251 struct inode *inode;
252 struct page *page;
253 unsigned long index;
254
4e4cbee9 255 if (bio->bi_status)
c8b97818
CM
256 cb->errors = 1;
257
258 /* if there are more bios still pending for this compressed
259 * extent, just exit
260 */
a50299ae 261 if (!refcount_dec_and_test(&cb->pending_bios))
c8b97818
CM
262 goto out;
263
264 /* ok, we're the last bio for this extent, step one is to
265 * call back into the FS and do all the end_io operations
266 */
267 inode = cb->inode;
268 tree = &BTRFS_I(inode)->io_tree;
70b99e69 269 cb->compressed_pages[0]->mapping = cb->inode->i_mapping;
c8b97818
CM
270 tree->ops->writepage_end_io_hook(cb->compressed_pages[0],
271 cb->start,
272 cb->start + cb->len - 1,
7bdcefc1 273 NULL,
2dbe0c77
AJ
274 bio->bi_status ?
275 BLK_STS_OK : BLK_STS_NOTSUPP);
70b99e69 276 cb->compressed_pages[0]->mapping = NULL;
c8b97818 277
7bdcefc1 278 end_compressed_writeback(inode, cb);
c8b97818
CM
279 /* note, our inode could be gone now */
280
281 /*
282 * release the compressed pages, these came from alloc_page and
283 * are not attached to the inode at all
284 */
285 index = 0;
286 for (index = 0; index < cb->nr_pages; index++) {
287 page = cb->compressed_pages[index];
288 page->mapping = NULL;
09cbfeaf 289 put_page(page);
c8b97818
CM
290 }
291
292 /* finally free the cb struct */
293 kfree(cb->compressed_pages);
294 kfree(cb);
295out:
296 bio_put(bio);
297}
298
299/*
300 * worker function to build and submit bios for previously compressed pages.
301 * The corresponding pages in the inode should be marked for writeback
302 * and the compressed pages should have a reference on them for dropping
303 * when the IO is complete.
304 *
305 * This also checksums the file bytes and gets things ready for
306 * the end io hooks.
307 */
4e4cbee9 308blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
c8b97818
CM
309 unsigned long len, u64 disk_start,
310 unsigned long compressed_len,
311 struct page **compressed_pages,
f82b7359
LB
312 unsigned long nr_pages,
313 unsigned int write_flags)
c8b97818 314{
0b246afa 315 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
c8b97818 316 struct bio *bio = NULL;
c8b97818
CM
317 struct compressed_bio *cb;
318 unsigned long bytes_left;
319 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
306e16ce 320 int pg_index = 0;
c8b97818
CM
321 struct page *page;
322 u64 first_byte = disk_start;
323 struct block_device *bdev;
4e4cbee9 324 blk_status_t ret;
e55179b3 325 int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
c8b97818 326
09cbfeaf 327 WARN_ON(start & ((u64)PAGE_SIZE - 1));
2ff7e61e 328 cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
dac97e51 329 if (!cb)
4e4cbee9 330 return BLK_STS_RESOURCE;
a50299ae 331 refcount_set(&cb->pending_bios, 0);
c8b97818
CM
332 cb->errors = 0;
333 cb->inode = inode;
334 cb->start = start;
335 cb->len = len;
d20f7043 336 cb->mirror_num = 0;
c8b97818
CM
337 cb->compressed_pages = compressed_pages;
338 cb->compressed_len = compressed_len;
339 cb->orig_bio = NULL;
340 cb->nr_pages = nr_pages;
341
0b246afa 342 bdev = fs_info->fs_devices->latest_bdev;
c8b97818 343
c821e7f3 344 bio = btrfs_bio_alloc(bdev, first_byte);
f82b7359 345 bio->bi_opf = REQ_OP_WRITE | write_flags;
c8b97818
CM
346 bio->bi_private = cb;
347 bio->bi_end_io = end_compressed_bio_write;
a50299ae 348 refcount_set(&cb->pending_bios, 1);
c8b97818
CM
349
350 /* create and submit bios for the compressed pages */
351 bytes_left = compressed_len;
306e16ce 352 for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
4e4cbee9
CH
353 int submit = 0;
354
306e16ce 355 page = compressed_pages[pg_index];
c8b97818 356 page->mapping = inode->i_mapping;
4f024f37 357 if (bio->bi_iter.bi_size)
4e4cbee9 358 submit = io_tree->ops->merge_bio_hook(page, 0,
09cbfeaf 359 PAGE_SIZE,
c8b97818 360 bio, 0);
c8b97818 361
70b99e69 362 page->mapping = NULL;
4e4cbee9 363 if (submit || bio_add_page(bio, page, PAGE_SIZE, 0) <
09cbfeaf 364 PAGE_SIZE) {
af09abfe
CM
365 /*
366 * inc the count before we submit the bio so
367 * we know the end IO handler won't happen before
368 * we inc the count. Otherwise, the cb might get
369 * freed before we're done setting it up
370 */
a50299ae 371 refcount_inc(&cb->pending_bios);
0b246afa
JM
372 ret = btrfs_bio_wq_end_io(fs_info, bio,
373 BTRFS_WQ_ENDIO_DATA);
79787eaa 374 BUG_ON(ret); /* -ENOMEM */
c8b97818 375
e55179b3 376 if (!skip_sum) {
2ff7e61e 377 ret = btrfs_csum_one_bio(inode, bio, start, 1);
79787eaa 378 BUG_ON(ret); /* -ENOMEM */
e55179b3 379 }
d20f7043 380
2ff7e61e 381 ret = btrfs_map_bio(fs_info, bio, 0, 1);
f5daf2c7 382 if (ret) {
4e4cbee9 383 bio->bi_status = ret;
f5daf2c7
LB
384 bio_endio(bio);
385 }
c8b97818 386
c821e7f3 387 bio = btrfs_bio_alloc(bdev, first_byte);
f82b7359 388 bio->bi_opf = REQ_OP_WRITE | write_flags;
c8b97818
CM
389 bio->bi_private = cb;
390 bio->bi_end_io = end_compressed_bio_write;
09cbfeaf 391 bio_add_page(bio, page, PAGE_SIZE, 0);
c8b97818 392 }
09cbfeaf 393 if (bytes_left < PAGE_SIZE) {
0b246afa 394 btrfs_info(fs_info,
efe120a0 395 "bytes left %lu compress len %lu nr %lu",
cfbc246e
CM
396 bytes_left, cb->compressed_len, cb->nr_pages);
397 }
09cbfeaf
KS
398 bytes_left -= PAGE_SIZE;
399 first_byte += PAGE_SIZE;
771ed689 400 cond_resched();
c8b97818 401 }
c8b97818 402
0b246afa 403 ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
79787eaa 404 BUG_ON(ret); /* -ENOMEM */
c8b97818 405
e55179b3 406 if (!skip_sum) {
2ff7e61e 407 ret = btrfs_csum_one_bio(inode, bio, start, 1);
79787eaa 408 BUG_ON(ret); /* -ENOMEM */
e55179b3 409 }
d20f7043 410
2ff7e61e 411 ret = btrfs_map_bio(fs_info, bio, 0, 1);
f5daf2c7 412 if (ret) {
4e4cbee9 413 bio->bi_status = ret;
f5daf2c7
LB
414 bio_endio(bio);
415 }
c8b97818 416
c8b97818
CM
417 return 0;
418}
419
2a4d0c90
CH
420static u64 bio_end_offset(struct bio *bio)
421{
c45a8f2d 422 struct bio_vec *last = bio_last_bvec_all(bio);
2a4d0c90
CH
423
424 return page_offset(last->bv_page) + last->bv_len + last->bv_offset;
425}
426
771ed689
CM
427static noinline int add_ra_bio_pages(struct inode *inode,
428 u64 compressed_end,
429 struct compressed_bio *cb)
430{
431 unsigned long end_index;
306e16ce 432 unsigned long pg_index;
771ed689
CM
433 u64 last_offset;
434 u64 isize = i_size_read(inode);
435 int ret;
436 struct page *page;
437 unsigned long nr_pages = 0;
438 struct extent_map *em;
439 struct address_space *mapping = inode->i_mapping;
771ed689
CM
440 struct extent_map_tree *em_tree;
441 struct extent_io_tree *tree;
442 u64 end;
443 int misses = 0;
444
2a4d0c90 445 last_offset = bio_end_offset(cb->orig_bio);
771ed689
CM
446 em_tree = &BTRFS_I(inode)->extent_tree;
447 tree = &BTRFS_I(inode)->io_tree;
448
449 if (isize == 0)
450 return 0;
451
09cbfeaf 452 end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;
771ed689 453
d397712b 454 while (last_offset < compressed_end) {
09cbfeaf 455 pg_index = last_offset >> PAGE_SHIFT;
771ed689 456
306e16ce 457 if (pg_index > end_index)
771ed689
CM
458 break;
459
460 rcu_read_lock();
306e16ce 461 page = radix_tree_lookup(&mapping->page_tree, pg_index);
771ed689 462 rcu_read_unlock();
0cd6144a 463 if (page && !radix_tree_exceptional_entry(page)) {
771ed689
CM
464 misses++;
465 if (misses > 4)
466 break;
467 goto next;
468 }
469
c62d2555
MH
470 page = __page_cache_alloc(mapping_gfp_constraint(mapping,
471 ~__GFP_FS));
771ed689
CM
472 if (!page)
473 break;
474
c62d2555 475 if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
09cbfeaf 476 put_page(page);
771ed689
CM
477 goto next;
478 }
479
09cbfeaf 480 end = last_offset + PAGE_SIZE - 1;
771ed689
CM
481 /*
482 * at this point, we have a locked page in the page cache
483 * for these bytes in the file. But, we have to make
484 * sure they map to this compressed extent on disk.
485 */
486 set_page_extent_mapped(page);
d0082371 487 lock_extent(tree, last_offset, end);
890871be 488 read_lock(&em_tree->lock);
771ed689 489 em = lookup_extent_mapping(em_tree, last_offset,
09cbfeaf 490 PAGE_SIZE);
890871be 491 read_unlock(&em_tree->lock);
771ed689
CM
492
493 if (!em || last_offset < em->start ||
09cbfeaf 494 (last_offset + PAGE_SIZE > extent_map_end(em)) ||
4f024f37 495 (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) {
771ed689 496 free_extent_map(em);
d0082371 497 unlock_extent(tree, last_offset, end);
771ed689 498 unlock_page(page);
09cbfeaf 499 put_page(page);
771ed689
CM
500 break;
501 }
502 free_extent_map(em);
503
504 if (page->index == end_index) {
505 char *userpage;
09cbfeaf 506 size_t zero_offset = isize & (PAGE_SIZE - 1);
771ed689
CM
507
508 if (zero_offset) {
509 int zeros;
09cbfeaf 510 zeros = PAGE_SIZE - zero_offset;
7ac687d9 511 userpage = kmap_atomic(page);
771ed689
CM
512 memset(userpage + zero_offset, 0, zeros);
513 flush_dcache_page(page);
7ac687d9 514 kunmap_atomic(userpage);
771ed689
CM
515 }
516 }
517
518 ret = bio_add_page(cb->orig_bio, page,
09cbfeaf 519 PAGE_SIZE, 0);
771ed689 520
09cbfeaf 521 if (ret == PAGE_SIZE) {
771ed689 522 nr_pages++;
09cbfeaf 523 put_page(page);
771ed689 524 } else {
d0082371 525 unlock_extent(tree, last_offset, end);
771ed689 526 unlock_page(page);
09cbfeaf 527 put_page(page);
771ed689
CM
528 break;
529 }
530next:
09cbfeaf 531 last_offset += PAGE_SIZE;
771ed689 532 }
771ed689
CM
533 return 0;
534}
535
c8b97818
CM
536/*
537 * for a compressed read, the bio we get passed has all the inode pages
538 * in it. We don't actually do IO on those pages but allocate new ones
539 * to hold the compressed pages on disk.
540 *
4f024f37 541 * bio->bi_iter.bi_sector points to the compressed extent on disk
c8b97818 542 * bio->bi_io_vec points to all of the inode pages
c8b97818
CM
543 *
544 * After the compressed pages are read, we copy the bytes into the
545 * bio we were passed and then call the bio end_io calls
546 */
4e4cbee9 547blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
c8b97818
CM
548 int mirror_num, unsigned long bio_flags)
549{
0b246afa 550 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
c8b97818
CM
551 struct extent_io_tree *tree;
552 struct extent_map_tree *em_tree;
553 struct compressed_bio *cb;
c8b97818
CM
554 unsigned long compressed_len;
555 unsigned long nr_pages;
306e16ce 556 unsigned long pg_index;
c8b97818
CM
557 struct page *page;
558 struct block_device *bdev;
559 struct bio *comp_bio;
4f024f37 560 u64 cur_disk_byte = (u64)bio->bi_iter.bi_sector << 9;
e04ca626
CM
561 u64 em_len;
562 u64 em_start;
c8b97818 563 struct extent_map *em;
4e4cbee9 564 blk_status_t ret = BLK_STS_RESOURCE;
15e3004a 565 int faili = 0;
d20f7043 566 u32 *sums;
c8b97818
CM
567
568 tree = &BTRFS_I(inode)->io_tree;
569 em_tree = &BTRFS_I(inode)->extent_tree;
570
571 /* we need the actual starting offset of this extent in the file */
890871be 572 read_lock(&em_tree->lock);
c8b97818 573 em = lookup_extent_mapping(em_tree,
263663cd 574 page_offset(bio_first_page_all(bio)),
09cbfeaf 575 PAGE_SIZE);
890871be 576 read_unlock(&em_tree->lock);
285190d9 577 if (!em)
4e4cbee9 578 return BLK_STS_IOERR;
c8b97818 579
d20f7043 580 compressed_len = em->block_len;
2ff7e61e 581 cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
6b82ce8d 582 if (!cb)
583 goto out;
584
a50299ae 585 refcount_set(&cb->pending_bios, 0);
c8b97818
CM
586 cb->errors = 0;
587 cb->inode = inode;
d20f7043
CM
588 cb->mirror_num = mirror_num;
589 sums = &cb->sums;
c8b97818 590
ff5b7ee3 591 cb->start = em->orig_start;
e04ca626
CM
592 em_len = em->len;
593 em_start = em->start;
d20f7043 594
c8b97818 595 free_extent_map(em);
e04ca626 596 em = NULL;
c8b97818 597
81381053 598 cb->len = bio->bi_iter.bi_size;
c8b97818 599 cb->compressed_len = compressed_len;
261507a0 600 cb->compress_type = extent_compress_type(bio_flags);
c8b97818
CM
601 cb->orig_bio = bio;
602
09cbfeaf 603 nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
31e818fe 604 cb->compressed_pages = kcalloc(nr_pages, sizeof(struct page *),
c8b97818 605 GFP_NOFS);
6b82ce8d 606 if (!cb->compressed_pages)
607 goto fail1;
608
0b246afa 609 bdev = fs_info->fs_devices->latest_bdev;
c8b97818 610
306e16ce
DS
611 for (pg_index = 0; pg_index < nr_pages; pg_index++) {
612 cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS |
c8b97818 613 __GFP_HIGHMEM);
15e3004a
JB
614 if (!cb->compressed_pages[pg_index]) {
615 faili = pg_index - 1;
0e9350de 616 ret = BLK_STS_RESOURCE;
6b82ce8d 617 goto fail2;
15e3004a 618 }
c8b97818 619 }
15e3004a 620 faili = nr_pages - 1;
c8b97818
CM
621 cb->nr_pages = nr_pages;
622
7f042a83 623 add_ra_bio_pages(inode, em_start + em_len, cb);
771ed689 624
771ed689 625 /* include any pages we added in add_ra_bio_pages */
81381053 626 cb->len = bio->bi_iter.bi_size;
771ed689 627
c821e7f3 628 comp_bio = btrfs_bio_alloc(bdev, cur_disk_byte);
37226b21 629 bio_set_op_attrs(comp_bio, REQ_OP_READ, 0);
c8b97818
CM
630 comp_bio->bi_private = cb;
631 comp_bio->bi_end_io = end_compressed_bio_read;
a50299ae 632 refcount_set(&cb->pending_bios, 1);
c8b97818 633
306e16ce 634 for (pg_index = 0; pg_index < nr_pages; pg_index++) {
4e4cbee9
CH
635 int submit = 0;
636
306e16ce 637 page = cb->compressed_pages[pg_index];
c8b97818 638 page->mapping = inode->i_mapping;
09cbfeaf 639 page->index = em_start >> PAGE_SHIFT;
d20f7043 640
4f024f37 641 if (comp_bio->bi_iter.bi_size)
4e4cbee9 642 submit = tree->ops->merge_bio_hook(page, 0,
09cbfeaf 643 PAGE_SIZE,
c8b97818 644 comp_bio, 0);
c8b97818 645
70b99e69 646 page->mapping = NULL;
4e4cbee9 647 if (submit || bio_add_page(comp_bio, page, PAGE_SIZE, 0) <
09cbfeaf 648 PAGE_SIZE) {
0b246afa
JM
649 ret = btrfs_bio_wq_end_io(fs_info, comp_bio,
650 BTRFS_WQ_ENDIO_DATA);
79787eaa 651 BUG_ON(ret); /* -ENOMEM */
c8b97818 652
af09abfe
CM
653 /*
654 * inc the count before we submit the bio so
655 * we know the end IO handler won't happen before
656 * we inc the count. Otherwise, the cb might get
657 * freed before we're done setting it up
658 */
a50299ae 659 refcount_inc(&cb->pending_bios);
af09abfe 660
6cbff00f 661 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
2ff7e61e
JM
662 ret = btrfs_lookup_bio_sums(inode, comp_bio,
663 sums);
79787eaa 664 BUG_ON(ret); /* -ENOMEM */
d20f7043 665 }
ed6078f7 666 sums += DIV_ROUND_UP(comp_bio->bi_iter.bi_size,
0b246afa 667 fs_info->sectorsize);
d20f7043 668
2ff7e61e 669 ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
4246a0b6 670 if (ret) {
4e4cbee9 671 comp_bio->bi_status = ret;
4246a0b6
CH
672 bio_endio(comp_bio);
673 }
c8b97818 674
c821e7f3 675 comp_bio = btrfs_bio_alloc(bdev, cur_disk_byte);
37226b21 676 bio_set_op_attrs(comp_bio, REQ_OP_READ, 0);
771ed689
CM
677 comp_bio->bi_private = cb;
678 comp_bio->bi_end_io = end_compressed_bio_read;
679
09cbfeaf 680 bio_add_page(comp_bio, page, PAGE_SIZE, 0);
c8b97818 681 }
09cbfeaf 682 cur_disk_byte += PAGE_SIZE;
c8b97818 683 }
c8b97818 684
0b246afa 685 ret = btrfs_bio_wq_end_io(fs_info, comp_bio, BTRFS_WQ_ENDIO_DATA);
79787eaa 686 BUG_ON(ret); /* -ENOMEM */
c8b97818 687
c2db1073 688 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
2ff7e61e 689 ret = btrfs_lookup_bio_sums(inode, comp_bio, sums);
79787eaa 690 BUG_ON(ret); /* -ENOMEM */
c2db1073 691 }
d20f7043 692
2ff7e61e 693 ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
4246a0b6 694 if (ret) {
4e4cbee9 695 comp_bio->bi_status = ret;
4246a0b6
CH
696 bio_endio(comp_bio);
697 }
c8b97818 698
c8b97818 699 return 0;
6b82ce8d 700
701fail2:
15e3004a
JB
702 while (faili >= 0) {
703 __free_page(cb->compressed_pages[faili]);
704 faili--;
705 }
6b82ce8d 706
707 kfree(cb->compressed_pages);
708fail1:
709 kfree(cb);
710out:
711 free_extent_map(em);
712 return ret;
c8b97818 713}
261507a0 714
17b5a6c1
TT
715/*
716 * The heuristic uses systematic sampling to collect data from the input data
717 * range; the logic can be tuned by the following constants:
718 *
719 * @SAMPLING_READ_SIZE - how many consecutive bytes are copied for each sample
720 * @SAMPLING_INTERVAL - distance between the starts of consecutive samples
721 */
722#define SAMPLING_READ_SIZE (16)
723#define SAMPLING_INTERVAL (256)
724
725/*
726 * For statistical analysis of the input data we consider bytes that form a
727 * Galois Field of 256 objects. Each object has an attribute count, ie. how
728 * many times the object appeared in the sample.
729 */
730#define BUCKET_SIZE (256)
731
732/*
733 * The size of the sample is based on a statistical sampling rule of thumb.
734 * The common way is to perform sampling tests as long as the number of
735 * elements in each cell is at least 5.
736 *
737 * Instead of 5, we choose 32 to obtain more accurate results.
738 * If the data contain the maximum number of symbols, which is 256, we obtain a
739 * sample size bound by 8192.
740 *
741 * For a sample of at most 8KB of data per data range: 16 consecutive bytes
742 * from up to 512 locations.
743 */
744#define MAX_SAMPLE_SIZE (BTRFS_MAX_UNCOMPRESSED * \
745 SAMPLING_READ_SIZE / SAMPLING_INTERVAL)
746
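/*
 * Editorial note (not part of the original file), working out the numbers
 * above: assuming BTRFS_MAX_UNCOMPRESSED is 128KiB as defined elsewhere in
 * btrfs, the heuristic visits 131072 / 256 = 512 sample positions and copies
 * 16 bytes from each, so MAX_SAMPLE_SIZE = 131072 * 16 / 256 = 8192 bytes,
 * matching the "at most 8KB ... from up to 512 locations" statement above.
 */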
747struct bucket_item {
748 u32 count;
749};
4e439a0b
TT
750
751struct heuristic_ws {
17b5a6c1
TT
752 /* Partial copy of input data */
753 u8 *sample;
a440d48c 754 u32 sample_size;
17b5a6c1
TT
755 /* Buckets store counters for each byte value */
756 struct bucket_item *bucket;
440c840c
TT
757 /* Sorting buffer */
758 struct bucket_item *bucket_b;
4e439a0b
TT
759 struct list_head list;
760};
761
762static void free_heuristic_ws(struct list_head *ws)
763{
764 struct heuristic_ws *workspace;
765
766 workspace = list_entry(ws, struct heuristic_ws, list);
767
17b5a6c1
TT
768 kvfree(workspace->sample);
769 kfree(workspace->bucket);
440c840c 770 kfree(workspace->bucket_b);
4e439a0b
TT
771 kfree(workspace);
772}
773
774static struct list_head *alloc_heuristic_ws(void)
775{
776 struct heuristic_ws *ws;
777
778 ws = kzalloc(sizeof(*ws), GFP_KERNEL);
779 if (!ws)
780 return ERR_PTR(-ENOMEM);
781
17b5a6c1
TT
782 ws->sample = kvmalloc(MAX_SAMPLE_SIZE, GFP_KERNEL);
783 if (!ws->sample)
784 goto fail;
785
786 ws->bucket = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket), GFP_KERNEL);
787 if (!ws->bucket)
788 goto fail;
4e439a0b 789
440c840c
TT
790 ws->bucket_b = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket_b), GFP_KERNEL);
791 if (!ws->bucket_b)
792 goto fail;
793
17b5a6c1 794 INIT_LIST_HEAD(&ws->list);
4e439a0b 795 return &ws->list;
17b5a6c1
TT
796fail:
797 free_heuristic_ws(&ws->list);
798 return ERR_PTR(-ENOMEM);
4e439a0b
TT
799}
800
801struct workspaces_list {
d9187649
BL
802 struct list_head idle_ws;
803 spinlock_t ws_lock;
6ac10a6a
DS
804 /* Number of free workspaces */
805 int free_ws;
806 /* Total number of allocated workspaces */
807 atomic_t total_ws;
808 /* Waiters for a free workspace */
d9187649 809 wait_queue_head_t ws_wait;
4e439a0b
TT
810};
811
812static struct workspaces_list btrfs_comp_ws[BTRFS_COMPRESS_TYPES];
813
814static struct workspaces_list btrfs_heuristic_ws;
261507a0 815
e8c9f186 816static const struct btrfs_compress_op * const btrfs_compress_op[] = {
261507a0 817 &btrfs_zlib_compress,
a6fa6fae 818 &btrfs_lzo_compress,
5c1aab1d 819 &btrfs_zstd_compress,
261507a0
LZ
820};
821
143bede5 822void __init btrfs_init_compress(void)
261507a0 823{
4e439a0b 824 struct list_head *workspace;
261507a0
LZ
825 int i;
826
4e439a0b
TT
827 INIT_LIST_HEAD(&btrfs_heuristic_ws.idle_ws);
828 spin_lock_init(&btrfs_heuristic_ws.ws_lock);
829 atomic_set(&btrfs_heuristic_ws.total_ws, 0);
830 init_waitqueue_head(&btrfs_heuristic_ws.ws_wait);
f77dd0d6 831
4e439a0b
TT
832 workspace = alloc_heuristic_ws();
833 if (IS_ERR(workspace)) {
834 pr_warn(
835 "BTRFS: cannot preallocate heuristic workspace, will try later\n");
836 } else {
837 atomic_set(&btrfs_heuristic_ws.total_ws, 1);
838 btrfs_heuristic_ws.free_ws = 1;
839 list_add(workspace, &btrfs_heuristic_ws.idle_ws);
840 }
841
842 for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
d9187649
BL
843 INIT_LIST_HEAD(&btrfs_comp_ws[i].idle_ws);
844 spin_lock_init(&btrfs_comp_ws[i].ws_lock);
6ac10a6a 845 atomic_set(&btrfs_comp_ws[i].total_ws, 0);
d9187649 846 init_waitqueue_head(&btrfs_comp_ws[i].ws_wait);
f77dd0d6
DS
847
848 /*
849 * Preallocate one workspace for each compression type so
850 * we can guarantee forward progress in the worst case
851 */
852 workspace = btrfs_compress_op[i]->alloc_workspace();
853 if (IS_ERR(workspace)) {
62e85577 854 pr_warn("BTRFS: cannot preallocate compression workspace, will try later\n");
f77dd0d6
DS
855 } else {
856 atomic_set(&btrfs_comp_ws[i].total_ws, 1);
857 btrfs_comp_ws[i].free_ws = 1;
858 list_add(workspace, &btrfs_comp_ws[i].idle_ws);
859 }
261507a0 860 }
261507a0
LZ
861}
862
863/*
e721e49d
DS
864 * This finds an available workspace or allocates a new one.
865 * If it's not possible to allocate a new one, waits until there's one.
866 * Preallocation provides a forward progress guarantee and we do not return
867 * errors.
261507a0 868 */
4e439a0b 869static struct list_head *__find_workspace(int type, bool heuristic)
261507a0
LZ
870{
871 struct list_head *workspace;
872 int cpus = num_online_cpus();
873 int idx = type - 1;
fe308533 874 unsigned nofs_flag;
4e439a0b
TT
875 struct list_head *idle_ws;
876 spinlock_t *ws_lock;
877 atomic_t *total_ws;
878 wait_queue_head_t *ws_wait;
879 int *free_ws;
880
881 if (heuristic) {
882 idle_ws = &btrfs_heuristic_ws.idle_ws;
883 ws_lock = &btrfs_heuristic_ws.ws_lock;
884 total_ws = &btrfs_heuristic_ws.total_ws;
885 ws_wait = &btrfs_heuristic_ws.ws_wait;
886 free_ws = &btrfs_heuristic_ws.free_ws;
887 } else {
888 idle_ws = &btrfs_comp_ws[idx].idle_ws;
889 ws_lock = &btrfs_comp_ws[idx].ws_lock;
890 total_ws = &btrfs_comp_ws[idx].total_ws;
891 ws_wait = &btrfs_comp_ws[idx].ws_wait;
892 free_ws = &btrfs_comp_ws[idx].free_ws;
893 }
261507a0 894
261507a0 895again:
d9187649
BL
896 spin_lock(ws_lock);
897 if (!list_empty(idle_ws)) {
898 workspace = idle_ws->next;
261507a0 899 list_del(workspace);
6ac10a6a 900 (*free_ws)--;
d9187649 901 spin_unlock(ws_lock);
261507a0
LZ
902 return workspace;
903
904 }
6ac10a6a 905 if (atomic_read(total_ws) > cpus) {
261507a0
LZ
906 DEFINE_WAIT(wait);
907
d9187649
BL
908 spin_unlock(ws_lock);
909 prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE);
6ac10a6a 910 if (atomic_read(total_ws) > cpus && !*free_ws)
261507a0 911 schedule();
d9187649 912 finish_wait(ws_wait, &wait);
261507a0
LZ
913 goto again;
914 }
6ac10a6a 915 atomic_inc(total_ws);
d9187649 916 spin_unlock(ws_lock);
261507a0 917
fe308533
DS
918 /*
919 * Allocation helpers call vmalloc that can't use GFP_NOFS, so we have
920 * to turn it off here because we might get called from the restricted
921 * context of btrfs_compress_bio/btrfs_compress_pages
922 */
923 nofs_flag = memalloc_nofs_save();
4e439a0b
TT
924 if (heuristic)
925 workspace = alloc_heuristic_ws();
926 else
927 workspace = btrfs_compress_op[idx]->alloc_workspace();
fe308533
DS
928 memalloc_nofs_restore(nofs_flag);
929
261507a0 930 if (IS_ERR(workspace)) {
6ac10a6a 931 atomic_dec(total_ws);
d9187649 932 wake_up(ws_wait);
e721e49d
DS
933
934 /*
935 * Do not return the error but go back to waiting. There's a
936 * workspace preallocated for each type and the compression
937 * time is bounded so we get to a workspace eventually. This
938 * makes our caller's life easier.
52356716
DS
939 *
940 * To prevent silent and low-probability deadlocks (when the
941 * initial preallocation fails), check if there are any
942 * workspaces at all.
e721e49d 943 */
52356716
DS
944 if (atomic_read(total_ws) == 0) {
945 static DEFINE_RATELIMIT_STATE(_rs,
946 /* once per minute */ 60 * HZ,
947 /* no burst */ 1);
948
949 if (__ratelimit(&_rs)) {
ab8d0fc4 950 pr_warn("BTRFS: no compression workspaces, low memory, retrying\n");
52356716
DS
951 }
952 }
e721e49d 953 goto again;
261507a0
LZ
954 }
955 return workspace;
956}
957
4e439a0b
TT
958static struct list_head *find_workspace(int type)
959{
960 return __find_workspace(type, false);
961}
962
261507a0
LZ
963/*
964 * put a workspace struct back on the list or free it if we have enough
965 * idle ones sitting around
966 */
4e439a0b
TT
967static void __free_workspace(int type, struct list_head *workspace,
968 bool heuristic)
261507a0
LZ
969{
970 int idx = type - 1;
4e439a0b
TT
971 struct list_head *idle_ws;
972 spinlock_t *ws_lock;
973 atomic_t *total_ws;
974 wait_queue_head_t *ws_wait;
975 int *free_ws;
976
977 if (heuristic) {
978 idle_ws = &btrfs_heuristic_ws.idle_ws;
979 ws_lock = &btrfs_heuristic_ws.ws_lock;
980 total_ws = &btrfs_heuristic_ws.total_ws;
981 ws_wait = &btrfs_heuristic_ws.ws_wait;
982 free_ws = &btrfs_heuristic_ws.free_ws;
983 } else {
984 idle_ws = &btrfs_comp_ws[idx].idle_ws;
985 ws_lock = &btrfs_comp_ws[idx].ws_lock;
986 total_ws = &btrfs_comp_ws[idx].total_ws;
987 ws_wait = &btrfs_comp_ws[idx].ws_wait;
988 free_ws = &btrfs_comp_ws[idx].free_ws;
989 }
d9187649
BL
990
991 spin_lock(ws_lock);
26b28dce 992 if (*free_ws <= num_online_cpus()) {
d9187649 993 list_add(workspace, idle_ws);
6ac10a6a 994 (*free_ws)++;
d9187649 995 spin_unlock(ws_lock);
261507a0
LZ
996 goto wake;
997 }
d9187649 998 spin_unlock(ws_lock);
261507a0 999
4e439a0b
TT
1000 if (heuristic)
1001 free_heuristic_ws(workspace);
1002 else
1003 btrfs_compress_op[idx]->free_workspace(workspace);
6ac10a6a 1004 atomic_dec(total_ws);
261507a0 1005wake:
a83342aa
DS
1006 /*
1007 * Make sure counter is updated before we wake up waiters.
1008 */
66657b31 1009 smp_mb();
d9187649
BL
1010 if (waitqueue_active(ws_wait))
1011 wake_up(ws_wait);
261507a0
LZ
1012}
1013
4e439a0b
TT
1014static void free_workspace(int type, struct list_head *ws)
1015{
1016 return __free_workspace(type, ws, false);
1017}
1018
261507a0
LZ
1019/*
1020 * cleanup function for module exit
1021 */
1022static void free_workspaces(void)
1023{
1024 struct list_head *workspace;
1025 int i;
1026
4e439a0b
TT
1027 while (!list_empty(&btrfs_heuristic_ws.idle_ws)) {
1028 workspace = btrfs_heuristic_ws.idle_ws.next;
1029 list_del(workspace);
1030 free_heuristic_ws(workspace);
1031 atomic_dec(&btrfs_heuristic_ws.total_ws);
1032 }
1033
261507a0 1034 for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
d9187649
BL
1035 while (!list_empty(&btrfs_comp_ws[i].idle_ws)) {
1036 workspace = btrfs_comp_ws[i].idle_ws.next;
261507a0
LZ
1037 list_del(workspace);
1038 btrfs_compress_op[i]->free_workspace(workspace);
6ac10a6a 1039 atomic_dec(&btrfs_comp_ws[i].total_ws);
261507a0
LZ
1040 }
1041 }
1042}
1043
1044/*
38c31464
DS
1045 * Given an address space and start and length, compress the bytes into @pages
1046 * that are allocated on demand.
261507a0 1047 *
f51d2b59
DS
1048 * @type_level is encoded algorithm and level, where level 0 means whatever
1049 * default the algorithm chooses and is opaque here;
1050 * - the compression algorithm is stored in bits 0-3
1051 * - the level is stored in bits 4-7
1052 *
4d3a800e
DS
1053 * @out_pages is an in/out parameter, holds maximum number of pages to allocate
1054 * and returns number of actually allocated pages
261507a0 1055 *
38c31464
DS
1056 * @total_in is used to return the number of bytes actually read. It
1057 * may be smaller than the input length if we had to exit early because we
261507a0
LZ
1058 * ran out of room in the pages array or because we crossed the
1059 * max_out threshold.
1060 *
38c31464
DS
1061 * @total_out is an in/out parameter, must be set to the input length and will
1062 * be also used to return the total number of compressed bytes
261507a0 1063 *
38c31464 1064 * @max_out tells us the max number of bytes that we're allowed to
261507a0
LZ
1065 * stuff into pages
1066 */
f51d2b59 1067int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
38c31464 1068 u64 start, struct page **pages,
261507a0
LZ
1069 unsigned long *out_pages,
1070 unsigned long *total_in,
e5d74902 1071 unsigned long *total_out)
261507a0
LZ
1072{
1073 struct list_head *workspace;
1074 int ret;
f51d2b59 1075 int type = type_level & 0xF;
261507a0
LZ
1076
1077 workspace = find_workspace(type);
261507a0 1078
f51d2b59 1079 btrfs_compress_op[type - 1]->set_level(workspace, type_level);
261507a0 1080 ret = btrfs_compress_op[type-1]->compress_pages(workspace, mapping,
38c31464 1081 start, pages,
4d3a800e 1082 out_pages,
e5d74902 1083 total_in, total_out);
261507a0
LZ
1084 free_workspace(type, workspace);
1085 return ret;
1086}
1087
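/*
 * Illustrative sketch (editorial addition, not from the original file): one
 * way a caller could pack the @type_level argument documented above. The low
 * four bits carry the compression type (matching the "type_level & 0xF"
 * extraction in btrfs_compress_pages()) and bits 4-7 carry the level, where
 * 0 keeps the algorithm's default. The helper name is hypothetical.
 */
static inline unsigned int example_pack_type_level(int type, int level)
{
	/* e.g. BTRFS_COMPRESS_ZLIB (1) at level 9 packs to 0x91 */
	return (type & 0xF) | ((level & 0xF) << 4);
}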
1088/*
1089 * pages_in is an array of pages with compressed data.
1090 *
1091 * disk_start is the starting logical offset of this array in the file
1092 *
974b1adc 1093 * orig_bio contains the pages from the file that we want to decompress into
261507a0
LZ
1094 *
1095 * srclen is the number of bytes in pages_in
1096 *
1097 * The basic idea is that we have a bio that was created by readpages.
1098 * The pages in the bio are for the uncompressed data, and they may not
1099 * be contiguous. They all correspond to the range of bytes covered by
1100 * the compressed extent.
1101 */
8140dc30 1102static int btrfs_decompress_bio(struct compressed_bio *cb)
261507a0
LZ
1103{
1104 struct list_head *workspace;
1105 int ret;
8140dc30 1106 int type = cb->compress_type;
261507a0
LZ
1107
1108 workspace = find_workspace(type);
e1ddce71 1109 ret = btrfs_compress_op[type - 1]->decompress_bio(workspace, cb);
261507a0 1110 free_workspace(type, workspace);
e1ddce71 1111
261507a0
LZ
1112 return ret;
1113}
1114
1115/*
1116 * a less complex decompression routine. Our compressed data fits in a
1117 * single page, and we want to read a single page out of it.
1118 * start_byte tells us the offset into the compressed data we're interested in
1119 */
1120int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
1121 unsigned long start_byte, size_t srclen, size_t destlen)
1122{
1123 struct list_head *workspace;
1124 int ret;
1125
1126 workspace = find_workspace(type);
261507a0
LZ
1127
1128 ret = btrfs_compress_op[type-1]->decompress(workspace, data_in,
1129 dest_page, start_byte,
1130 srclen, destlen);
1131
1132 free_workspace(type, workspace);
1133 return ret;
1134}
1135
e67c718b 1136void __cold btrfs_exit_compress(void)
261507a0
LZ
1137{
1138 free_workspaces();
1139}
3a39c18d
LZ
1140
1141/*
1142 * Copy uncompressed data from working buffer to pages.
1143 *
1144 * buf_start is the byte offset of the start of our working buffer within
1145 * the uncompressed data
1146 * total_out is the end offset (exclusive) of the data in the working buffer
1147 */
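/*
 * Worked example (editorial note, not part of the original file), assuming
 * 4KiB pages and a one-page working buffer: if the buffer currently holds
 * decompressed bytes [8192, 12288) then buf_start = 8192 and
 * total_out = 12288. For a destination page whose start_byte (page offset
 * minus disk_start) is 12288, total_out <= start_byte, so we return 1 and
 * the caller decompresses more data first. On the next call, with
 * buf_start = 12288 and total_out = 16384, buf_offset is 0, working_bytes
 * is 4096 and the whole page is copied.
 */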
14a3357b 1148int btrfs_decompress_buf2page(const char *buf, unsigned long buf_start,
3a39c18d 1149 unsigned long total_out, u64 disk_start,
974b1adc 1150 struct bio *bio)
3a39c18d
LZ
1151{
1152 unsigned long buf_offset;
1153 unsigned long current_buf_start;
1154 unsigned long start_byte;
6e78b3f7 1155 unsigned long prev_start_byte;
3a39c18d
LZ
1156 unsigned long working_bytes = total_out - buf_start;
1157 unsigned long bytes;
1158 char *kaddr;
974b1adc 1159 struct bio_vec bvec = bio_iter_iovec(bio, bio->bi_iter);
3a39c18d
LZ
1160
1161 /*
1162 * start byte is the first byte of the page we're currently
1163 * copying into relative to the start of the compressed data.
1164 */
974b1adc 1165 start_byte = page_offset(bvec.bv_page) - disk_start;
3a39c18d
LZ
1166
1167 /* we haven't yet hit data corresponding to this page */
1168 if (total_out <= start_byte)
1169 return 1;
1170
1171 /*
1172 * the start of the data we care about is offset into
1173 * the middle of our working buffer
1174 */
1175 if (total_out > start_byte && buf_start < start_byte) {
1176 buf_offset = start_byte - buf_start;
1177 working_bytes -= buf_offset;
1178 } else {
1179 buf_offset = 0;
1180 }
1181 current_buf_start = buf_start;
1182
1183 /* copy bytes from the working buffer into the pages */
1184 while (working_bytes > 0) {
974b1adc
CH
1185 bytes = min_t(unsigned long, bvec.bv_len,
1186 PAGE_SIZE - buf_offset);
3a39c18d 1187 bytes = min(bytes, working_bytes);
974b1adc
CH
1188
1189 kaddr = kmap_atomic(bvec.bv_page);
1190 memcpy(kaddr + bvec.bv_offset, buf + buf_offset, bytes);
7ac687d9 1191 kunmap_atomic(kaddr);
974b1adc 1192 flush_dcache_page(bvec.bv_page);
3a39c18d 1193
3a39c18d
LZ
1194 buf_offset += bytes;
1195 working_bytes -= bytes;
1196 current_buf_start += bytes;
1197
1198 /* check if we need to pick another page */
974b1adc
CH
1199 bio_advance(bio, bytes);
1200 if (!bio->bi_iter.bi_size)
1201 return 0;
1202 bvec = bio_iter_iovec(bio, bio->bi_iter);
6e78b3f7 1203 prev_start_byte = start_byte;
974b1adc 1204 start_byte = page_offset(bvec.bv_page) - disk_start;
3a39c18d 1205
974b1adc 1206 /*
6e78b3f7
OS
1207 * We need to make sure we're only adjusting
1208 * our offset into the compression working buffer when
1209 * we're switching pages. Otherwise we can incorrectly
1210 * keep copying when we were actually done.
974b1adc 1211 */
6e78b3f7
OS
1212 if (start_byte != prev_start_byte) {
1213 /*
1214 * make sure our new page is covered by this
1215 * working buffer
1216 */
1217 if (total_out <= start_byte)
1218 return 1;
3a39c18d 1219
6e78b3f7
OS
1220 /*
1221 * the next page in the biovec might not be adjacent
1222 * to the last page, but it might still be found
1223 * inside this working buffer. bump our offset pointer
1224 */
1225 if (total_out > start_byte &&
1226 current_buf_start < start_byte) {
1227 buf_offset = start_byte - buf_start;
1228 working_bytes = total_out - start_byte;
1229 current_buf_start = buf_start + buf_offset;
1230 }
3a39c18d
LZ
1231 }
1232 }
1233
1234 return 1;
1235}
c2fcdcdf 1236
19562430
TT
1237/*
1238 * Shannon Entropy calculation
1239 *
1240 * Pure byte distribution analysis fails to determine the compressibility of data.
1241 * Try calculating entropy to estimate the average minimum number of bits
1242 * needed to encode the sampled data.
1243 *
1244 * For convenience, return the percentage of needed bits instead of the number
1245 * of bits directly.
1246 *
1247 * @ENTROPY_LVL_ACEPTABLE - below that threshold, sample has low byte entropy
1248 * and can be compressible with high probability
1249 *
1250 * @ENTROPY_LVL_HIGH - data are not compressible with high probability
1251 *
1252 * Use of ilog2() decreases precision, we lower the LVL to 5 to compensate.
1253 */
1254#define ENTROPY_LVL_ACEPTABLE (65)
1255#define ENTROPY_LVL_HIGH (80)
1256
1257/*
1258 * For increased precision in the shannon_entropy calculation,
1259 * let's do pow(n, M) to save more digits after the decimal point:
1260 *
1261 * - maximum int bit length is 64
1262 * - ilog2(MAX_SAMPLE_SIZE) -> 13
1263 * - 13 * 4 = 52 < 64 -> M = 4
1264 *
1265 * So use pow(n, 4).
1266 */
1267static inline u32 ilog2_w(u64 n)
1268{
1269 return ilog2(n * n * n * n);
1270}
1271
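/*
 * Editorial note (not part of the original file), spelling out the bound
 * above: MAX_SAMPLE_SIZE = 8192 = 2^13, so for any sample size n <= 8192,
 * n^4 <= 2^52 fits comfortably in a u64 and ilog2(n * n * n * n) never
 * overflows. The result is log2(n) measured in quarter-bit steps, which is
 * the extra precision the comment above refers to.
 */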
1272static u32 shannon_entropy(struct heuristic_ws *ws)
1273{
1274 const u32 entropy_max = 8 * ilog2_w(2);
1275 u32 entropy_sum = 0;
1276 u32 p, p_base, sz_base;
1277 u32 i;
1278
1279 sz_base = ilog2_w(ws->sample_size);
1280 for (i = 0; i < BUCKET_SIZE && ws->bucket[i].count > 0; i++) {
1281 p = ws->bucket[i].count;
1282 p_base = ilog2_w(p);
1283 entropy_sum += p * (sz_base - p_base);
1284 }
1285
1286 entropy_sum /= ws->sample_size;
1287 return entropy_sum * 100 / entropy_max;
1288}
1289
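/*
 * Editorial note (not part of the original file), checking the endpoints of
 * the formula above for an 8192-byte sample: if every byte value is equally
 * frequent, each of the 256 buckets counts 32, sz_base = ilog2_w(8192) = 52,
 * p_base = ilog2_w(32) = 20, and entropy_sum = 256 * 32 * (52 - 20) / 8192
 * = 32 = entropy_max, i.e. 100%. If the sample is a single repeated byte,
 * the only bucket has p_base = sz_base and the result is 0%.
 */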
440c840c
TT
1290#define RADIX_BASE 4U
1291#define COUNTERS_SIZE (1U << RADIX_BASE)
1292
1293static u8 get4bits(u64 num, int shift) {
1294 u8 low4bits;
1295
1296 num >>= shift;
1297 /* Reverse order */
1298 low4bits = (COUNTERS_SIZE - 1) - (num % COUNTERS_SIZE);
1299 return low4bits;
1300}
1301
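/*
 * Editorial note (not part of the original file): complementing each 4-bit
 * digit ((COUNTERS_SIZE - 1) - digit) is what makes radix_sort() below sort
 * in descending order. For example, at shift 0 a count of 9 maps to key
 * 15 - 9 = 6 while a count of 3 maps to 15 - 3 = 12; sorting the keys in
 * ascending order therefore places the larger count first, which is the
 * "reverse order" byte_core_set_size() relies on.
 */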
440c840c
TT
1302/*
1303 * Use 4 bits as radix base
1304 * Use 16 u32 counters for calculating the new position in the buf array
1305 *
1306 * @array - array that will be sorted
1307 * @array_buf - buffer array to store sorting results
1308 * must be equal in size to @array
1309 * @num - array size
440c840c 1310 */
23ae8c63 1311static void radix_sort(struct bucket_item *array, struct bucket_item *array_buf,
36243c91 1312 int num)
858177d3 1313{
440c840c
TT
1314 u64 max_num;
1315 u64 buf_num;
1316 u32 counters[COUNTERS_SIZE];
1317 u32 new_addr;
1318 u32 addr;
1319 int bitlen;
1320 int shift;
1321 int i;
858177d3 1322
440c840c
TT
1323 /*
1324 * Try to avoid useless loop iterations for small numbers stored in big
1325 * counters. Example: 48 33 4 ... in a 64bit array
1326 */
23ae8c63 1327 max_num = array[0].count;
440c840c 1328 for (i = 1; i < num; i++) {
23ae8c63 1329 buf_num = array[i].count;
440c840c
TT
1330 if (buf_num > max_num)
1331 max_num = buf_num;
1332 }
1333
1334 buf_num = ilog2(max_num);
1335 bitlen = ALIGN(buf_num, RADIX_BASE * 2);
1336
1337 shift = 0;
1338 while (shift < bitlen) {
1339 memset(counters, 0, sizeof(counters));
1340
1341 for (i = 0; i < num; i++) {
23ae8c63 1342 buf_num = array[i].count;
440c840c
TT
1343 addr = get4bits(buf_num, shift);
1344 counters[addr]++;
1345 }
1346
1347 for (i = 1; i < COUNTERS_SIZE; i++)
1348 counters[i] += counters[i - 1];
1349
1350 for (i = num - 1; i >= 0; i--) {
23ae8c63 1351 buf_num = array[i].count;
440c840c
TT
1352 addr = get4bits(buf_num, shift);
1353 counters[addr]--;
1354 new_addr = counters[addr];
7add17be 1355 array_buf[new_addr] = array[i];
440c840c
TT
1356 }
1357
1358 shift += RADIX_BASE;
1359
1360 /*
1361 * A normal radix sort expects to move data from a temporary array to
1362 * the main one. But that requires some CPU time. Avoid that
1363 * by doing another sort iteration into the original array instead of
1364 * a memcpy()
1365 */
1366 memset(counters, 0, sizeof(counters));
1367
1368 for (i = 0; i < num; i ++) {
23ae8c63 1369 buf_num = array_buf[i].count;
440c840c
TT
1370 addr = get4bits(buf_num, shift);
1371 counters[addr]++;
1372 }
1373
1374 for (i = 1; i < COUNTERS_SIZE; i++)
1375 counters[i] += counters[i - 1];
1376
1377 for (i = num - 1; i >= 0; i--) {
23ae8c63 1378 buf_num = array_buf[i].count;
440c840c
TT
1379 addr = get4bits(buf_num, shift);
1380 counters[addr]--;
1381 new_addr = counters[addr];
7add17be 1382 array[new_addr] = array_buf[i];
440c840c
TT
1383 }
1384
1385 shift += RADIX_BASE;
1386 }
858177d3
TT
1387}
1388
1389/*
1390 * Size of the core byte set - how many bytes cover 90% of the sample
1391 *
1392 * There are several types of structured binary data that use nearly all byte
1393 * values. The distribution can be uniform and counts in all buckets will be
1394 * nearly the same (eg. encrypted data). Unlikely to be compressible.
1395 *
1396 * Another possibility is a normal (Gaussian) distribution, where the data
1397 * could be potentially compressible, but we have to take a few more steps
1398 * to decide how much.
1399 *
1400 * @BYTE_CORE_SET_LOW - the main part of the byte values repeats frequently,
1401 * a compression algorithm can easily fix that
1402 * @BYTE_CORE_SET_HIGH - the data have a uniform distribution and with high
1403 * probability are not compressible
1404 */
1405#define BYTE_CORE_SET_LOW (64)
1406#define BYTE_CORE_SET_HIGH (200)
1407
1408static int byte_core_set_size(struct heuristic_ws *ws)
1409{
1410 u32 i;
1411 u32 coreset_sum = 0;
1412 const u32 core_set_threshold = ws->sample_size * 90 / 100;
1413 struct bucket_item *bucket = ws->bucket;
1414
1415 /* Sort in reverse order */
36243c91 1416 radix_sort(ws->bucket, ws->bucket_b, BUCKET_SIZE);
858177d3
TT
1417
1418 for (i = 0; i < BYTE_CORE_SET_LOW; i++)
1419 coreset_sum += bucket[i].count;
1420
1421 if (coreset_sum > core_set_threshold)
1422 return i;
1423
1424 for (; i < BYTE_CORE_SET_HIGH && bucket[i].count > 0; i++) {
1425 coreset_sum += bucket[i].count;
1426 if (coreset_sum > core_set_threshold)
1427 break;
1428 }
1429
1430 return i;
1431}
1432
a288e92c
TT
1433/*
1434 * Count byte values in buckets.
1435 * This heuristic can detect textual data (configs, xml, json, html, etc).
1436 * In most text-like data the byte set is restricted to a limited number of
1437 * possible characters, and that restriction in most cases makes the data easy
1438 * to compress.
1439 *
1440 * @BYTE_SET_THRESHOLD - consider all data within this byte set size:
1441 * less - compressible
1442 * more - need additional analysis
1443 */
1444#define BYTE_SET_THRESHOLD (64)
1445
1446static u32 byte_set_size(const struct heuristic_ws *ws)
1447{
1448 u32 i;
1449 u32 byte_set_size = 0;
1450
1451 for (i = 0; i < BYTE_SET_THRESHOLD; i++) {
1452 if (ws->bucket[i].count > 0)
1453 byte_set_size++;
1454 }
1455
1456 /*
1457 * Continue collecting count of byte values in buckets. If the byte
1458 * set size is bigger than the threshold, it's pointless to continue,
1459 * the detection technique would fail for this type of data.
1460 */
1461 for (; i < BUCKET_SIZE; i++) {
1462 if (ws->bucket[i].count > 0) {
1463 byte_set_size++;
1464 if (byte_set_size > BYTE_SET_THRESHOLD)
1465 return byte_set_size;
1466 }
1467 }
1468
1469 return byte_set_size;
1470}
1471
1fe4f6fa
TT
1472static bool sample_repeated_patterns(struct heuristic_ws *ws)
1473{
1474 const u32 half_of_sample = ws->sample_size / 2;
1475 const u8 *data = ws->sample;
1476
1477 return memcmp(&data[0], &data[half_of_sample], half_of_sample) == 0;
1478}
1479
a440d48c
TT
1480static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
1481 struct heuristic_ws *ws)
1482{
1483 struct page *page;
1484 u64 index, index_end;
1485 u32 i, curr_sample_pos;
1486 u8 *in_data;
1487
1488 /*
1489 * Compression handles the input data by chunks of 128KiB
1490 * (defined by BTRFS_MAX_UNCOMPRESSED)
1491 *
1492 * We do the same for the heuristic and loop over the whole range.
1493 *
1494 * MAX_SAMPLE_SIZE - calculated under assumption that heuristic will
1495 * process no more than BTRFS_MAX_UNCOMPRESSED at a time.
1496 */
1497 if (end - start > BTRFS_MAX_UNCOMPRESSED)
1498 end = start + BTRFS_MAX_UNCOMPRESSED;
1499
1500 index = start >> PAGE_SHIFT;
1501 index_end = end >> PAGE_SHIFT;
1502
1503 /* Don't miss unaligned end */
1504 if (!IS_ALIGNED(end, PAGE_SIZE))
1505 index_end++;
1506
1507 curr_sample_pos = 0;
1508 while (index < index_end) {
1509 page = find_get_page(inode->i_mapping, index);
1510 in_data = kmap(page);
1511 /* Handle case where the start is not aligned to PAGE_SIZE */
1512 i = start % PAGE_SIZE;
1513 while (i < PAGE_SIZE - SAMPLING_READ_SIZE) {
1514 /* Don't sample any garbage from the last page */
1515 if (start > end - SAMPLING_READ_SIZE)
1516 break;
1517 memcpy(&ws->sample[curr_sample_pos], &in_data[i],
1518 SAMPLING_READ_SIZE);
1519 i += SAMPLING_INTERVAL;
1520 start += SAMPLING_INTERVAL;
1521 curr_sample_pos += SAMPLING_READ_SIZE;
1522 }
1523 kunmap(page);
1524 put_page(page);
1525
1526 index++;
1527 }
1528
1529 ws->sample_size = curr_sample_pos;
1530}
1531
c2fcdcdf
TT
1532/*
1533 * Compression heuristic.
1534 *
1535 * For now it's a naive and optimistic 'return true'; we'll extend the logic to
1536 * quickly (compared to direct compression) detect data characteristics
1537 * (compressible/incompressible) to avoid wasting CPU time on incompressible
1538 * data.
1539 *
1540 * The following types of analysis can be performed:
1541 * - detect mostly zero data
1542 * - detect data with low "byte set" size (text, etc)
1543 * - detect data with low/high "core byte" set
1544 *
1545 * Return non-zero if the compression should be done, 0 otherwise.
1546 */
1547int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end)
1548{
4e439a0b
TT
1549 struct list_head *ws_list = __find_workspace(0, true);
1550 struct heuristic_ws *ws;
a440d48c
TT
1551 u32 i;
1552 u8 byte;
19562430 1553 int ret = 0;
c2fcdcdf 1554
4e439a0b
TT
1555 ws = list_entry(ws_list, struct heuristic_ws, list);
1556
a440d48c
TT
1557 heuristic_collect_sample(inode, start, end, ws);
1558
1fe4f6fa
TT
1559 if (sample_repeated_patterns(ws)) {
1560 ret = 1;
1561 goto out;
1562 }
1563
a440d48c
TT
1564 memset(ws->bucket, 0, sizeof(*ws->bucket)*BUCKET_SIZE);
1565
1566 for (i = 0; i < ws->sample_size; i++) {
1567 byte = ws->sample[i];
1568 ws->bucket[byte].count++;
c2fcdcdf
TT
1569 }
1570
a288e92c
TT
1571 i = byte_set_size(ws);
1572 if (i < BYTE_SET_THRESHOLD) {
1573 ret = 2;
1574 goto out;
1575 }
1576
858177d3
TT
1577 i = byte_core_set_size(ws);
1578 if (i <= BYTE_CORE_SET_LOW) {
1579 ret = 3;
1580 goto out;
1581 }
1582
1583 if (i >= BYTE_CORE_SET_HIGH) {
1584 ret = 0;
1585 goto out;
1586 }
1587
19562430
TT
1588 i = shannon_entropy(ws);
1589 if (i <= ENTROPY_LVL_ACEPTABLE) {
1590 ret = 4;
1591 goto out;
1592 }
1593
1594 /*
1595 * For the levels below ENTROPY_LVL_HIGH, additional analysis would be
1596 * needed to give green light to compression.
1597 *
1598 * For now just assume that compression at that level is not worth the
1599 * resources because:
1600 *
1601 * 1. it is possible to defrag the data later
1602 *
1603 * 2. the data would turn out to be hardly compressible, eg. 150 distinct byte
1604 * values with every bucket count at a level of ~54. The heuristic would
1605 * be confused. This can happen when the data have some internal repeated
1606 * patterns like "abbacbbc...". This can be detected by analyzing
1607 * pairs of bytes, which is too costly.
1608 */
1609 if (i < ENTROPY_LVL_HIGH) {
1610 ret = 5;
1611 goto out;
1612 } else {
1613 ret = 0;
1614 goto out;
1615 }
1616
1fe4f6fa 1617out:
4e439a0b 1618 __free_workspace(0, ws_list, true);
c2fcdcdf
TT
1619 return ret;
1620}
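/*
 * Editorial summary (not part of the original file) of the decision pipeline
 * above: ret 1 - the two halves of the sample are identical (repeated
 * pattern), 2 - fewer than 64 distinct byte values, 3 - at most 64 byte
 * values cover 90% of the sample, 4 - entropy at or below 65%, 5 - entropy
 * below 80% (still worth trying), 0 - core byte set >= 200 or entropy >= 80%,
 * i.e. do not compress.
 */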
f51d2b59
DS
1621
1622unsigned int btrfs_compress_str2level(const char *str)
1623{
1624 if (strncmp(str, "zlib", 4) != 0)
1625 return 0;
1626
fa4d885a
AB
1627 /* Accepted form: zlib:1 up to zlib:9 and nothing left after the number */
1628 if (str[4] == ':' && '1' <= str[5] && str[5] <= '9' && str[6] == 0)
1629 return str[5] - '0';
f51d2b59 1630
eae8d825 1631 return BTRFS_ZLIB_DEFAULT_LEVEL;
f51d2b59 1632}