// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/log2.h>
#include <crypto/hash.h>
#include "misc.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "ordered-data.h"
#include "compression.h"
#include "extent_io.h"
#include "extent_map.h"

static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" };

const char* btrfs_compress_type2str(enum btrfs_compression_type type)
{
	switch (type) {
	case BTRFS_COMPRESS_ZLIB:
	case BTRFS_COMPRESS_LZO:
	case BTRFS_COMPRESS_ZSTD:
	case BTRFS_COMPRESS_NONE:
		return btrfs_compress_types[type];
	default:
		break;
	}

	return NULL;
}

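/*
 * Check whether the first @len bytes of @str name a supported compression
 * algorithm ("zlib", "lzo" or "zstd").  Only strlen() bytes of the table
 * entry are compared, so a string with a trailing level such as "zlib:9"
 * is accepted too.  Entry 0 (no compression) is intentionally skipped.
 */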
bool btrfs_compress_is_valid_type(const char *str, size_t len)
{
	int i;

	for (i = 1; i < ARRAY_SIZE(btrfs_compress_types); i++) {
		size_t comp_len = strlen(btrfs_compress_types[i]);

		if (len < comp_len)
			continue;

		if (!strncmp(btrfs_compress_types[i], str, comp_len))
			return true;
	}
	return false;
}

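/*
 * Dispatch a compression request to the implementation selected by @type,
 * using the preallocated workspace @ws.  The two decompression wrappers
 * below do the same for the bio and single-page paths.
 */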
static int compression_compress_pages(int type, struct list_head *ws,
		struct address_space *mapping, u64 start, struct page **pages,
		unsigned long *out_pages, unsigned long *total_in,
		unsigned long *total_out)
{
	switch (type) {
	case BTRFS_COMPRESS_ZLIB:
		return zlib_compress_pages(ws, mapping, start, pages,
				out_pages, total_in, total_out);
	case BTRFS_COMPRESS_LZO:
		return lzo_compress_pages(ws, mapping, start, pages,
				out_pages, total_in, total_out);
	case BTRFS_COMPRESS_ZSTD:
		return zstd_compress_pages(ws, mapping, start, pages,
				out_pages, total_in, total_out);
	case BTRFS_COMPRESS_NONE:
	default:
		/*
		 * This can happen when compression races with a remount
		 * setting it to 'no compress', while the caller doesn't call
		 * inode_need_compress() to check if we really need to
		 * compress.
		 *
		 * Not a big deal, just need to inform the caller that we
		 * haven't allocated any pages yet.
		 */
		*out_pages = 0;
		return -E2BIG;
	}
}

static int compression_decompress_bio(int type, struct list_head *ws,
				      struct compressed_bio *cb)
{
	switch (type) {
	case BTRFS_COMPRESS_ZLIB: return zlib_decompress_bio(ws, cb);
	case BTRFS_COMPRESS_LZO: return lzo_decompress_bio(ws, cb);
	case BTRFS_COMPRESS_ZSTD: return zstd_decompress_bio(ws, cb);
	case BTRFS_COMPRESS_NONE:
	default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
		BUG();
	}
}

static int compression_decompress(int type, struct list_head *ws,
		unsigned char *data_in, struct page *dest_page,
		unsigned long start_byte, size_t srclen, size_t destlen)
{
	switch (type) {
	case BTRFS_COMPRESS_ZLIB: return zlib_decompress(ws, data_in, dest_page,
						start_byte, srclen, destlen);
	case BTRFS_COMPRESS_LZO: return lzo_decompress(ws, data_in, dest_page,
						start_byte, srclen, destlen);
	case BTRFS_COMPRESS_ZSTD: return zstd_decompress(ws, data_in, dest_page,
						start_byte, srclen, destlen);
	case BTRFS_COMPRESS_NONE:
	default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
		BUG();
	}
}

static int btrfs_decompress_bio(struct compressed_bio *cb);

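/*
 * Allocation size of a compressed_bio: the struct itself plus one checksum
 * per sector of compressed data, stored in the cb->sums trailing array.
 */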
static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
				      unsigned long disk_size)
{
	return sizeof(struct compressed_bio) +
		(DIV_ROUND_UP(disk_size, fs_info->sectorsize)) * fs_info->csum_size;
}

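/*
 * Verify the checksums of the compressed sectors in @bio against the sums
 * stashed in cb->sums at submit time.  Returns 0 on success or -EIO on the
 * first mismatch, after bumping the per-device corruption counter.
 */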
static int check_compressed_csum(struct btrfs_inode *inode, struct bio *bio,
				 u64 disk_start)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
	const u32 csum_size = fs_info->csum_size;
	const u32 sectorsize = fs_info->sectorsize;
	struct page *page;
	unsigned long i;
	char *kaddr;
	u8 csum[BTRFS_CSUM_SIZE];
	struct compressed_bio *cb = bio->bi_private;
	u8 *cb_sum = cb->sums;

	if (!fs_info->csum_root || (inode->flags & BTRFS_INODE_NODATASUM))
		return 0;

	shash->tfm = fs_info->csum_shash;

	for (i = 0; i < cb->nr_pages; i++) {
		u32 pg_offset;
		u32 bytes_left = PAGE_SIZE;
		page = cb->compressed_pages[i];

		/* Determine the remaining bytes inside the page first */
		if (i == cb->nr_pages - 1)
			bytes_left = cb->compressed_len - i * PAGE_SIZE;

		/* Hash through the page sector by sector */
		for (pg_offset = 0; pg_offset < bytes_left;
		     pg_offset += sectorsize) {
			kaddr = kmap_atomic(page);
			crypto_shash_digest(shash, kaddr + pg_offset,
					    sectorsize, csum);
			kunmap_atomic(kaddr);

			if (memcmp(&csum, cb_sum, csum_size) != 0) {
				btrfs_print_data_csum_error(inode, disk_start,
						csum, cb_sum, cb->mirror_num);
				if (btrfs_io_bio(bio)->device)
					btrfs_dev_stat_inc_and_print(
						btrfs_io_bio(bio)->device,
						BTRFS_DEV_STAT_CORRUPTION_ERRS);
				return -EIO;
			}
			cb_sum += csum_size;
			disk_start += sectorsize;
		}
	}
	return 0;
}

/*
 * When we finish reading compressed pages from the disk, we decompress
 * them and then run the bio end_io routines on the decompressed pages
 * (in the inode address space).
 *
 * This allows the checksumming and other IO error handling routines to
 * work normally.
 *
 * The compressed pages are freed here, and it must be run in process
 * context.
 */
static void end_compressed_bio_read(struct bio *bio)
{
	struct compressed_bio *cb = bio->bi_private;
	struct inode *inode;
	struct page *page;
	unsigned long index;
	unsigned int mirror = btrfs_io_bio(bio)->mirror_num;
	int ret = 0;

	if (bio->bi_status)
		cb->errors = 1;

	/*
	 * If there are more bios still pending for this compressed extent,
	 * just exit.
	 */
	if (!refcount_dec_and_test(&cb->pending_bios))
		goto out;

	/*
	 * Record the correct mirror_num in cb->orig_bio so that
	 * read-repair can work properly.
	 */
	btrfs_io_bio(cb->orig_bio)->mirror_num = mirror;
	cb->mirror_num = mirror;

	/*
	 * Some IO in this cb has failed, just skip the checksum as there
	 * is no way it could be correct.
	 */
	if (cb->errors == 1)
		goto csum_failed;

	inode = cb->inode;
	ret = check_compressed_csum(BTRFS_I(inode), bio,
				    bio->bi_iter.bi_sector << 9);
	if (ret)
		goto csum_failed;

	/*
	 * OK, we're the last bio for this extent, let's start the
	 * decompression.
	 */
	ret = btrfs_decompress_bio(cb);

csum_failed:
	if (ret)
		cb->errors = 1;

	/* Release the compressed pages */
	for (index = 0; index < cb->nr_pages; index++) {
		page = cb->compressed_pages[index];
		page->mapping = NULL;
		put_page(page);
	}

	/* Do io completion on the original bio */
	if (cb->errors) {
		bio_io_error(cb->orig_bio);
	} else {
		struct bio_vec *bvec;
		struct bvec_iter_all iter_all;

		/*
		 * We have verified the checksum already, set page checked so
		 * the end_io handlers know about it.
		 */
		ASSERT(!bio_flagged(bio, BIO_CLONED));
		bio_for_each_segment_all(bvec, cb->orig_bio, iter_all)
			SetPageChecked(bvec->bv_page);

		bio_endio(cb->orig_bio);
	}

	/* Finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
out:
	bio_put(bio);
}

/*
 * Clear the writeback bits on all of the file pages for a compressed write.
 */
static noinline void end_compressed_writeback(struct inode *inode,
					      const struct compressed_bio *cb)
{
	unsigned long index = cb->start >> PAGE_SHIFT;
	unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
	struct page *pages[16];
	unsigned long nr_pages = end_index - index + 1;
	int i;
	int ret;

	if (cb->errors)
		mapping_set_error(inode->i_mapping, -EIO);

	while (nr_pages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
				     min_t(unsigned long,
				     nr_pages, ARRAY_SIZE(pages)), pages);
		if (ret == 0) {
			nr_pages -= 1;
			index += 1;
			continue;
		}
		for (i = 0; i < ret; i++) {
			if (cb->errors)
				SetPageError(pages[i]);
			end_page_writeback(pages[i]);
			put_page(pages[i]);
		}
		nr_pages -= ret;
		index += ret;
	}
	/* The inode may be gone now */
}

/*
 * Do the cleanup once all the compressed pages hit the disk.  This will
 * clear writeback on the file pages and free the compressed pages.
 *
 * This also calls the writeback end hooks for the file pages so that
 * metadata and checksums can be updated in the file.
 */
static void end_compressed_bio_write(struct bio *bio)
{
	struct compressed_bio *cb = bio->bi_private;
	struct inode *inode;
	struct page *page;
	unsigned long index;

	if (bio->bi_status)
		cb->errors = 1;

	/*
	 * If there are more bios still pending for this compressed extent,
	 * just exit.
	 */
	if (!refcount_dec_and_test(&cb->pending_bios))
		goto out;

	/*
	 * OK, we're the last bio for this extent, step one is to call back
	 * into the FS and do all the end_io operations.
	 */
	inode = cb->inode;
	cb->compressed_pages[0]->mapping = cb->inode->i_mapping;
	btrfs_writepage_endio_finish_ordered(cb->compressed_pages[0],
			cb->start, cb->start + cb->len - 1,
			bio->bi_status == BLK_STS_OK);
	cb->compressed_pages[0]->mapping = NULL;

	end_compressed_writeback(inode, cb);
	/* Note, our inode could be gone now */

	/*
	 * Release the compressed pages, these came from alloc_page and
	 * are not attached to the inode at all.
	 */
	for (index = 0; index < cb->nr_pages; index++) {
		page = cb->compressed_pages[index];
		page->mapping = NULL;
		put_page(page);
	}

	/* Finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
out:
	bio_put(bio);
}

/*
 * Worker function to build and submit bios for previously compressed pages.
 * The corresponding pages in the inode should be marked for writeback
 * and the compressed pages should have a reference on them for dropping
 * when the IO is complete.
 *
 * This also checksums the file bytes and gets things ready for
 * the end io hooks.
 */
blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
				 unsigned long len, u64 disk_start,
				 unsigned long compressed_len,
				 struct page **compressed_pages,
				 unsigned long nr_pages,
				 unsigned int write_flags,
				 struct cgroup_subsys_state *blkcg_css)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct bio *bio = NULL;
	struct compressed_bio *cb;
	unsigned long bytes_left;
	int pg_index = 0;
	struct page *page;
	u64 first_byte = disk_start;
	blk_status_t ret;
	int skip_sum = inode->flags & BTRFS_INODE_NODATASUM;

	WARN_ON(!PAGE_ALIGNED(start));
	cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
	if (!cb)
		return BLK_STS_RESOURCE;
	refcount_set(&cb->pending_bios, 0);
	cb->errors = 0;
	cb->inode = &inode->vfs_inode;
	cb->start = start;
	cb->len = len;
	cb->mirror_num = 0;
	cb->compressed_pages = compressed_pages;
	cb->compressed_len = compressed_len;
	cb->orig_bio = NULL;
	cb->nr_pages = nr_pages;

	bio = btrfs_bio_alloc(first_byte);
	bio->bi_opf = REQ_OP_WRITE | write_flags;
	bio->bi_private = cb;
	bio->bi_end_io = end_compressed_bio_write;

	if (blkcg_css) {
		bio->bi_opf |= REQ_CGROUP_PUNT;
		kthread_associate_blkcg(blkcg_css);
	}
	refcount_set(&cb->pending_bios, 1);

	/* Create and submit bios for the compressed pages */
	bytes_left = compressed_len;
	for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
		int submit = 0;

		page = compressed_pages[pg_index];
		page->mapping = inode->vfs_inode.i_mapping;
		if (bio->bi_iter.bi_size)
			submit = btrfs_bio_fits_in_stripe(page, PAGE_SIZE, bio,
							  0);

		page->mapping = NULL;
		if (submit || bio_add_page(bio, page, PAGE_SIZE, 0) <
		    PAGE_SIZE) {
			/*
			 * Increment the count before we submit the bio so we
			 * know the end IO handler won't happen before we
			 * increment the count.  Otherwise, the cb might get
			 * freed before we're done setting it up.
			 */
			refcount_inc(&cb->pending_bios);
			ret = btrfs_bio_wq_end_io(fs_info, bio,
						  BTRFS_WQ_ENDIO_DATA);
			BUG_ON(ret); /* -ENOMEM */

			if (!skip_sum) {
				ret = btrfs_csum_one_bio(inode, bio, start, 1);
				BUG_ON(ret); /* -ENOMEM */
			}

			ret = btrfs_map_bio(fs_info, bio, 0);
			if (ret) {
				bio->bi_status = ret;
				bio_endio(bio);
			}

			bio = btrfs_bio_alloc(first_byte);
			bio->bi_opf = REQ_OP_WRITE | write_flags;
			bio->bi_private = cb;
			bio->bi_end_io = end_compressed_bio_write;
			if (blkcg_css)
				bio->bi_opf |= REQ_CGROUP_PUNT;
			bio_add_page(bio, page, PAGE_SIZE, 0);
		}
		if (bytes_left < PAGE_SIZE) {
			btrfs_info(fs_info,
					"bytes left %lu compress len %lu nr %lu",
			       bytes_left, cb->compressed_len, cb->nr_pages);
		}
		bytes_left -= PAGE_SIZE;
		first_byte += PAGE_SIZE;
		cond_resched();
	}

	ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
	BUG_ON(ret); /* -ENOMEM */

	if (!skip_sum) {
		ret = btrfs_csum_one_bio(inode, bio, start, 1);
		BUG_ON(ret); /* -ENOMEM */
	}

	ret = btrfs_map_bio(fs_info, bio, 0);
	if (ret) {
		bio->bi_status = ret;
		bio_endio(bio);
	}

	if (blkcg_css)
		kthread_associate_blkcg(NULL);

	return 0;
}

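/* Return the file offset just past the last byte covered by @bio */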
static u64 bio_end_offset(struct bio *bio)
{
	struct bio_vec *last = bio_last_bvec_all(bio);

	return page_offset(last->bv_page) + last->bv_len + last->bv_offset;
}

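/*
 * Try to add readahead pages to cb->orig_bio: pages in the cache gap
 * between the end of the original bio and the end of the compressed extent
 * (@compressed_end) that still map to this extent get decompressed for
 * free along with the requested range.  Bail out once too many pages are
 * found already cached or a page no longer maps to this extent.
 */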
static noinline int add_ra_bio_pages(struct inode *inode,
				     u64 compressed_end,
				     struct compressed_bio *cb)
{
	unsigned long end_index;
	unsigned long pg_index;
	u64 last_offset;
	u64 isize = i_size_read(inode);
	int ret;
	struct page *page;
	unsigned long nr_pages = 0;
	struct extent_map *em;
	struct address_space *mapping = inode->i_mapping;
	struct extent_map_tree *em_tree;
	struct extent_io_tree *tree;
	u64 end;
	int misses = 0;

	last_offset = bio_end_offset(cb->orig_bio);
	em_tree = &BTRFS_I(inode)->extent_tree;
	tree = &BTRFS_I(inode)->io_tree;

	if (isize == 0)
		return 0;

	end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;

	while (last_offset < compressed_end) {
		pg_index = last_offset >> PAGE_SHIFT;

		if (pg_index > end_index)
			break;

		page = xa_load(&mapping->i_pages, pg_index);
		if (page && !xa_is_value(page)) {
			misses++;
			if (misses > 4)
				break;
			goto next;
		}

		page = __page_cache_alloc(mapping_gfp_constraint(mapping,
								 ~__GFP_FS));
		if (!page)
			break;

		if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
			put_page(page);
			goto next;
		}

		/*
		 * At this point, we have a locked page in the page cache for
		 * these bytes in the file.  But, we have to make sure they
		 * map to this compressed extent on disk.
		 */
		ret = set_page_extent_mapped(page);
		if (ret < 0) {
			unlock_page(page);
			put_page(page);
			break;
		}

		end = last_offset + PAGE_SIZE - 1;
		lock_extent(tree, last_offset, end);
		read_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, last_offset,
					   PAGE_SIZE);
		read_unlock(&em_tree->lock);

		if (!em || last_offset < em->start ||
		    (last_offset + PAGE_SIZE > extent_map_end(em)) ||
		    (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) {
			free_extent_map(em);
			unlock_extent(tree, last_offset, end);
			unlock_page(page);
			put_page(page);
			break;
		}
		free_extent_map(em);

		if (page->index == end_index) {
			char *userpage;
			size_t zero_offset = offset_in_page(isize);

			if (zero_offset) {
				int zeros;
				zeros = PAGE_SIZE - zero_offset;
				userpage = kmap_atomic(page);
				memset(userpage + zero_offset, 0, zeros);
				flush_dcache_page(page);
				kunmap_atomic(userpage);
			}
		}

		ret = bio_add_page(cb->orig_bio, page,
				   PAGE_SIZE, 0);

		if (ret == PAGE_SIZE) {
			nr_pages++;
			put_page(page);
		} else {
			unlock_extent(tree, last_offset, end);
			unlock_page(page);
			put_page(page);
			break;
		}
next:
		last_offset += PAGE_SIZE;
	}
	return 0;
}

/*
 * For a compressed read, the bio we get passed has all the inode pages
 * in it.  We don't actually do IO on those pages but allocate new ones
 * to hold the compressed pages on disk.
 *
 * bio->bi_iter.bi_sector points to the compressed extent on disk
 * bio->bi_io_vec points to all of the inode pages
 *
 * After the compressed pages are read, we copy the bytes into the
 * bio we were passed and then call the bio end_io calls.
 */
blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
				 int mirror_num, unsigned long bio_flags)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_map_tree *em_tree;
	struct compressed_bio *cb;
	unsigned long compressed_len;
	unsigned long nr_pages;
	unsigned long pg_index;
	struct page *page;
	struct bio *comp_bio;
	u64 cur_disk_byte = bio->bi_iter.bi_sector << 9;
	u64 em_len;
	u64 em_start;
	struct extent_map *em;
	blk_status_t ret = BLK_STS_RESOURCE;
	int faili = 0;
	u8 *sums;

	em_tree = &BTRFS_I(inode)->extent_tree;

	/* We need the actual starting offset of this extent in the file */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree,
				   page_offset(bio_first_page_all(bio)),
				   fs_info->sectorsize);
	read_unlock(&em_tree->lock);
	if (!em)
		return BLK_STS_IOERR;

	compressed_len = em->block_len;
	cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
	if (!cb)
		goto out;

	refcount_set(&cb->pending_bios, 0);
	cb->errors = 0;
	cb->inode = inode;
	cb->mirror_num = mirror_num;
	sums = cb->sums;

	cb->start = em->orig_start;
	em_len = em->len;
	em_start = em->start;

	free_extent_map(em);
	em = NULL;

	cb->len = bio->bi_iter.bi_size;
	cb->compressed_len = compressed_len;
	cb->compress_type = extent_compress_type(bio_flags);
	cb->orig_bio = bio;

	nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
	cb->compressed_pages = kcalloc(nr_pages, sizeof(struct page *),
				       GFP_NOFS);
	if (!cb->compressed_pages)
		goto fail1;

	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
		cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS |
							    __GFP_HIGHMEM);
		if (!cb->compressed_pages[pg_index]) {
			faili = pg_index - 1;
			ret = BLK_STS_RESOURCE;
			goto fail2;
		}
	}
	faili = nr_pages - 1;
	cb->nr_pages = nr_pages;

	add_ra_bio_pages(inode, em_start + em_len, cb);

	/* Include any pages we added in add_ra_bio_pages */
	cb->len = bio->bi_iter.bi_size;

	comp_bio = btrfs_bio_alloc(cur_disk_byte);
	comp_bio->bi_opf = REQ_OP_READ;
	comp_bio->bi_private = cb;
	comp_bio->bi_end_io = end_compressed_bio_read;
	refcount_set(&cb->pending_bios, 1);

	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
		u32 pg_len = PAGE_SIZE;
		int submit = 0;

		/*
		 * To handle the subpage case, we need to make sure the bio
		 * only covers the range we need.
		 *
		 * If we're at the last page, truncate the length to only
		 * cover the remaining part.
		 */
		if (pg_index == nr_pages - 1)
			pg_len = min_t(u32, PAGE_SIZE,
					compressed_len - pg_index * PAGE_SIZE);

		page = cb->compressed_pages[pg_index];
		page->mapping = inode->i_mapping;
		page->index = em_start >> PAGE_SHIFT;

		if (comp_bio->bi_iter.bi_size)
			submit = btrfs_bio_fits_in_stripe(page, pg_len,
							  comp_bio, 0);

		page->mapping = NULL;
		if (submit || bio_add_page(comp_bio, page, pg_len, 0) < pg_len) {
			unsigned int nr_sectors;

			ret = btrfs_bio_wq_end_io(fs_info, comp_bio,
						  BTRFS_WQ_ENDIO_DATA);
			BUG_ON(ret); /* -ENOMEM */

			/*
			 * Increment the count before we submit the bio so we
			 * know the end IO handler won't happen before we
			 * increment the count.  Otherwise, the cb might get
			 * freed before we're done setting it up.
			 */
			refcount_inc(&cb->pending_bios);

			ret = btrfs_lookup_bio_sums(inode, comp_bio, sums);
			BUG_ON(ret); /* -ENOMEM */

			nr_sectors = DIV_ROUND_UP(comp_bio->bi_iter.bi_size,
						  fs_info->sectorsize);
			sums += fs_info->csum_size * nr_sectors;

			ret = btrfs_map_bio(fs_info, comp_bio, mirror_num);
			if (ret) {
				comp_bio->bi_status = ret;
				bio_endio(comp_bio);
			}

			comp_bio = btrfs_bio_alloc(cur_disk_byte);
			comp_bio->bi_opf = REQ_OP_READ;
			comp_bio->bi_private = cb;
			comp_bio->bi_end_io = end_compressed_bio_read;

			bio_add_page(comp_bio, page, pg_len, 0);
		}
		cur_disk_byte += pg_len;
	}

	ret = btrfs_bio_wq_end_io(fs_info, comp_bio, BTRFS_WQ_ENDIO_DATA);
	BUG_ON(ret); /* -ENOMEM */

	ret = btrfs_lookup_bio_sums(inode, comp_bio, sums);
	BUG_ON(ret); /* -ENOMEM */

	ret = btrfs_map_bio(fs_info, comp_bio, mirror_num);
	if (ret) {
		comp_bio->bi_status = ret;
		bio_endio(comp_bio);
	}

	return 0;

fail2:
	while (faili >= 0) {
		__free_page(cb->compressed_pages[faili]);
		faili--;
	}

	kfree(cb->compressed_pages);
fail1:
	kfree(cb);
out:
	free_extent_map(em);
	return ret;
}

/*
 * Heuristic uses systematic sampling to collect data from the input data
 * range, the logic can be tuned by the following constants:
 *
 * @SAMPLING_READ_SIZE - how many bytes will be copied from for each sample
 * @SAMPLING_INTERVAL  - range from which the sampled data can be collected
 */
#define SAMPLING_READ_SIZE	(16)
#define SAMPLING_INTERVAL	(256)

/*
 * For statistical analysis of the input data we consider bytes that form a
 * Galois Field of 256 objects. Each object has an attribute count, ie. how
 * many times the object appeared in the sample.
 */
#define BUCKET_SIZE		(256)

/*
 * The size of the sample is based on a statistical sampling rule of thumb.
 * The common way is to perform sampling tests as long as the number of
 * elements in each cell is at least 5.
 *
 * Instead of 5, we choose 32 to obtain more accurate results.
 * If the data contain the maximum number of symbols, which is 256, we obtain a
 * sample size bound by 8192.
 *
 * For a sample of at most 8KB of data per data range: 16 consecutive bytes
 * from up to 512 locations.
 */
#define MAX_SAMPLE_SIZE		(BTRFS_MAX_UNCOMPRESSED * \
				 SAMPLING_READ_SIZE / SAMPLING_INTERVAL)

struct bucket_item {
	u32 count;
};

struct heuristic_ws {
	/* Partial copy of input data */
	u8 *sample;
	u32 sample_size;
	/* Buckets store counters for each byte value */
	struct bucket_item *bucket;
	/* Sorting buffer */
	struct bucket_item *bucket_b;
	struct list_head list;
};

static struct workspace_manager heuristic_wsm;

static void free_heuristic_ws(struct list_head *ws)
{
	struct heuristic_ws *workspace;

	workspace = list_entry(ws, struct heuristic_ws, list);

	kvfree(workspace->sample);
	kfree(workspace->bucket);
	kfree(workspace->bucket_b);
	kfree(workspace);
}

static struct list_head *alloc_heuristic_ws(unsigned int level)
{
	struct heuristic_ws *ws;

	ws = kzalloc(sizeof(*ws), GFP_KERNEL);
	if (!ws)
		return ERR_PTR(-ENOMEM);

	ws->sample = kvmalloc(MAX_SAMPLE_SIZE, GFP_KERNEL);
	if (!ws->sample)
		goto fail;

	ws->bucket = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket), GFP_KERNEL);
	if (!ws->bucket)
		goto fail;

	ws->bucket_b = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket_b), GFP_KERNEL);
	if (!ws->bucket_b)
		goto fail;

	INIT_LIST_HEAD(&ws->list);
	return &ws->list;
fail:
	free_heuristic_ws(&ws->list);
	return ERR_PTR(-ENOMEM);
}

const struct btrfs_compress_op btrfs_heuristic_compress = {
	.workspace_manager = &heuristic_wsm,
};

static const struct btrfs_compress_op * const btrfs_compress_op[] = {
	/* The heuristic is represented as compression type 0 */
	&btrfs_heuristic_compress,
	&btrfs_zlib_compress,
	&btrfs_lzo_compress,
	&btrfs_zstd_compress,
};

static struct list_head *alloc_workspace(int type, unsigned int level)
{
	switch (type) {
	case BTRFS_COMPRESS_NONE: return alloc_heuristic_ws(level);
	case BTRFS_COMPRESS_ZLIB: return zlib_alloc_workspace(level);
	case BTRFS_COMPRESS_LZO: return lzo_alloc_workspace(level);
	case BTRFS_COMPRESS_ZSTD: return zstd_alloc_workspace(level);
	default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
		BUG();
	}
}

static void free_workspace(int type, struct list_head *ws)
{
	switch (type) {
	case BTRFS_COMPRESS_NONE: return free_heuristic_ws(ws);
	case BTRFS_COMPRESS_ZLIB: return zlib_free_workspace(ws);
	case BTRFS_COMPRESS_LZO: return lzo_free_workspace(ws);
	case BTRFS_COMPRESS_ZSTD: return zstd_free_workspace(ws);
	default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
		BUG();
	}
}

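/*
 * Initialize the workspace manager for @type: set up the idle list and
 * preallocate one workspace so that (de)compression can always make
 * forward progress, even when later allocations fail.
 */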
static void btrfs_init_workspace_manager(int type)
{
	struct workspace_manager *wsm;
	struct list_head *workspace;

	wsm = btrfs_compress_op[type]->workspace_manager;
	INIT_LIST_HEAD(&wsm->idle_ws);
	spin_lock_init(&wsm->ws_lock);
	atomic_set(&wsm->total_ws, 0);
	init_waitqueue_head(&wsm->ws_wait);

	/*
	 * Preallocate one workspace for each compression type so we can
	 * guarantee forward progress in the worst case
	 */
	workspace = alloc_workspace(type, 0);
	if (IS_ERR(workspace)) {
		pr_warn(
	"BTRFS: cannot preallocate compression workspace, will try later\n");
	} else {
		atomic_set(&wsm->total_ws, 1);
		wsm->free_ws = 1;
		list_add(workspace, &wsm->idle_ws);
	}
}

static void btrfs_cleanup_workspace_manager(int type)
{
	struct workspace_manager *wsman;
	struct list_head *ws;

	wsman = btrfs_compress_op[type]->workspace_manager;
	while (!list_empty(&wsman->idle_ws)) {
		ws = wsman->idle_ws.next;
		list_del(ws);
		free_workspace(type, ws);
		atomic_dec(&wsman->total_ws);
	}
}

/*
 * This finds an available workspace or allocates a new one.
 * If it's not possible to allocate a new one, waits until there's one.
 * Preallocation makes forward progress guarantees and we do not return
 * errors.
 */
struct list_head *btrfs_get_workspace(int type, unsigned int level)
{
	struct workspace_manager *wsm;
	struct list_head *workspace;
	int cpus = num_online_cpus();
	unsigned nofs_flag;
	struct list_head *idle_ws;
	spinlock_t *ws_lock;
	atomic_t *total_ws;
	wait_queue_head_t *ws_wait;
	int *free_ws;

	wsm = btrfs_compress_op[type]->workspace_manager;
	idle_ws = &wsm->idle_ws;
	ws_lock = &wsm->ws_lock;
	total_ws = &wsm->total_ws;
	ws_wait = &wsm->ws_wait;
	free_ws = &wsm->free_ws;

again:
	spin_lock(ws_lock);
	if (!list_empty(idle_ws)) {
		workspace = idle_ws->next;
		list_del(workspace);
		(*free_ws)--;
		spin_unlock(ws_lock);
		return workspace;
	}
	if (atomic_read(total_ws) > cpus) {
		DEFINE_WAIT(wait);

		spin_unlock(ws_lock);
		prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE);
		if (atomic_read(total_ws) > cpus && !*free_ws)
			schedule();
		finish_wait(ws_wait, &wait);
		goto again;
	}
	atomic_inc(total_ws);
	spin_unlock(ws_lock);

	/*
	 * Allocation helpers call vmalloc that can't use GFP_NOFS, so we have
	 * to turn it off here because we might get called from the restricted
	 * context of btrfs_compress_bio/btrfs_compress_pages
	 */
	nofs_flag = memalloc_nofs_save();
	workspace = alloc_workspace(type, level);
	memalloc_nofs_restore(nofs_flag);

	if (IS_ERR(workspace)) {
		atomic_dec(total_ws);
		wake_up(ws_wait);

		/*
		 * Do not return the error but go back to waiting. There's a
		 * workspace preallocated for each type and the compression
		 * time is bounded so we get to a workspace eventually. This
		 * makes our caller's life easier.
		 *
		 * To prevent silent and low-probability deadlocks (when the
		 * initial preallocation fails), check if there are any
		 * workspaces at all.
		 */
		if (atomic_read(total_ws) == 0) {
			static DEFINE_RATELIMIT_STATE(_rs,
					/* once per minute */ 60 * HZ,
					/* no burst */ 1);

			if (__ratelimit(&_rs)) {
				pr_warn("BTRFS: no compression workspaces, low memory, retrying\n");
			}
		}
		goto again;
	}
	return workspace;
}

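/*
 * Pick a workspace for @type: zlib and zstd have level-aware managers of
 * their own, the heuristic and lzo use the generic manager above.
 */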
static struct list_head *get_workspace(int type, int level)
{
	switch (type) {
	case BTRFS_COMPRESS_NONE: return btrfs_get_workspace(type, level);
	case BTRFS_COMPRESS_ZLIB: return zlib_get_workspace(level);
	case BTRFS_COMPRESS_LZO: return btrfs_get_workspace(type, level);
	case BTRFS_COMPRESS_ZSTD: return zstd_get_workspace(level);
	default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
		BUG();
	}
}

/*
 * Put a workspace struct back on the list or free it if we have enough
 * idle ones sitting around.
 */
void btrfs_put_workspace(int type, struct list_head *ws)
{
	struct workspace_manager *wsm;
	struct list_head *idle_ws;
	spinlock_t *ws_lock;
	atomic_t *total_ws;
	wait_queue_head_t *ws_wait;
	int *free_ws;

	wsm = btrfs_compress_op[type]->workspace_manager;
	idle_ws = &wsm->idle_ws;
	ws_lock = &wsm->ws_lock;
	total_ws = &wsm->total_ws;
	ws_wait = &wsm->ws_wait;
	free_ws = &wsm->free_ws;

	spin_lock(ws_lock);
	if (*free_ws <= num_online_cpus()) {
		list_add(ws, idle_ws);
		(*free_ws)++;
		spin_unlock(ws_lock);
		goto wake;
	}
	spin_unlock(ws_lock);

	free_workspace(type, ws);
	atomic_dec(total_ws);
wake:
	cond_wake_up(ws_wait);
}

static void put_workspace(int type, struct list_head *ws)
{
	switch (type) {
	case BTRFS_COMPRESS_NONE: return btrfs_put_workspace(type, ws);
	case BTRFS_COMPRESS_ZLIB: return btrfs_put_workspace(type, ws);
	case BTRFS_COMPRESS_LZO: return btrfs_put_workspace(type, ws);
	case BTRFS_COMPRESS_ZSTD: return zstd_put_workspace(ws);
	default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
		BUG();
	}
}

/*
 * Adjust @level according to the limits of the compression algorithm or
 * fallback to default.
 */
static unsigned int btrfs_compress_set_level(int type, unsigned level)
{
	const struct btrfs_compress_op *ops = btrfs_compress_op[type];

	if (level == 0)
		level = ops->default_level;
	else
		level = min(level, ops->max_level);

	return level;
}

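/*
 * As an example of the @type_level encoding documented below:
 * BTRFS_COMPRESS_ZLIB is type 1, so zlib at level 9 is passed as
 * (9 << 4) | BTRFS_COMPRESS_ZLIB == 0x91, and a zero level nibble makes
 * btrfs_compress_set_level() above pick the algorithm's default.
 */
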
/*
 * Given an address space and start and length, compress the bytes into @pages
 * that are allocated on demand.
 *
 * @type_level is encoded algorithm and level, where level 0 means whatever
 * default the algorithm chooses and is opaque here;
 * - the compression algorithm is stored in bits 0-3
 * - the level is stored in bits 4-7
 *
 * @out_pages is an in/out parameter, holds maximum number of pages to allocate
 * and returns number of actually allocated pages
 *
 * @total_in is used to return the number of bytes actually read. It
 * may be smaller than the input length if we had to exit early because we
 * ran out of room in the pages array or because we cross the
 * max_out threshold.
 *
 * @total_out is an in/out parameter, must be set to the input length and will
 * be also used to return the total number of compressed bytes
 *
 * @max_out tells us the max number of bytes that we're allowed to
 * stuff into pages
 */
int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
			 u64 start, struct page **pages,
			 unsigned long *out_pages,
			 unsigned long *total_in,
			 unsigned long *total_out)
{
	int type = btrfs_compress_type(type_level);
	int level = btrfs_compress_level(type_level);
	struct list_head *workspace;
	int ret;

	level = btrfs_compress_set_level(type, level);
	workspace = get_workspace(type, level);
	ret = compression_compress_pages(type, workspace, mapping, start, pages,
					 out_pages, total_in, total_out);
	put_workspace(type, workspace);
	return ret;
}

/*
 * pages_in is an array of pages with compressed data.
 *
 * disk_start is the starting logical offset of this array in the file
 *
 * orig_bio contains the pages from the file that we want to decompress into
 *
 * srclen is the number of bytes in pages_in
 *
 * The basic idea is that we have a bio that was created by readpages.
 * The pages in the bio are for the uncompressed data, and they may not
 * be contiguous.  They all correspond to the range of bytes covered by
 * the compressed extent.
 */
static int btrfs_decompress_bio(struct compressed_bio *cb)
{
	struct list_head *workspace;
	int ret;
	int type = cb->compress_type;

	workspace = get_workspace(type, 0);
	ret = compression_decompress_bio(type, workspace, cb);
	put_workspace(type, workspace);

	return ret;
}

/*
 * A less complex decompression routine.  Our compressed data fits in a
 * single page, and we want to read a single page out of it.
 * start_byte tells us the offset into the compressed data we're interested in.
 */
int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
		     unsigned long start_byte, size_t srclen, size_t destlen)
{
	struct list_head *workspace;
	int ret;

	workspace = get_workspace(type, 0);
	ret = compression_decompress(type, workspace, data_in, dest_page,
				     start_byte, srclen, destlen);
	put_workspace(type, workspace);

	return ret;
}

void __init btrfs_init_compress(void)
{
	btrfs_init_workspace_manager(BTRFS_COMPRESS_NONE);
	btrfs_init_workspace_manager(BTRFS_COMPRESS_ZLIB);
	btrfs_init_workspace_manager(BTRFS_COMPRESS_LZO);
	zstd_init_workspace_manager();
}

void __cold btrfs_exit_compress(void)
{
	btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_NONE);
	btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_ZLIB);
	btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_LZO);
	zstd_cleanup_workspace_manager();
}

/*
 * Copy uncompressed data from working buffer to pages.
 *
 * buf_start is the byte offset of the start of our working buffer, relative
 * to the start of the uncompressed data.
 *
 * total_out is the offset of the last byte of the working buffer, again
 * relative to the start of the uncompressed data.
 */
int btrfs_decompress_buf2page(const char *buf, unsigned long buf_start,
			      unsigned long total_out, u64 disk_start,
			      struct bio *bio)
{
	unsigned long buf_offset;
	unsigned long current_buf_start;
	unsigned long start_byte;
	unsigned long prev_start_byte;
	unsigned long working_bytes = total_out - buf_start;
	unsigned long bytes;
	struct bio_vec bvec = bio_iter_iovec(bio, bio->bi_iter);

	/*
	 * start_byte is the first byte of the page we're currently
	 * copying into, relative to the start of the uncompressed data.
	 */
	start_byte = page_offset(bvec.bv_page) - disk_start;

	/* We haven't yet hit data corresponding to this page */
	if (total_out <= start_byte)
		return 1;

	/*
	 * The start of the data we care about is offset into
	 * the middle of our working buffer.
	 */
	if (total_out > start_byte && buf_start < start_byte) {
		buf_offset = start_byte - buf_start;
		working_bytes -= buf_offset;
	} else {
		buf_offset = 0;
	}
	current_buf_start = buf_start;

	/* Copy bytes from the working buffer into the pages */
	while (working_bytes > 0) {
		bytes = min_t(unsigned long, bvec.bv_len,
				PAGE_SIZE - (buf_offset % PAGE_SIZE));
		bytes = min(bytes, working_bytes);

		memcpy_to_page(bvec.bv_page, bvec.bv_offset, buf + buf_offset,
			       bytes);
		flush_dcache_page(bvec.bv_page);

		buf_offset += bytes;
		working_bytes -= bytes;
		current_buf_start += bytes;

		/* Check if we need to pick another page */
		bio_advance(bio, bytes);
		if (!bio->bi_iter.bi_size)
			return 0;
		bvec = bio_iter_iovec(bio, bio->bi_iter);
		prev_start_byte = start_byte;
		start_byte = page_offset(bvec.bv_page) - disk_start;

		/*
		 * We need to make sure we're only adjusting
		 * our offset into the compression working buffer when
		 * we're switching pages.  Otherwise we can incorrectly
		 * keep copying when we were actually done.
		 */
		if (start_byte != prev_start_byte) {
			/*
			 * Make sure our new page is covered by this
			 * working buffer.
			 */
			if (total_out <= start_byte)
				return 1;

			/*
			 * The next page in the biovec might not be adjacent
			 * to the last page, but it might still be found
			 * inside this working buffer.  Bump our offset
			 * pointer.
			 */
			if (total_out > start_byte &&
			    current_buf_start < start_byte) {
				buf_offset = start_byte - buf_start;
				working_bytes = total_out - start_byte;
				current_buf_start = buf_start + buf_offset;
			}
		}
	}

	return 1;
}

/*
 * Shannon Entropy calculation.
 *
 * Pure byte distribution analysis fails to determine compressibility of data.
 * Try calculating entropy to estimate the average minimum number of bits
 * needed to encode the sampled data.
 *
 * For convenience, return the percentage of needed bits, instead of amount of
 * bits directly.
 *
 * @ENTROPY_LVL_ACEPTABLE - below that threshold, sample has low byte entropy
 *			    and can be compressible with high probability
 *
 * @ENTROPY_LVL_HIGH - data are not compressible with high probability
 *
 * Use of ilog2() decreases precision, we lower the LVL to 5 to compensate.
 */
#define ENTROPY_LVL_ACEPTABLE		(65)
#define ENTROPY_LVL_HIGH		(80)

/*
 * For increased precision in shannon_entropy calculation,
 * let's do pow(n, M) to save more digits after the decimal point:
 *
 * - maximum int bit length is 64
 * - ilog2(MAX_SAMPLE_SIZE) -> 13
 * - 13 * 4 = 52 < 64 -> M = 4
 *
 * So use pow(n, 4).
 */
static inline u32 ilog2_w(u64 n)
{
	return ilog2(n * n * n * n);
}

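/*
 * Returns the entropy of the sample as a percentage of the 8 bits/byte
 * maximum.  Each term p * (sz_base - p_base) approximates
 * p * log2(sample_size / p) (scaled by 4 through ilog2_w() above), and
 * entropy_max == 8 * ilog2_w(2) carries the same scale factor, so the
 * final division yields a plain 0-100 percentage.
 */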
static u32 shannon_entropy(struct heuristic_ws *ws)
{
	const u32 entropy_max = 8 * ilog2_w(2);
	u32 entropy_sum = 0;
	u32 p, p_base, sz_base;
	u32 i;

	sz_base = ilog2_w(ws->sample_size);
	for (i = 0; i < BUCKET_SIZE && ws->bucket[i].count > 0; i++) {
		p = ws->bucket[i].count;
		p_base = ilog2_w(p);
		entropy_sum += p * (sz_base - p_base);
	}

	entropy_sum /= ws->sample_size;
	return entropy_sum * 100 / entropy_max;
}

#define RADIX_BASE	4U
#define COUNTERS_SIZE	(1U << RADIX_BASE)

static u8 get4bits(u64 num, int shift)
{
	u8 low4bits;

	num >>= shift;
	/* Reverse order */
	low4bits = (COUNTERS_SIZE - 1) - (num % COUNTERS_SIZE);
	return low4bits;
}

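/*
 * Note the inversion in get4bits(): each 4-bit digit is mapped to
 * (COUNTERS_SIZE - 1) - digit, so the otherwise ascending radix sort
 * below produces buckets ordered by descending count, which is what
 * byte_core_set_size() relies on.
 */
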
440c840c TT |
1411 | /* |
1412 | * Use 4 bits as radix base | |
52042d8e | 1413 | * Use 16 u32 counters for calculating new position in buf array |
440c840c TT |
1414 | * |
1415 | * @array - array that will be sorted | |
1416 | * @array_buf - buffer array to store sorting results | |
1417 | * must be equal in size to @array | |
1418 | * @num - array size | |
440c840c | 1419 | */ |
23ae8c63 | 1420 | static void radix_sort(struct bucket_item *array, struct bucket_item *array_buf, |
36243c91 | 1421 | int num) |
858177d3 | 1422 | { |
440c840c TT |
1423 | u64 max_num; |
1424 | u64 buf_num; | |
1425 | u32 counters[COUNTERS_SIZE]; | |
1426 | u32 new_addr; | |
1427 | u32 addr; | |
1428 | int bitlen; | |
1429 | int shift; | |
1430 | int i; | |
858177d3 | 1431 | |
440c840c TT |
1432 | /* |
1433 | * Try avoid useless loop iterations for small numbers stored in big | |
1434 | * counters. Example: 48 33 4 ... in 64bit array | |
1435 | */ | |
23ae8c63 | 1436 | max_num = array[0].count; |
440c840c | 1437 | for (i = 1; i < num; i++) { |
23ae8c63 | 1438 | buf_num = array[i].count; |
440c840c TT |
1439 | if (buf_num > max_num) |
1440 | max_num = buf_num; | |
1441 | } | |
1442 | ||
1443 | buf_num = ilog2(max_num); | |
1444 | bitlen = ALIGN(buf_num, RADIX_BASE * 2); | |
1445 | ||
1446 | shift = 0; | |
1447 | while (shift < bitlen) { | |
1448 | memset(counters, 0, sizeof(counters)); | |
1449 | ||
1450 | for (i = 0; i < num; i++) { | |
23ae8c63 | 1451 | buf_num = array[i].count; |
440c840c TT |
1452 | addr = get4bits(buf_num, shift); |
1453 | counters[addr]++; | |
1454 | } | |
1455 | ||
1456 | for (i = 1; i < COUNTERS_SIZE; i++) | |
1457 | counters[i] += counters[i - 1]; | |
1458 | ||
1459 | for (i = num - 1; i >= 0; i--) { | |
23ae8c63 | 1460 | buf_num = array[i].count; |
440c840c TT |
1461 | addr = get4bits(buf_num, shift); |
1462 | counters[addr]--; | |
1463 | new_addr = counters[addr]; | |
7add17be | 1464 | array_buf[new_addr] = array[i]; |
440c840c TT |
1465 | } |
1466 | ||
1467 | shift += RADIX_BASE; | |
1468 | ||
1469 | /* | |
1470 | * A normal radix sort would move the data from the temporary | |
1471 | * array back to the main one, but that costs CPU time. Avoid | |
1472 | * that memcpy() by doing another sort iteration into the | |
1473 | * original array instead. | |
1474 | */ | |
1475 | memset(counters, 0, sizeof(counters)); | |
1476 | ||
1477 | for (i = 0; i < num; i++) { | |
23ae8c63 | 1478 | buf_num = array_buf[i].count; |
440c840c TT |
1479 | addr = get4bits(buf_num, shift); |
1480 | counters[addr]++; | |
1481 | } | |
1482 | ||
1483 | for (i = 1; i < COUNTERS_SIZE; i++) | |
1484 | counters[i] += counters[i - 1]; | |
1485 | ||
1486 | for (i = num - 1; i >= 0; i--) { | |
23ae8c63 | 1487 | buf_num = array_buf[i].count; |
440c840c TT |
1488 | addr = get4bits(buf_num, shift); |
1489 | counters[addr]--; | |
1490 | new_addr = counters[addr]; | |
7add17be | 1491 | array[new_addr] = array_buf[i]; |
440c840c TT |
1492 | } |
1493 | ||
1494 | shift += RADIX_BASE; | |
1495 | } | |
858177d3 TT |
1496 | } |
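/*
 * Userspace sketch (not kernel code; all demo_* names are hypothetical) of
 * the reverse-digit trick used by get4bits() above: an ordinary ascending
 * LSD radix sort whose digits are flipped emits a descending order, and an
 * even number of passes lands the result back in the source array (hence
 * aligning bitlen to RADIX_BASE * 2).
 */
#include <stdio.h>
#include <stdint.h>

#define DEMO_RADIX_BASE		4U
#define DEMO_COUNTERS_SIZE	(1U << DEMO_RADIX_BASE)

static uint8_t demo_get4bits(uint64_t num, int shift)
{
	/* Reverse the digit so bigger values sort first */
	return (DEMO_COUNTERS_SIZE - 1) - ((num >> shift) % DEMO_COUNTERS_SIZE);
}

/* One stable counting-sort pass over a 4-bit digit */
static void radix_pass(const uint32_t *src, uint32_t *dst, int num, int shift)
{
	uint32_t counters[DEMO_COUNTERS_SIZE] = { 0 };
	int i;

	for (i = 0; i < num; i++)
		counters[demo_get4bits(src[i], shift)]++;
	for (i = 1; i < (int)DEMO_COUNTERS_SIZE; i++)
		counters[i] += counters[i - 1];
	for (i = num - 1; i >= 0; i--)
		dst[--counters[demo_get4bits(src[i], shift)]] = src[i];
}

int main(void)
{
	uint32_t a[] = { 4, 48, 33, 7, 48 }, tmp[5];
	int i;

	/* Two 4-bit passes cover the 8 significant bits and end in a[] */
	radix_pass(a, tmp, 5, 0);
	radix_pass(tmp, a, 5, DEMO_RADIX_BASE);
	for (i = 0; i < 5; i++)
		printf("%u ", a[i]);	/* prints: 48 48 33 7 4 */
	printf("\n");
	return 0;
}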
1497 | ||
1498 | /* | |
1499 | * Size of the core byte set - how many bytes cover 90% of the sample | |
1500 | * | |
1501 | * There are several types of structured binary data that use nearly all byte | |
1502 | * values. The distribution can be uniform and counts in all buckets will be | |
1503 | * nearly the same (eg. encrypted data). Unlikely to be compressible. | |
1504 | * | |
1505 | * Another possibility is a normal (Gaussian) distribution, where the data could | |
1506 | * be potentially compressible, but we have to take a few more steps to decide | |
1507 | * how much. | |
1508 | * | |
1509 | * @BYTE_CORE_SET_LOW - the main part of the byte values repeats frequently, | |
1510 | * which a compression algorithm can easily exploit | |
1511 | * @BYTE_CORE_SET_HIGH - the data has a uniform distribution and is most | |
1512 | * likely not compressible | |
1513 | */ | |
1514 | #define BYTE_CORE_SET_LOW (64) | |
1515 | #define BYTE_CORE_SET_HIGH (200) | |
1516 | ||
1517 | static int byte_core_set_size(struct heuristic_ws *ws) | |
1518 | { | |
1519 | u32 i; | |
1520 | u32 coreset_sum = 0; | |
1521 | const u32 core_set_threshold = ws->sample_size * 90 / 100; | |
1522 | struct bucket_item *bucket = ws->bucket; | |
1523 | ||
1524 | /* Sort in reverse order */ | |
36243c91 | 1525 | radix_sort(ws->bucket, ws->bucket_b, BUCKET_SIZE); |
858177d3 TT |
1526 | |
1527 | for (i = 0; i < BYTE_CORE_SET_LOW; i++) | |
1528 | coreset_sum += bucket[i].count; | |
1529 | ||
1530 | if (coreset_sum > core_set_threshold) | |
1531 | return i; | |
1532 | ||
1533 | for (; i < BYTE_CORE_SET_HIGH && bucket[i].count > 0; i++) { | |
1534 | coreset_sum += bucket[i].count; | |
1535 | if (coreset_sum > core_set_threshold) | |
1536 | break; | |
1537 | } | |
1538 | ||
1539 | return i; | |
1540 | } | |
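/*
 * Worked example of the thresholds above (hypothetical numbers, assuming a
 * full 8KiB sample): core_set_threshold = 8192 * 90 / 100 = 7372. If the 64
 * most frequent byte values together cover more than 7372 sampled bytes,
 * the function returns 64 == BYTE_CORE_SET_LOW and the caller compresses;
 * if even the 200 most frequent values are not enough, it returns
 * i >= BYTE_CORE_SET_HIGH and the caller gives up on this range. Results
 * in between fall through to the entropy check.
 */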
1541 | ||
a288e92c TT |
1542 | /* |
1543 | * Count byte values in buckets. | |
1544 | * This heuristic can detect textual data (configs, xml, json, html, etc). | |
1545 | * In most text-like data the byte set is restricted to a limited number of | |
1546 | * possible characters, and that restriction usually makes the data easy to | |
1547 | * compress. | |
1548 | * | |
1549 | * @BYTE_SET_THRESHOLD - consider all data within this byte set size: | |
1550 | * less - compressible | |
1551 | * more - needs additional analysis | |
1552 | */ | |
1553 | #define BYTE_SET_THRESHOLD (64) | |
1554 | ||
1555 | static u32 byte_set_size(const struct heuristic_ws *ws) | |
1556 | { | |
1557 | u32 i; | |
1558 | u32 byte_set_size = 0; | |
1559 | ||
1560 | for (i = 0; i < BYTE_SET_THRESHOLD; i++) { | |
1561 | if (ws->bucket[i].count > 0) | |
1562 | byte_set_size++; | |
1563 | } | |
1564 | ||
1565 | /* | |
1566 | * Continue collecting count of byte values in buckets. If the byte | |
1567 | * set size is bigger than the threshold, it's pointless to continue, | |
1568 | * as the detection technique would fail for this type of data. | |
1569 | */ | |
1570 | for (; i < BUCKET_SIZE; i++) { | |
1571 | if (ws->bucket[i].count > 0) { | |
1572 | byte_set_size++; | |
1573 | if (byte_set_size > BYTE_SET_THRESHOLD) | |
1574 | return byte_set_size; | |
1575 | } | |
1576 | } | |
1577 | ||
1578 | return byte_set_size; | |
1579 | } | |
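/*
 * Userspace sketch (not kernel code; distinct_bytes is a hypothetical
 * helper name) of the quantity the two loops above compute, without the
 * early cut-off. Text-like data stays far below BYTE_SET_THRESHOLD (64).
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

static uint32_t distinct_bytes(const uint8_t *buf, size_t len)
{
	uint8_t seen[256] = { 0 };
	uint32_t n = 0;
	size_t i;

	for (i = 0; i < len; i++) {
		if (!seen[buf[i]]) {
			seen[buf[i]] = 1;
			n++;
		}
	}
	return n;
}

int main(void)
{
	const char *s = "GET /index.html HTTP/1.1\r\n";

	printf("%u distinct byte values\n",
	       distinct_bytes((const uint8_t *)s, strlen(s)));
	return 0;
}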
1580 | ||
1fe4f6fa TT |
1581 | static bool sample_repeated_patterns(struct heuristic_ws *ws) |
1582 | { | |
1583 | const u32 half_of_sample = ws->sample_size / 2; | |
1584 | const u8 *data = ws->sample; | |
1585 | ||
1586 | return memcmp(&data[0], &data[half_of_sample], half_of_sample) == 0; | |
1587 | } | |
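/*
 * Cheapest detector first: a zero-filled range (or any range whose second
 * half mirrors the first) makes the two halves of the sample identical,
 * so the heuristic can answer "compress" without building the byte
 * buckets at all.
 */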
1588 | ||
a440d48c TT |
1589 | static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end, |
1590 | struct heuristic_ws *ws) | |
1591 | { | |
1592 | struct page *page; | |
1593 | u64 index, index_end; | |
1594 | u32 i, curr_sample_pos; | |
1595 | u8 *in_data; | |
1596 | ||
1597 | /* | |
1598 | * Compression handles the input data in chunks of 128KiB | |
1599 | * (defined by BTRFS_MAX_UNCOMPRESSED) | |
1600 | * | |
1601 | * We do the same for the heuristic and loop over the whole range. | |
1602 | * | |
1603 | * MAX_SAMPLE_SIZE - calculated under the assumption that the heuristic | |
1604 | * will process no more than BTRFS_MAX_UNCOMPRESSED at a time. | |
1605 | */ | |
1606 | if (end - start > BTRFS_MAX_UNCOMPRESSED) | |
1607 | end = start + BTRFS_MAX_UNCOMPRESSED; | |
1608 | ||
1609 | index = start >> PAGE_SHIFT; | |
1610 | index_end = end >> PAGE_SHIFT; | |
1611 | ||
1612 | /* Don't miss unaligned end */ | |
1613 | if (!IS_ALIGNED(end, PAGE_SIZE)) | |
1614 | index_end++; | |
1615 | ||
1616 | curr_sample_pos = 0; | |
1617 | while (index < index_end) { | |
1618 | page = find_get_page(inode->i_mapping, index); | |
58c1a35c | 1619 | in_data = kmap_local_page(page); |
a440d48c TT |
1620 | /* Handle case where the start is not aligned to PAGE_SIZE */ |
1621 | i = start % PAGE_SIZE; | |
1622 | while (i < PAGE_SIZE - SAMPLING_READ_SIZE) { | |
1623 | /* Don't sample any garbage from the last page */ | |
1624 | if (start > end - SAMPLING_READ_SIZE) | |
1625 | break; | |
1626 | memcpy(&ws->sample[curr_sample_pos], &in_data[i], | |
1627 | SAMPLING_READ_SIZE); | |
1628 | i += SAMPLING_INTERVAL; | |
1629 | start += SAMPLING_INTERVAL; | |
1630 | curr_sample_pos += SAMPLING_READ_SIZE; | |
1631 | } | |
58c1a35c | 1632 | kunmap_local(in_data); |
a440d48c TT |
1633 | put_page(page); |
1634 | ||
1635 | index++; | |
1636 | } | |
1637 | ||
1638 | ws->sample_size = curr_sample_pos; | |
1639 | } | |
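/*
 * Sampling arithmetic, assuming the SAMPLING_* constants defined earlier
 * in this file (16-byte reads every 256 bytes): a full 128KiB chunk yields
 * at most 128KiB / 256 * 16 = 8KiB of sample, i.e. the MAX_SAMPLE_SIZE
 * that the ilog2_w() bit budget above (ilog2(8192) == 13) was sized for.
 */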
1640 | ||
c2fcdcdf TT |
1641 | /* |
1642 | * Compression heuristic. | |
1643 | * | |
1644 | * For now it's a naive and optimistic 'return true'; we'll extend the logic to | |
1645 | * quickly (compared to direct compression) detect data characteristics | |
1646 | * (compressible/incompressible) to avoid wasting CPU time on incompressible | |
1647 | * data. | |
1648 | * | |
1649 | * The following types of analysis can be performed: | |
1650 | * - detect mostly zero data | |
1651 | * - detect data with low "byte set" size (text, etc) | |
1652 | * - detect data with low/high "core byte" set | |
1653 | * | |
1654 | * Return non-zero if the compression should be done, 0 otherwise. | |
1655 | */ | |
1656 | int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end) | |
1657 | { | |
7bf49943 | 1658 | struct list_head *ws_list = get_workspace(0, 0); |
4e439a0b | 1659 | struct heuristic_ws *ws; |
a440d48c TT |
1660 | u32 i; |
1661 | u8 byte; | |
19562430 | 1662 | int ret = 0; |
c2fcdcdf | 1663 | |
4e439a0b TT |
1664 | ws = list_entry(ws_list, struct heuristic_ws, list); |
1665 | ||
a440d48c TT |
1666 | heuristic_collect_sample(inode, start, end, ws); |
1667 | ||
1fe4f6fa TT |
1668 | if (sample_repeated_patterns(ws)) { |
1669 | ret = 1; | |
1670 | goto out; | |
1671 | } | |
1672 | ||
a440d48c TT |
1673 | memset(ws->bucket, 0, sizeof(*ws->bucket) * BUCKET_SIZE); | |
1674 | ||
1675 | for (i = 0; i < ws->sample_size; i++) { | |
1676 | byte = ws->sample[i]; | |
1677 | ws->bucket[byte].count++; | |
c2fcdcdf TT |
1678 | } |
1679 | ||
a288e92c TT |
1680 | i = byte_set_size(ws); |
1681 | if (i < BYTE_SET_THRESHOLD) { | |
1682 | ret = 2; | |
1683 | goto out; | |
1684 | } | |
1685 | ||
858177d3 TT |
1686 | i = byte_core_set_size(ws); |
1687 | if (i <= BYTE_CORE_SET_LOW) { | |
1688 | ret = 3; | |
1689 | goto out; | |
1690 | } | |
1691 | ||
1692 | if (i >= BYTE_CORE_SET_HIGH) { | |
1693 | ret = 0; | |
1694 | goto out; | |
1695 | } | |
1696 | ||
19562430 TT |
1697 | i = shannon_entropy(ws); |
1698 | if (i <= ENTROPY_LVL_ACEPTABLE) { | |
1699 | ret = 4; | |
1700 | goto out; | |
1701 | } | |
1702 | ||
1703 | /* | |
1704 | * For the levels below ENTROPY_LVL_HIGH, additional analysis would be | |
1705 | * needed to give green light to compression. | |
1706 | * | |
1707 | * For now just assume that compression at that level is not worth the | |
1708 | * resources because: | |
1709 | * | |
1710 | * 1. it is possible to defrag the data later | |
1711 | * | |
1712 | * 2. the data would turn out to be hardly compressible, eg. 150 distinct | |
1713 | * byte values with every bucket count at a level of ~54. The heuristic | |
1714 | * would be confused. This can happen when data has some internal repeated | |
1715 | * patterns like "abbacbbc...". This could be detected by analyzing | |
1716 | * pairs of bytes, which is too costly. | |
1717 | */ | |
1718 | if (i < ENTROPY_LVL_HIGH) { | |
1719 | ret = 5; | |
1720 | goto out; | |
1721 | } else { | |
1722 | ret = 0; | |
1723 | goto out; | |
1724 | } | |
1725 | ||
1fe4f6fa | 1726 | out: |
929f4baf | 1727 | put_workspace(0, ws_list); |
c2fcdcdf TT |
1728 | return ret; |
1729 | } | |
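/*
 * Summary of the return values above (any non-zero value means "compress";
 * the value records which detector fired):
 *   1 - the two halves of the sample are identical (repeated pattern)
 *   2 - fewer than BYTE_SET_THRESHOLD distinct byte values (text-like data)
 *   3 - a small core byte set covers 90% of the sample
 *   4 - entropy at or below ENTROPY_LVL_ACEPTABLE
 *   5 - entropy below ENTROPY_LVL_HIGH (optimistic give-it-a-try)
 *   0 - none of the above; the data is deemed not compressible
 */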
f51d2b59 | 1730 | |
d0ab62ce DZ |
1731 | /* |
1732 | * Convert the compression suffix (eg. after "zlib" starting with ":") to | |
1733 | * the level; an unrecognized string sets the default level | |
1734 | */ | |
1735 | unsigned int btrfs_compress_str2level(unsigned int type, const char *str) | |
f51d2b59 | 1736 | { |
d0ab62ce DZ |
1737 | unsigned int level = 0; |
1738 | int ret; | |
1739 | ||
1740 | if (!type) | |
f51d2b59 DS |
1741 | return 0; |
1742 | ||
d0ab62ce DZ |
1743 | if (str[0] == ':') { |
1744 | ret = kstrtouint(str + 1, 10, &level); | |
1745 | if (ret) | |
1746 | level = 0; | |
1747 | } | |
1748 | ||
b0c1fe1e DS |
1749 | level = btrfs_compress_set_level(type, level); |
1750 | ||
1751 | return level; | |
1752 | } |
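/*
 * Example inputs (hypothetical mount-option suffixes) for the parser above,
 * with type == BTRFS_COMPRESS_ZLIB:
 *   ""      -> str[0] != ':', level stays 0 and
 *              btrfs_compress_set_level() picks the default
 *   ":9"    -> kstrtouint() parses 9
 *   ":junk" -> kstrtouint() fails, level resets to 0 -> default again
 */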