// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/kthread.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/log2.h>
#include <crypto/hash.h>
#include "misc.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "ordered-data.h"
#include "compression.h"
#include "extent_io.h"
#include "extent_map.h"
#include "subpage.h"
#include "zoned.h"

static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" };

const char* btrfs_compress_type2str(enum btrfs_compression_type type)
{
        switch (type) {
        case BTRFS_COMPRESS_ZLIB:
        case BTRFS_COMPRESS_LZO:
        case BTRFS_COMPRESS_ZSTD:
        case BTRFS_COMPRESS_NONE:
                return btrfs_compress_types[type];
        default:
                break;
        }

        return NULL;
}

bool btrfs_compress_is_valid_type(const char *str, size_t len)
{
        int i;

        for (i = 1; i < ARRAY_SIZE(btrfs_compress_types); i++) {
                size_t comp_len = strlen(btrfs_compress_types[i]);

                if (len < comp_len)
                        continue;

                if (!strncmp(btrfs_compress_types[i], str, comp_len))
                        return true;
        }
        return false;
}
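
/*
 * Note on the matching above: since strncmp() only compares the canonical
 * name's length, a value with a trailing level such as "zlib:3" or "zstd:1"
 * also validates; only the leading "zlib"/"zstd" prefix is checked.
 */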

static int compression_compress_pages(int type, struct list_head *ws,
               struct address_space *mapping, u64 start, struct page **pages,
               unsigned long *out_pages, unsigned long *total_in,
               unsigned long *total_out)
{
        switch (type) {
        case BTRFS_COMPRESS_ZLIB:
                return zlib_compress_pages(ws, mapping, start, pages,
                                           out_pages, total_in, total_out);
        case BTRFS_COMPRESS_LZO:
                return lzo_compress_pages(ws, mapping, start, pages,
                                          out_pages, total_in, total_out);
        case BTRFS_COMPRESS_ZSTD:
                return zstd_compress_pages(ws, mapping, start, pages,
                                           out_pages, total_in, total_out);
        case BTRFS_COMPRESS_NONE:
        default:
                /*
                 * This can happen when compression races with a remount that
                 * sets the inode to 'no compress', while the caller doesn't
                 * call inode_need_compress() to check whether we really need
                 * to compress.
                 *
                 * Not a big deal, we just need to inform the caller that we
                 * haven't allocated any pages yet.
                 */
                *out_pages = 0;
                return -E2BIG;
        }
}

static int compression_decompress_bio(struct list_head *ws,
                                      struct compressed_bio *cb)
{
        switch (cb->compress_type) {
        case BTRFS_COMPRESS_ZLIB: return zlib_decompress_bio(ws, cb);
        case BTRFS_COMPRESS_LZO:  return lzo_decompress_bio(ws, cb);
        case BTRFS_COMPRESS_ZSTD: return zstd_decompress_bio(ws, cb);
        case BTRFS_COMPRESS_NONE:
        default:
                /*
                 * This can't happen, the type is validated several times
                 * before we get here.
                 */
                BUG();
        }
}

static int compression_decompress(int type, struct list_head *ws,
               unsigned char *data_in, struct page *dest_page,
               unsigned long start_byte, size_t srclen, size_t destlen)
{
        switch (type) {
        case BTRFS_COMPRESS_ZLIB: return zlib_decompress(ws, data_in, dest_page,
                                                start_byte, srclen, destlen);
        case BTRFS_COMPRESS_LZO:  return lzo_decompress(ws, data_in, dest_page,
                                                start_byte, srclen, destlen);
        case BTRFS_COMPRESS_ZSTD: return zstd_decompress(ws, data_in, dest_page,
                                                start_byte, srclen, destlen);
        case BTRFS_COMPRESS_NONE:
        default:
                /*
                 * This can't happen, the type is validated several times
                 * before we get here.
                 */
                BUG();
        }
}

static int btrfs_decompress_bio(struct compressed_bio *cb);

static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
                                      unsigned long disk_size)
{
        return sizeof(struct compressed_bio) +
                (DIV_ROUND_UP(disk_size, fs_info->sectorsize)) * fs_info->csum_size;
}
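
/*
 * Worked example (assuming the default crc32c checksum, csum_size == 4, and
 * a 4K sectorsize): a 128K compressed extent needs 128K / 4K = 32 checksums,
 * so compressed_bio_size() returns sizeof(struct compressed_bio) plus 128
 * bytes; the checksum array lives inline after the struct (cb->sums).
 */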

static int check_compressed_csum(struct btrfs_inode *inode, struct bio *bio,
                                 u64 disk_start)
{
        struct btrfs_fs_info *fs_info = inode->root->fs_info;
        SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
        const u32 csum_size = fs_info->csum_size;
        const u32 sectorsize = fs_info->sectorsize;
        struct page *page;
        unsigned int i;
        char *kaddr;
        u8 csum[BTRFS_CSUM_SIZE];
        struct compressed_bio *cb = bio->bi_private;
        u8 *cb_sum = cb->sums;

        if ((inode->flags & BTRFS_INODE_NODATASUM) ||
            test_bit(BTRFS_FS_STATE_NO_CSUMS, &fs_info->fs_state))
                return 0;

        shash->tfm = fs_info->csum_shash;

        for (i = 0; i < cb->nr_pages; i++) {
                u32 pg_offset;
                u32 bytes_left = PAGE_SIZE;
                page = cb->compressed_pages[i];

                /* Determine the remaining bytes inside the page first */
                if (i == cb->nr_pages - 1)
                        bytes_left = cb->compressed_len - i * PAGE_SIZE;

                /* Hash through the page sector by sector */
                for (pg_offset = 0; pg_offset < bytes_left;
                     pg_offset += sectorsize) {
                        kaddr = kmap_atomic(page);
                        crypto_shash_digest(shash, kaddr + pg_offset,
                                            sectorsize, csum);
                        kunmap_atomic(kaddr);

                        if (memcmp(&csum, cb_sum, csum_size) != 0) {
                                btrfs_print_data_csum_error(inode, disk_start,
                                                csum, cb_sum, cb->mirror_num);
                                if (btrfs_bio(bio)->device)
                                        btrfs_dev_stat_inc_and_print(
                                                btrfs_bio(bio)->device,
                                                BTRFS_DEV_STAT_CORRUPTION_ERRS);
                                return -EIO;
                        }
                        cb_sum += csum_size;
                        disk_start += sectorsize;
                }
        }
        return 0;
}

/*
 * Reduce bio and io accounting for a compressed_bio with its corresponding
 * bio.
 *
 * Return true if there is no pending bio nor io.
 * Return false otherwise.
 */
static bool dec_and_test_compressed_bio(struct compressed_bio *cb, struct bio *bio)
{
        struct btrfs_fs_info *fs_info = btrfs_sb(cb->inode->i_sb);
        unsigned int bi_size = 0;
        bool last_io = false;
        struct bio_vec *bvec;
        struct bvec_iter_all iter_all;

        /*
         * At endio time, bi_iter.bi_size doesn't represent the real bio size.
         * Thus here we have to iterate through all segments to grab the
         * correct bio size.
         */
        bio_for_each_segment_all(bvec, bio, iter_all)
                bi_size += bvec->bv_len;

        if (bio->bi_status)
                cb->errors = 1;

        ASSERT(bi_size && bi_size <= cb->compressed_len);
        last_io = refcount_sub_and_test(bi_size >> fs_info->sectorsize_bits,
                                        &cb->pending_sectors);
        /*
         * Here we must wake up the possible error handler after all other
         * operations on @cb finished, or we can race with
         * finish_compressed_bio_*() which may free @cb.
         */
        wake_up_var(cb);

        return last_io;
}
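
/*
 * A concrete example of the accounting above (assuming a 4K sectorsize):
 * for a 128K compressed extent, pending_sectors starts at 32.  If the extent
 * is submitted as two 64K bios, each completion subtracts 16, and whichever
 * bio finishes second sees the count hit zero and runs the final cleanup.
 */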

static void finish_compressed_bio_read(struct compressed_bio *cb, struct bio *bio)
{
        unsigned int index;
        struct page *page;

        /* Release the compressed pages */
        for (index = 0; index < cb->nr_pages; index++) {
                page = cb->compressed_pages[index];
                page->mapping = NULL;
                put_page(page);
        }

        /* Do io completion on the original bio */
        if (cb->errors) {
                bio_io_error(cb->orig_bio);
        } else {
                struct bio_vec *bvec;
                struct bvec_iter_all iter_all;

                ASSERT(bio);
                ASSERT(!bio->bi_status);
                /*
                 * We have verified the checksum already, set page checked so
                 * the end_io handlers know about it
                 */
                ASSERT(!bio_flagged(bio, BIO_CLONED));
                bio_for_each_segment_all(bvec, cb->orig_bio, iter_all) {
                        u64 bvec_start = page_offset(bvec->bv_page) +
                                         bvec->bv_offset;

                        btrfs_page_set_checked(btrfs_sb(cb->inode->i_sb),
                                               bvec->bv_page, bvec_start,
                                               bvec->bv_len);
                }

                bio_endio(cb->orig_bio);
        }

        /* Finally free the cb struct */
        kfree(cb->compressed_pages);
        kfree(cb);
}

/*
 * When we finish reading compressed pages from the disk, we decompress them
 * and then run the bio end_io routines on the decompressed pages (in the
 * inode address space).
 *
 * This allows the checksumming and other IO error handling routines to work
 * normally.
 *
 * The compressed pages are freed here, and it must be run in process context.
 */
static void end_compressed_bio_read(struct bio *bio)
{
        struct compressed_bio *cb = bio->bi_private;
        struct inode *inode;
        unsigned int mirror = btrfs_bio(bio)->mirror_num;
        int ret = 0;

        if (!dec_and_test_compressed_bio(cb, bio))
                goto out;

        /*
         * Record the correct mirror_num in cb->orig_bio so that read-repair
         * can work properly.
         */
        btrfs_bio(cb->orig_bio)->mirror_num = mirror;
        cb->mirror_num = mirror;

        /*
         * Some IO in this cb have failed, just skip checksum as there is no
         * way it could be correct.
         */
        if (cb->errors == 1)
                goto csum_failed;

        inode = cb->inode;
        ret = check_compressed_csum(BTRFS_I(inode), bio,
                                    bio->bi_iter.bi_sector << 9);
        if (ret)
                goto csum_failed;

        /*
         * OK, we're the last bio for this extent, let's start the
         * decompression.
         */
        ret = btrfs_decompress_bio(cb);

csum_failed:
        if (ret)
                cb->errors = 1;
        finish_compressed_bio_read(cb, bio);
out:
        bio_put(bio);
}

/*
 * Clear the writeback bits on all of the file pages for a compressed write
 */
static noinline void end_compressed_writeback(struct inode *inode,
                                              const struct compressed_bio *cb)
{
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        unsigned long index = cb->start >> PAGE_SHIFT;
        unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
        struct page *pages[16];
        unsigned long nr_pages = end_index - index + 1;
        int i;
        int ret;

        if (cb->errors)
                mapping_set_error(inode->i_mapping, -EIO);

        while (nr_pages > 0) {
                ret = find_get_pages_contig(inode->i_mapping, index,
                                            min_t(unsigned long,
                                            nr_pages, ARRAY_SIZE(pages)), pages);
                if (ret == 0) {
                        nr_pages -= 1;
                        index += 1;
                        continue;
                }
                for (i = 0; i < ret; i++) {
                        if (cb->errors)
                                SetPageError(pages[i]);
                        btrfs_page_clamp_clear_writeback(fs_info, pages[i],
                                                         cb->start, cb->len);
                        put_page(pages[i]);
                }
                nr_pages -= ret;
                index += ret;
        }
        /* the inode may be gone now */
}

static void finish_compressed_bio_write(struct compressed_bio *cb)
{
        struct inode *inode = cb->inode;
        unsigned int index;

        /*
         * OK, we're the last bio for this extent, step one is to call back
         * into the FS and do all the end_io operations.
         */
        btrfs_writepage_endio_finish_ordered(BTRFS_I(inode), NULL,
                        cb->start, cb->start + cb->len - 1,
                        !cb->errors);

        end_compressed_writeback(inode, cb);
        /* Note, our inode could be gone now */

        /*
         * Release the compressed pages, these came from alloc_page and are
         * not attached to the inode at all
         */
        for (index = 0; index < cb->nr_pages; index++) {
                struct page *page = cb->compressed_pages[index];

                page->mapping = NULL;
                put_page(page);
        }

        /* Finally free the cb struct */
        kfree(cb->compressed_pages);
        kfree(cb);
}

/*
 * Do the cleanup once all the compressed pages hit the disk. This will clear
 * writeback on the file pages and free the compressed pages.
 *
 * This also calls the writeback end hooks for the file pages so that metadata
 * and checksums can be updated in the file.
 */
static void end_compressed_bio_write(struct bio *bio)
{
        struct compressed_bio *cb = bio->bi_private;

        if (!dec_and_test_compressed_bio(cb, bio))
                goto out;

        btrfs_record_physical_zoned(cb->inode, cb->start, bio);

        finish_compressed_bio_write(cb);
out:
        bio_put(bio);
}

static blk_status_t submit_compressed_bio(struct btrfs_fs_info *fs_info,
                                          struct compressed_bio *cb,
                                          struct bio *bio, int mirror_num)
{
        blk_status_t ret;

        ASSERT(bio->bi_iter.bi_size);
        ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
        if (ret)
                return ret;
        ret = btrfs_map_bio(fs_info, bio, mirror_num);
        return ret;
}

/*
 * Allocate a compressed_bio, which will be used to read/write on-disk
 * (aka, compressed) data.
 *
 * @cb:                 The compressed_bio structure, which records all the needed
 *                      information to bind the compressed data to the uncompressed
 *                      page cache.
 * @disk_bytenr:        The logical bytenr where the compressed data will be read
 *                      from or written to.
 * @endio_func:         The endio function to call after the IO for compressed data
 *                      is finished.
 * @next_stripe_start:  Return value of logical bytenr of where next stripe starts.
 *                      Let the caller know to only fill the bio up to the stripe
 *                      boundary.
 */
static struct bio *alloc_compressed_bio(struct compressed_bio *cb, u64 disk_bytenr,
                                        unsigned int opf, bio_end_io_t endio_func,
                                        u64 *next_stripe_start)
{
        struct btrfs_fs_info *fs_info = btrfs_sb(cb->inode->i_sb);
        struct btrfs_io_geometry geom;
        struct extent_map *em;
        struct bio *bio;
        int ret;

        bio = btrfs_bio_alloc(BIO_MAX_VECS);

        bio->bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
        bio->bi_opf = opf;
        bio->bi_private = cb;
        bio->bi_end_io = endio_func;

        em = btrfs_get_chunk_map(fs_info, disk_bytenr, fs_info->sectorsize);
        if (IS_ERR(em)) {
                bio_put(bio);
                return ERR_CAST(em);
        }

        if (bio_op(bio) == REQ_OP_ZONE_APPEND)
                bio_set_dev(bio, em->map_lookup->stripes[0].dev->bdev);

        ret = btrfs_get_io_geometry(fs_info, em, btrfs_op(bio), disk_bytenr, &geom);
        free_extent_map(em);
        if (ret < 0) {
                bio_put(bio);
                return ERR_PTR(ret);
        }
        *next_stripe_start = disk_bytenr + geom.len;

        return bio;
}

/*
 * Worker function to build and submit bios for previously compressed pages.
 * The corresponding pages in the inode should be marked for writeback
 * and the compressed pages should have a reference on them for dropping
 * when the IO is complete.
 *
 * This also checksums the file bytes and gets things ready for
 * the end io hooks.
 */
blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
                                 unsigned int len, u64 disk_start,
                                 unsigned int compressed_len,
                                 struct page **compressed_pages,
                                 unsigned int nr_pages,
                                 unsigned int write_flags,
                                 struct cgroup_subsys_state *blkcg_css)
{
        struct btrfs_fs_info *fs_info = inode->root->fs_info;
        struct bio *bio = NULL;
        struct compressed_bio *cb;
        u64 cur_disk_bytenr = disk_start;
        u64 next_stripe_start;
        blk_status_t ret;
        int skip_sum = inode->flags & BTRFS_INODE_NODATASUM;
        const bool use_append = btrfs_use_zone_append(inode, disk_start);
        const unsigned int bio_op = use_append ? REQ_OP_ZONE_APPEND : REQ_OP_WRITE;

        ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
               IS_ALIGNED(len, fs_info->sectorsize));
        cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
        if (!cb)
                return BLK_STS_RESOURCE;
        refcount_set(&cb->pending_sectors, compressed_len >> fs_info->sectorsize_bits);
        cb->errors = 0;
        cb->inode = &inode->vfs_inode;
        cb->start = start;
        cb->len = len;
        cb->mirror_num = 0;
        cb->compressed_pages = compressed_pages;
        cb->compressed_len = compressed_len;
        cb->orig_bio = NULL;
        cb->nr_pages = nr_pages;

        while (cur_disk_bytenr < disk_start + compressed_len) {
                u64 offset = cur_disk_bytenr - disk_start;
                unsigned int index = offset >> PAGE_SHIFT;
                unsigned int real_size;
                unsigned int added;
                struct page *page = compressed_pages[index];
                bool submit = false;

                /* Allocate new bio if submitted or not yet allocated */
                if (!bio) {
                        bio = alloc_compressed_bio(cb, cur_disk_bytenr,
                                bio_op | write_flags, end_compressed_bio_write,
                                &next_stripe_start);
                        if (IS_ERR(bio)) {
                                ret = errno_to_blk_status(PTR_ERR(bio));
                                bio = NULL;
                                goto finish_cb;
                        }
                }
                /*
                 * We should never reach next_stripe_start, as we submit the
                 * bio immediately when we reach the boundary.
                 */
                ASSERT(cur_disk_bytenr != next_stripe_start);

                /*
                 * We have various limits on the real write size:
                 * - stripe boundary
                 * - page boundary
                 * - compressed length boundary
                 */
                real_size = min_t(u64, U32_MAX, next_stripe_start - cur_disk_bytenr);
                real_size = min_t(u64, real_size, PAGE_SIZE - offset_in_page(offset));
                real_size = min_t(u64, real_size, compressed_len - offset);
                ASSERT(IS_ALIGNED(real_size, fs_info->sectorsize));

                if (use_append)
                        added = bio_add_zone_append_page(bio, page, real_size,
                                        offset_in_page(offset));
                else
                        added = bio_add_page(bio, page, real_size,
                                        offset_in_page(offset));
                /* Reached zoned boundary */
                if (added == 0)
                        submit = true;

                cur_disk_bytenr += added;
                /* Reached stripe boundary */
                if (cur_disk_bytenr == next_stripe_start)
                        submit = true;

                /* Finished the range */
                if (cur_disk_bytenr == disk_start + compressed_len)
                        submit = true;

                if (submit) {
                        if (!skip_sum) {
                                ret = btrfs_csum_one_bio(inode, bio, start, 1);
                                if (ret)
                                        goto finish_cb;
                        }

                        ret = submit_compressed_bio(fs_info, cb, bio, 0);
                        if (ret)
                                goto finish_cb;
                        bio = NULL;
                }
                cond_resched();
        }
        if (blkcg_css)
                kthread_associate_blkcg(NULL);

        return 0;

finish_cb:
        if (bio) {
                bio->bi_status = ret;
                bio_endio(bio);
        }
        /* Last byte of @cb is submitted, endio will free @cb */
        if (cur_disk_bytenr == disk_start + compressed_len)
                return ret;

        wait_var_event(cb, refcount_read(&cb->pending_sectors) ==
                           (disk_start + compressed_len - cur_disk_bytenr) >>
                           fs_info->sectorsize_bits);
        /*
         * Even with previous bio ended, we should still have io not yet
         * submitted, thus need to finish manually.
         */
        ASSERT(refcount_read(&cb->pending_sectors));
        /* Now we are the only one referring @cb, can finish it safely. */
        finish_compressed_bio_write(cb);
        return ret;
}

static u64 bio_end_offset(struct bio *bio)
{
        struct bio_vec *last = bio_last_bvec_all(bio);

        return page_offset(last->bv_page) + last->bv_len + last->bv_offset;
}

/*
 * Add extra pages in the same compressed file extent so that we don't need to
 * re-read the same extent again and again.
 *
 * NOTE: this won't work well for subpage, as for subpage read, we lock the
 * full page then submit bio for each compressed/regular extents.
 *
 * This means, if we have several sectors in the same page pointing to the
 * same on-disk compressed data, we will re-read the same extent many times
 * and this function can only help for the next page.
 */
static noinline int add_ra_bio_pages(struct inode *inode,
                                     u64 compressed_end,
                                     struct compressed_bio *cb)
{
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        unsigned long end_index;
        u64 cur = bio_end_offset(cb->orig_bio);
        u64 isize = i_size_read(inode);
        int ret;
        struct page *page;
        struct extent_map *em;
        struct address_space *mapping = inode->i_mapping;
        struct extent_map_tree *em_tree;
        struct extent_io_tree *tree;
        int sectors_missed = 0;

        em_tree = &BTRFS_I(inode)->extent_tree;
        tree = &BTRFS_I(inode)->io_tree;

        if (isize == 0)
                return 0;

        /*
         * For current subpage support, we only support 64K page size,
         * which means maximum compressed extent size (128K) is just 2x page
         * size.
         * This makes readahead less effective, so here disable readahead for
         * subpage for now, until full compressed write is supported.
         */
        if (btrfs_sb(inode->i_sb)->sectorsize < PAGE_SIZE)
                return 0;

        end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;

        while (cur < compressed_end) {
                u64 page_end;
                u64 pg_index = cur >> PAGE_SHIFT;
                u32 add_size;

                if (pg_index > end_index)
                        break;

                page = xa_load(&mapping->i_pages, pg_index);
                if (page && !xa_is_value(page)) {
                        sectors_missed += (PAGE_SIZE - offset_in_page(cur)) >>
                                          fs_info->sectorsize_bits;

                        /* Beyond threshold, no need to continue */
                        if (sectors_missed > 4)
                                break;

                        /*
                         * Jump to next page start as we already have page for
                         * current offset.
                         */
                        cur = (pg_index << PAGE_SHIFT) + PAGE_SIZE;
                        continue;
                }

                page = __page_cache_alloc(mapping_gfp_constraint(mapping,
                                                                 ~__GFP_FS));
                if (!page)
                        break;

                if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
                        put_page(page);
                        /* There is already a page, skip to page end */
                        cur = (pg_index << PAGE_SHIFT) + PAGE_SIZE;
                        continue;
                }

                ret = set_page_extent_mapped(page);
                if (ret < 0) {
                        unlock_page(page);
                        put_page(page);
                        break;
                }

                page_end = (pg_index << PAGE_SHIFT) + PAGE_SIZE - 1;
                lock_extent(tree, cur, page_end);
                read_lock(&em_tree->lock);
                em = lookup_extent_mapping(em_tree, cur, page_end + 1 - cur);
                read_unlock(&em_tree->lock);

                /*
                 * At this point, we have a locked page in the page cache for
                 * these bytes in the file. But, we have to make sure they map
                 * to this compressed extent on disk.
                 */
                if (!em || cur < em->start ||
                    (cur + fs_info->sectorsize > extent_map_end(em)) ||
                    (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) {
                        free_extent_map(em);
                        unlock_extent(tree, cur, page_end);
                        unlock_page(page);
                        put_page(page);
                        break;
                }
                /* Compute the range to add while we still hold a valid em */
                add_size = min(em->start + em->len, page_end + 1) - cur;
                free_extent_map(em);

                if (page->index == end_index) {
                        size_t zero_offset = offset_in_page(isize);

                        if (zero_offset) {
                                int zeros;
                                zeros = PAGE_SIZE - zero_offset;
                                memzero_page(page, zero_offset, zeros);
                                flush_dcache_page(page);
                        }
                }

                ret = bio_add_page(cb->orig_bio, page, add_size, offset_in_page(cur));
                if (ret != add_size) {
                        unlock_extent(tree, cur, page_end);
                        unlock_page(page);
                        put_page(page);
                        break;
                }
                /*
                 * If it's subpage, we also need to increase its
                 * subpage::readers number, as at endio we will decrease
                 * subpage::readers and unlock the page.
                 */
                if (fs_info->sectorsize < PAGE_SIZE)
                        btrfs_subpage_start_reader(fs_info, page, cur, add_size);
                put_page(page);
                cur += add_size;
        }
        return 0;
}

/*
 * For a compressed read, the bio we get passed has all the inode pages
 * in it.  We don't actually do IO on those pages but allocate new ones
 * to hold the compressed pages on disk.
 *
 * bio->bi_iter.bi_sector points to the compressed extent on disk
 * bio->bi_io_vec points to all of the inode pages
 *
 * After the compressed pages are read, we copy the bytes into the
 * bio we were passed and then call the bio end_io calls
 */
blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
                                          int mirror_num, unsigned long bio_flags)
{
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct extent_map_tree *em_tree;
        struct compressed_bio *cb;
        unsigned int compressed_len;
        unsigned int nr_pages;
        unsigned int pg_index;
        struct bio *comp_bio = NULL;
        const u64 disk_bytenr = bio->bi_iter.bi_sector << SECTOR_SHIFT;
        u64 cur_disk_byte = disk_bytenr;
        u64 next_stripe_start;
        u64 file_offset;
        u64 em_len;
        u64 em_start;
        struct extent_map *em;
        blk_status_t ret = BLK_STS_RESOURCE;
        int faili = 0;
        u8 *sums;

        em_tree = &BTRFS_I(inode)->extent_tree;

        file_offset = bio_first_bvec_all(bio)->bv_offset +
                      page_offset(bio_first_page_all(bio));

        /* we need the actual starting offset of this extent in the file */
        read_lock(&em_tree->lock);
        em = lookup_extent_mapping(em_tree, file_offset, fs_info->sectorsize);
        read_unlock(&em_tree->lock);
        if (!em)
                return BLK_STS_IOERR;

        ASSERT(em->compress_type != BTRFS_COMPRESS_NONE);
        compressed_len = em->block_len;
        cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
        if (!cb)
                goto out;

        refcount_set(&cb->pending_sectors, compressed_len >> fs_info->sectorsize_bits);
        cb->errors = 0;
        cb->inode = inode;
        cb->mirror_num = mirror_num;
        sums = cb->sums;

        cb->start = em->orig_start;
        em_len = em->len;
        em_start = em->start;

        free_extent_map(em);
        em = NULL;

        cb->len = bio->bi_iter.bi_size;
        cb->compressed_len = compressed_len;
        cb->compress_type = extent_compress_type(bio_flags);
        cb->orig_bio = bio;

        nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
        cb->compressed_pages = kcalloc(nr_pages, sizeof(struct page *),
                                       GFP_NOFS);
        if (!cb->compressed_pages)
                goto fail1;

        for (pg_index = 0; pg_index < nr_pages; pg_index++) {
                cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS);
                if (!cb->compressed_pages[pg_index]) {
                        faili = pg_index - 1;
                        ret = BLK_STS_RESOURCE;
                        goto fail2;
                }
        }
        faili = nr_pages - 1;
        cb->nr_pages = nr_pages;

        add_ra_bio_pages(inode, em_start + em_len, cb);

        /* include any pages we added in add_ra_bio_pages */
        cb->len = bio->bi_iter.bi_size;

        while (cur_disk_byte < disk_bytenr + compressed_len) {
                u64 offset = cur_disk_byte - disk_bytenr;
                unsigned int index = offset >> PAGE_SHIFT;
                unsigned int real_size;
                unsigned int added;
                struct page *page = cb->compressed_pages[index];
                bool submit = false;

                /* Allocate new bio if submitted or not yet allocated */
                if (!comp_bio) {
                        comp_bio = alloc_compressed_bio(cb, cur_disk_byte,
                                        REQ_OP_READ, end_compressed_bio_read,
                                        &next_stripe_start);
                        if (IS_ERR(comp_bio)) {
                                ret = errno_to_blk_status(PTR_ERR(comp_bio));
                                comp_bio = NULL;
                                goto finish_cb;
                        }
                }
                /*
                 * We should never reach next_stripe_start, as we submit
                 * comp_bio immediately when we reach the boundary.
                 */
                ASSERT(cur_disk_byte != next_stripe_start);
                /*
                 * We have various limits on the real read size:
                 * - stripe boundary
                 * - page boundary
                 * - compressed length boundary
                 */
                real_size = min_t(u64, U32_MAX, next_stripe_start - cur_disk_byte);
                real_size = min_t(u64, real_size, PAGE_SIZE - offset_in_page(offset));
                real_size = min_t(u64, real_size, compressed_len - offset);
                ASSERT(IS_ALIGNED(real_size, fs_info->sectorsize));

                added = bio_add_page(comp_bio, page, real_size, offset_in_page(offset));
                /*
                 * Maximum compressed extent is smaller than bio size limit,
                 * thus bio_add_page() should always succeed.
                 */
                ASSERT(added == real_size);
                cur_disk_byte += added;

                /* Reached stripe boundary, need to submit */
                if (cur_disk_byte == next_stripe_start)
                        submit = true;

                /* Finished the range, need to submit */
                if (cur_disk_byte == disk_bytenr + compressed_len)
                        submit = true;

                if (submit) {
                        unsigned int nr_sectors;

                        ret = btrfs_lookup_bio_sums(inode, comp_bio, sums);
                        if (ret)
                                goto finish_cb;

                        nr_sectors = DIV_ROUND_UP(comp_bio->bi_iter.bi_size,
                                                  fs_info->sectorsize);
                        sums += fs_info->csum_size * nr_sectors;

                        ret = submit_compressed_bio(fs_info, cb, comp_bio, mirror_num);
                        if (ret)
                                goto finish_cb;
                        comp_bio = NULL;
                }
        }
        return 0;

fail2:
        while (faili >= 0) {
                __free_page(cb->compressed_pages[faili]);
                faili--;
        }

        kfree(cb->compressed_pages);
fail1:
        kfree(cb);
out:
        free_extent_map(em);
        return ret;
finish_cb:
        if (comp_bio) {
                comp_bio->bi_status = ret;
                bio_endio(comp_bio);
        }
        /* All bytes of @cb are submitted, endio will free @cb */
        if (cur_disk_byte == disk_bytenr + compressed_len)
                return ret;

        wait_var_event(cb, refcount_read(&cb->pending_sectors) ==
                           (disk_bytenr + compressed_len - cur_disk_byte) >>
                           fs_info->sectorsize_bits);
        /*
         * Even with previous bio ended, we should still have io not yet
         * submitted, thus need to finish @cb manually.
         */
        ASSERT(refcount_read(&cb->pending_sectors));
        /* Now we are the only one referring @cb, can finish it safely. */
        finish_compressed_bio_read(cb, NULL);
        return ret;
}

/*
 * Heuristic uses systematic sampling to collect data from the input data
 * range, the logic can be tuned by the following constants:
 *
 * @SAMPLING_READ_SIZE - how many bytes will be copied from each sample
 * @SAMPLING_INTERVAL  - range from which the sampled data can be collected
 */
#define SAMPLING_READ_SIZE      (16)
#define SAMPLING_INTERVAL       (256)

/*
 * For statistical analysis of the input data we consider bytes that form a
 * Galois Field of 256 objects. Each object has an attribute count, ie. how
 * many times the object appeared in the sample.
 */
#define BUCKET_SIZE             (256)

/*
 * The size of the sample is based on a statistical sampling rule of thumb.
 * The common way is to perform sampling tests as long as the number of
 * elements in each cell is at least 5.
 *
 * Instead of 5, we choose 32 to obtain more accurate results.
 * If the data contain the maximum number of symbols, which is 256, we obtain a
 * sample size bound by 8192.
 *
 * For a sample of at most 8KB of data per data range: 16 consecutive bytes
 * from up to 512 locations.
 */
#define MAX_SAMPLE_SIZE         (BTRFS_MAX_UNCOMPRESSED * \
                                 SAMPLING_READ_SIZE / SAMPLING_INTERVAL)
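
/*
 * Worked example of the bound above: BTRFS_MAX_UNCOMPRESSED is 128K, so
 * MAX_SAMPLE_SIZE = 131072 * 16 / 256 = 8192 bytes, i.e. 512 sample
 * locations of 16 bytes each, matching the 32-elements-per-bucket rule of
 * thumb for 256 buckets (32 * 256 = 8192).
 */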

struct bucket_item {
        u32 count;
};

struct heuristic_ws {
        /* Partial copy of input data */
        u8 *sample;
        u32 sample_size;
        /* Buckets store counters for each byte value */
        struct bucket_item *bucket;
        /* Sorting buffer */
        struct bucket_item *bucket_b;
        struct list_head list;
};

static struct workspace_manager heuristic_wsm;

static void free_heuristic_ws(struct list_head *ws)
{
        struct heuristic_ws *workspace;

        workspace = list_entry(ws, struct heuristic_ws, list);

        kvfree(workspace->sample);
        kfree(workspace->bucket);
        kfree(workspace->bucket_b);
        kfree(workspace);
}

static struct list_head *alloc_heuristic_ws(unsigned int level)
{
        struct heuristic_ws *ws;

        ws = kzalloc(sizeof(*ws), GFP_KERNEL);
        if (!ws)
                return ERR_PTR(-ENOMEM);

        ws->sample = kvmalloc(MAX_SAMPLE_SIZE, GFP_KERNEL);
        if (!ws->sample)
                goto fail;

        ws->bucket = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket), GFP_KERNEL);
        if (!ws->bucket)
                goto fail;

        ws->bucket_b = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket_b), GFP_KERNEL);
        if (!ws->bucket_b)
                goto fail;

        INIT_LIST_HEAD(&ws->list);
        return &ws->list;
fail:
        free_heuristic_ws(&ws->list);
        return ERR_PTR(-ENOMEM);
}

const struct btrfs_compress_op btrfs_heuristic_compress = {
        .workspace_manager = &heuristic_wsm,
};

static const struct btrfs_compress_op * const btrfs_compress_op[] = {
        /* The heuristic is represented as compression type 0 */
        &btrfs_heuristic_compress,
        &btrfs_zlib_compress,
        &btrfs_lzo_compress,
        &btrfs_zstd_compress,
};

static struct list_head *alloc_workspace(int type, unsigned int level)
{
        switch (type) {
        case BTRFS_COMPRESS_NONE: return alloc_heuristic_ws(level);
        case BTRFS_COMPRESS_ZLIB: return zlib_alloc_workspace(level);
        case BTRFS_COMPRESS_LZO:  return lzo_alloc_workspace(level);
        case BTRFS_COMPRESS_ZSTD: return zstd_alloc_workspace(level);
        default:
                /*
                 * This can't happen, the type is validated several times
                 * before we get here.
                 */
                BUG();
        }
}

static void free_workspace(int type, struct list_head *ws)
{
        switch (type) {
        case BTRFS_COMPRESS_NONE: return free_heuristic_ws(ws);
        case BTRFS_COMPRESS_ZLIB: return zlib_free_workspace(ws);
        case BTRFS_COMPRESS_LZO:  return lzo_free_workspace(ws);
        case BTRFS_COMPRESS_ZSTD: return zstd_free_workspace(ws);
        default:
                /*
                 * This can't happen, the type is validated several times
                 * before we get here.
                 */
                BUG();
        }
}

static void btrfs_init_workspace_manager(int type)
{
        struct workspace_manager *wsm;
        struct list_head *workspace;

        wsm = btrfs_compress_op[type]->workspace_manager;
        INIT_LIST_HEAD(&wsm->idle_ws);
        spin_lock_init(&wsm->ws_lock);
        atomic_set(&wsm->total_ws, 0);
        init_waitqueue_head(&wsm->ws_wait);

        /*
         * Preallocate one workspace for each compression type so we can
         * guarantee forward progress in the worst case
         */
        workspace = alloc_workspace(type, 0);
        if (IS_ERR(workspace)) {
                pr_warn("BTRFS: cannot preallocate compression workspace, will try later\n");
        } else {
                atomic_set(&wsm->total_ws, 1);
                wsm->free_ws = 1;
                list_add(workspace, &wsm->idle_ws);
        }
}

static void btrfs_cleanup_workspace_manager(int type)
{
        struct workspace_manager *wsman;
        struct list_head *ws;

        wsman = btrfs_compress_op[type]->workspace_manager;
        while (!list_empty(&wsman->idle_ws)) {
                ws = wsman->idle_ws.next;
                list_del(ws);
                free_workspace(type, ws);
                atomic_dec(&wsman->total_ws);
        }
}

/*
 * This finds an available workspace or allocates a new one.
 * If it's not possible to allocate a new one, waits until there's one.
 * Preallocation provides a forward progress guarantee and we do not return
 * errors.
 */
struct list_head *btrfs_get_workspace(int type, unsigned int level)
{
        struct workspace_manager *wsm;
        struct list_head *workspace;
        int cpus = num_online_cpus();
        unsigned nofs_flag;
        struct list_head *idle_ws;
        spinlock_t *ws_lock;
        atomic_t *total_ws;
        wait_queue_head_t *ws_wait;
        int *free_ws;

        wsm = btrfs_compress_op[type]->workspace_manager;
        idle_ws = &wsm->idle_ws;
        ws_lock = &wsm->ws_lock;
        total_ws = &wsm->total_ws;
        ws_wait = &wsm->ws_wait;
        free_ws = &wsm->free_ws;

again:
        spin_lock(ws_lock);
        if (!list_empty(idle_ws)) {
                workspace = idle_ws->next;
                list_del(workspace);
                (*free_ws)--;
                spin_unlock(ws_lock);
                return workspace;
        }
        if (atomic_read(total_ws) > cpus) {
                DEFINE_WAIT(wait);

                spin_unlock(ws_lock);
                prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE);
                if (atomic_read(total_ws) > cpus && !*free_ws)
                        schedule();
                finish_wait(ws_wait, &wait);
                goto again;
        }
        atomic_inc(total_ws);
        spin_unlock(ws_lock);

        /*
         * Allocation helpers call vmalloc that can't use GFP_NOFS, so we have
         * to turn it off here because we might get called from the restricted
         * context of btrfs_compress_bio/btrfs_compress_pages
         */
        nofs_flag = memalloc_nofs_save();
        workspace = alloc_workspace(type, level);
        memalloc_nofs_restore(nofs_flag);

        if (IS_ERR(workspace)) {
                atomic_dec(total_ws);
                wake_up(ws_wait);

                /*
                 * Do not return the error but go back to waiting. There's a
                 * workspace preallocated for each type and the compression
                 * time is bounded so we get to a workspace eventually. This
                 * makes our caller's life easier.
                 *
                 * To prevent silent and low-probability deadlocks (when the
                 * initial preallocation fails), check if there are any
                 * workspaces at all.
                 */
                if (atomic_read(total_ws) == 0) {
                        static DEFINE_RATELIMIT_STATE(_rs,
                                        /* once per minute */ 60 * HZ,
                                        /* no burst */ 1);

                        if (__ratelimit(&_rs)) {
                                pr_warn("BTRFS: no compression workspaces, low memory, retrying\n");
                        }
                }
                goto again;
        }
        return workspace;
}

static struct list_head *get_workspace(int type, int level)
{
        switch (type) {
        case BTRFS_COMPRESS_NONE: return btrfs_get_workspace(type, level);
        case BTRFS_COMPRESS_ZLIB: return zlib_get_workspace(level);
        case BTRFS_COMPRESS_LZO:  return btrfs_get_workspace(type, level);
        case BTRFS_COMPRESS_ZSTD: return zstd_get_workspace(level);
        default:
                /*
                 * This can't happen, the type is validated several times
                 * before we get here.
                 */
                BUG();
        }
}

/*
 * Put a workspace struct back on the list or free it if we have enough
 * idle ones sitting around
 */
void btrfs_put_workspace(int type, struct list_head *ws)
{
        struct workspace_manager *wsm;
        struct list_head *idle_ws;
        spinlock_t *ws_lock;
        atomic_t *total_ws;
        wait_queue_head_t *ws_wait;
        int *free_ws;

        wsm = btrfs_compress_op[type]->workspace_manager;
        idle_ws = &wsm->idle_ws;
        ws_lock = &wsm->ws_lock;
        total_ws = &wsm->total_ws;
        ws_wait = &wsm->ws_wait;
        free_ws = &wsm->free_ws;

        spin_lock(ws_lock);
        if (*free_ws <= num_online_cpus()) {
                list_add(ws, idle_ws);
                (*free_ws)++;
                spin_unlock(ws_lock);
                goto wake;
        }
        spin_unlock(ws_lock);

        free_workspace(type, ws);
        atomic_dec(total_ws);
wake:
        cond_wake_up(ws_wait);
}

static void put_workspace(int type, struct list_head *ws)
{
        switch (type) {
        case BTRFS_COMPRESS_NONE: return btrfs_put_workspace(type, ws);
        case BTRFS_COMPRESS_ZLIB: return btrfs_put_workspace(type, ws);
        case BTRFS_COMPRESS_LZO:  return btrfs_put_workspace(type, ws);
        case BTRFS_COMPRESS_ZSTD: return zstd_put_workspace(ws);
        default:
                /*
                 * This can't happen, the type is validated several times
                 * before we get here.
                 */
                BUG();
        }
}

/*
 * Adjust @level according to the limits of the compression algorithm or
 * fallback to default
 */
static unsigned int btrfs_compress_set_level(int type, unsigned level)
{
        const struct btrfs_compress_op *ops = btrfs_compress_op[type];

        if (level == 0)
                level = ops->default_level;
        else
                level = min(level, ops->max_level);

        return level;
}
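
/*
 * For instance (assuming the usual per-algorithm limits: zlib levels 1-9
 * with default 3, zstd with default 3): btrfs_compress_set_level(type, 0)
 * picks the algorithm's default level, and a request for zlib level 42 is
 * clamped down to its max_level of 9.
 */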

/*
 * Given an address space and start and length, compress the bytes into @pages
 * that are allocated on demand.
 *
 * @type_level is encoded algorithm and level, where level 0 means whatever
 * default the algorithm chooses and is opaque here;
 * - the compression algorithm occupies bits 0-3
 * - the level occupies bits 4-7
 *
 * @out_pages is an in/out parameter, holds the maximum number of pages to
 * allocate and returns the number of actually allocated pages
 *
 * @total_in is used to return the number of bytes actually read. It
 * may be smaller than the input length if we had to exit early because we
 * ran out of room in the pages array or because we crossed the
 * max_out threshold.
 *
 * @total_out is an in/out parameter, must be set to the input length and will
 * be also used to return the total number of compressed bytes
 */
int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
                         u64 start, struct page **pages,
                         unsigned long *out_pages,
                         unsigned long *total_in,
                         unsigned long *total_out)
{
        int type = btrfs_compress_type(type_level);
        int level = btrfs_compress_level(type_level);
        struct list_head *workspace;
        int ret;

        level = btrfs_compress_set_level(type, level);
        workspace = get_workspace(type, level);
        ret = compression_compress_pages(type, workspace, mapping, start, pages,
                                         out_pages, total_in, total_out);
        put_workspace(type, workspace);
        return ret;
}
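
/*
 * A sketch of the @type_level encoding described above (the accessors
 * btrfs_compress_type()/btrfs_compress_level() unpack it the same way):
 * zstd at level 3 would be passed as
 *
 *      unsigned int type_level = BTRFS_COMPRESS_ZSTD | (3 << 4);
 *
 * and a zero level field means "use the algorithm's default".
 */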
1355 | ||
8140dc30 | 1356 | static int btrfs_decompress_bio(struct compressed_bio *cb) |
261507a0 LZ |
1357 | { |
1358 | struct list_head *workspace; | |
1359 | int ret; | |
8140dc30 | 1360 | int type = cb->compress_type; |
261507a0 | 1361 | |
7bf49943 | 1362 | workspace = get_workspace(type, 0); |
4a9e803e | 1363 | ret = compression_decompress_bio(workspace, cb); |
929f4baf | 1364 | put_workspace(type, workspace); |
e1ddce71 | 1365 | |
261507a0 LZ |
1366 | return ret; |
1367 | } | |
1368 | ||
1369 | /* | |
1370 | * a less complex decompression routine. Our compressed data fits in a | |
1371 | * single page, and we want to read a single page out of it. | |
1372 | * start_byte tells us the offset into the compressed data we're interested in | |
1373 | */ | |
1374 | int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page, | |
1375 | unsigned long start_byte, size_t srclen, size_t destlen) | |
1376 | { | |
1377 | struct list_head *workspace; | |
1378 | int ret; | |
1379 | ||
7bf49943 | 1380 | workspace = get_workspace(type, 0); |
1e4eb746 DS |
1381 | ret = compression_decompress(type, workspace, data_in, dest_page, |
1382 | start_byte, srclen, destlen); | |
929f4baf | 1383 | put_workspace(type, workspace); |
7bf49943 | 1384 | |
261507a0 LZ |
1385 | return ret; |
1386 | } | |
1387 | ||
1666edab DZ |
1388 | void __init btrfs_init_compress(void) |
1389 | { | |
d5517033 DS |
1390 | btrfs_init_workspace_manager(BTRFS_COMPRESS_NONE); |
1391 | btrfs_init_workspace_manager(BTRFS_COMPRESS_ZLIB); | |
1392 | btrfs_init_workspace_manager(BTRFS_COMPRESS_LZO); | |
1393 | zstd_init_workspace_manager(); | |
1666edab DZ |
1394 | } |
1395 | ||
e67c718b | 1396 | void __cold btrfs_exit_compress(void) |
261507a0 | 1397 | { |
2510307e DS |
1398 | btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_NONE); |
1399 | btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_ZLIB); | |
1400 | btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_LZO); | |
1401 | zstd_cleanup_workspace_manager(); | |
261507a0 | 1402 | } |
3a39c18d LZ |
1403 | |
1404 | /* | |
1c3dc173 | 1405 | * Copy decompressed data from working buffer to pages. |
3a39c18d | 1406 | * |
1c3dc173 QW |
1407 | * @buf: The decompressed data buffer |
1408 | * @buf_len: The decompressed data length | |
1409 | * @decompressed: Number of bytes that are already decompressed inside the | |
1410 | * compressed extent | |
1411 | * @cb: The compressed extent descriptor | |
1412 | * @orig_bio: The original bio that the caller wants to read for | |
3a39c18d | 1413 | * |
1c3dc173 QW |
1414 | * An easier to understand graph is like below: |
1415 | * | |
1416 | * |<- orig_bio ->| |<- orig_bio->| | |
1417 | * |<------- full decompressed extent ----->| | |
1418 | * |<----------- @cb range ---->| | |
1419 | * | |<-- @buf_len -->| | |
1420 | * |<--- @decompressed --->| | |
1421 | * | |
1422 | * Note that, @cb can be a subpage of the full decompressed extent, but | |
1423 | * @cb->start always has the same as the orig_file_offset value of the full | |
1424 | * decompressed extent. | |
1425 | * | |
1426 | * When reading a compressed extent, we have to read the full compressed extent, |
1427 | * while @orig_bio may only want part of the range. |
1428 | * Thus this function ensures that only the data covered by @orig_bio is |
1429 | * copied out. |
1430 | * | |
1431 | * Return 0 if we have copied all needed contents for @orig_bio. | |
1432 | * Return >0 if we need to continue decompressing. |
3a39c18d | 1433 | */ |
1c3dc173 QW |
1434 | int btrfs_decompress_buf2page(const char *buf, u32 buf_len, |
1435 | struct compressed_bio *cb, u32 decompressed) | |
3a39c18d | 1436 | { |
1c3dc173 QW |
1437 | struct bio *orig_bio = cb->orig_bio; |
1438 | /* Offset inside the full decompressed extent */ | |
1439 | u32 cur_offset; | |
1440 | ||
1441 | cur_offset = decompressed; | |
1442 | /* The main loop to do the copy */ | |
1443 | while (cur_offset < decompressed + buf_len) { | |
1444 | struct bio_vec bvec; | |
1445 | size_t copy_len; | |
1446 | u32 copy_start; | |
1447 | /* Offset inside the full decompressed extent */ | |
1448 | u32 bvec_offset; | |
1449 | ||
1450 | bvec = bio_iter_iovec(orig_bio, orig_bio->bi_iter); | |
1451 | /* | |
1452 | * The subtraction of cb->start may underflow, but the result still |
1453 | * gives the correct offset inside the full decompressed extent. |
1454 | */ | |
1455 | bvec_offset = page_offset(bvec.bv_page) + bvec.bv_offset - cb->start; | |
974b1adc | 1456 | |
1c3dc173 QW |
1457 | /* Haven't reached the bvec range, exit */ |
1458 | if (decompressed + buf_len <= bvec_offset) | |
1459 | return 1; | |
3a39c18d | 1460 | |
1c3dc173 QW |
1461 | copy_start = max(cur_offset, bvec_offset); |
1462 | copy_len = min(bvec_offset + bvec.bv_len, | |
1463 | decompressed + buf_len) - copy_start; | |
1464 | ASSERT(copy_len); | |
3a39c18d | 1465 | |
974b1adc | 1466 | /* |
1c3dc173 QW |
1467 | * Extra range check to ensure we didn't go beyond |
1468 | * @buf + @buf_len. | |
974b1adc | 1469 | */ |
1c3dc173 QW |
1470 | ASSERT(copy_start - decompressed < buf_len); |
1471 | memcpy_to_page(bvec.bv_page, bvec.bv_offset, | |
1472 | buf + copy_start - decompressed, copy_len); | |
1473 | flush_dcache_page(bvec.bv_page); | |
1474 | cur_offset += copy_len; | |
3a39c18d | 1475 | |
1c3dc173 QW |
1476 | bio_advance(orig_bio, copy_len); |
1477 | /* Finished the bio */ | |
1478 | if (!orig_bio->bi_iter.bi_size) | |
1479 | return 0; | |
3a39c18d | 1480 | } |
3a39c18d LZ |
1481 | return 1; |
1482 | } | |
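The loop above boils down to intersecting the interval [@decompressed, @decompressed + @buf_len) with each bvec's interval; a minimal stand-alone sketch of that arithmetic, with made-up offsets:

/*
 * Stand-alone sketch of the copy-window arithmetic in
 * btrfs_decompress_buf2page(): intersect the decompressed window with
 * one bvec.  All values are illustrative.
 */
#include <stdio.h>

static unsigned int umax(unsigned int a, unsigned int b) { return a > b ? a : b; }
static unsigned int umin(unsigned int a, unsigned int b) { return a < b ? a : b; }

int main(void)
{
	unsigned int decompressed = 8192, buf_len = 4096;  /* window in extent */
	unsigned int bvec_offset = 10240, bvec_len = 4096; /* one target bvec */

	unsigned int copy_start = umax(decompressed, bvec_offset);
	unsigned int copy_end = umin(decompressed + buf_len,
				     bvec_offset + bvec_len);

	if (copy_end > copy_start)
		printf("copy %u bytes from buf[%u]\n",
		       copy_end - copy_start, copy_start - decompressed);
	else
		printf("no overlap yet, keep decompressing\n");
	return 0;
}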
c2fcdcdf | 1483 | |
19562430 TT |
1484 | /* |
1485 | * Shannon Entropy calculation | |
1486 | * | |
52042d8e | 1487 | * Pure byte distribution analysis fails to determine compressibility of data. |
19562430 TT |
1488 | * Try calculating entropy to estimate the average minimum number of bits |
1489 | * needed to encode the sampled data. | |
1490 | * | |
1491 | * For convenience, return the percentage of needed bits instead of the |
1492 | * raw bit count. |
1493 | * | |
1494 | * @ENTROPY_LVL_ACEPTABLE - below this threshold the sample has low byte |
1495 | * entropy and is likely compressible |
1496 | * |
1497 | * @ENTROPY_LVL_HIGH - above this threshold the data is most likely not compressible |
1498 | * |
1499 | * Use of ilog2() decreases precision, so the acceptable threshold is lowered by 5 to compensate. |
1500 | */ | |
1501 | #define ENTROPY_LVL_ACEPTABLE (65) | |
1502 | #define ENTROPY_LVL_HIGH (80) | |
1503 | ||
1504 | /* | |
1505 | * For increased precision in the shannon_entropy calculation, |
1506 | * apply ilog2() to pow(n, M) to preserve more digits after the decimal point: |
1507 | * | |
1508 | * - maximum int bit length is 64 | |
1509 | * - ilog2(MAX_SAMPLE_SIZE) -> 13 | |
1510 | * - 13 * 4 = 52 < 64 -> M = 4 | |
1511 | * | |
1512 | * So use pow(n, 4). | |
1513 | */ | |
1514 | static inline u32 ilog2_w(u64 n) | |
1515 | { | |
1516 | return ilog2(n * n * n * n); | |
1517 | } | |
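As a quick sanity check of the pow(n, 4) trick, here is a hedged user-space computation, using GCC's __builtin_clzll in place of the kernel's ilog2(); the demo_* names are stand-ins:

/*
 * User-space check of ilog2_w(): ilog2(n^4) == floor(4 * log2(n)),
 * i.e. two extra bits of fractional precision over plain ilog2(n).
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t demo_ilog2(uint64_t n)
{
	return 63 - __builtin_clzll(n);	/* floor(log2(n)), n > 0 */
}

static uint32_t demo_ilog2_w(uint64_t n)
{
	return demo_ilog2(n * n * n * n);	/* floor(4 * log2(n)) */
}

int main(void)
{
	/* log2(3) ~ 1.585: plain ilog2 reports 1, ilog2_w reports 6 (~1.5 * 4) */
	printf("ilog2(3) = %u, ilog2_w(3) = %u\n",
	       demo_ilog2(3), demo_ilog2_w(3));
	/* max sample size 8192 = 2^13: 13 * 4 = 52 still fits in 64 bits */
	printf("ilog2_w(8192) = %u\n", demo_ilog2_w(8192));
	return 0;
}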
1518 | ||
1519 | static u32 shannon_entropy(struct heuristic_ws *ws) | |
1520 | { | |
1521 | const u32 entropy_max = 8 * ilog2_w(2); | |
1522 | u32 entropy_sum = 0; | |
1523 | u32 p, p_base, sz_base; | |
1524 | u32 i; | |
1525 | ||
1526 | sz_base = ilog2_w(ws->sample_size); | |
1527 | for (i = 0; i < BUCKET_SIZE && ws->bucket[i].count > 0; i++) { | |
1528 | p = ws->bucket[i].count; | |
1529 | p_base = ilog2_w(p); | |
1530 | entropy_sum += p * (sz_base - p_base); | |
1531 | } | |
1532 | ||
1533 | entropy_sum /= ws->sample_size; | |
1534 | return entropy_sum * 100 / entropy_max; | |
1535 | } | |
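For comparison, the exact Shannon entropy that the fixed-point code approximates is H = -sum(p_i * log2(p_i)); a hedged floating-point reference returning the same "percent of 8 bits" scale (not kernel code, link with -lm):

/*
 * Floating-point reference for shannon_entropy(): computes
 * H = -sum(p_i * log2(p_i)) over the byte histogram and scales it to a
 * percentage of the 8-bit maximum, matching the fixed-point version's
 * return convention.
 */
#include <math.h>
#include <stdint.h>

static uint32_t shannon_entropy_ref(const uint32_t *bucket, int nbuckets,
				    uint32_t sample_size)
{
	double entropy = 0.0;
	int i;

	for (i = 0; i < nbuckets; i++) {
		double p;

		if (!bucket[i])
			continue;
		p = (double)bucket[i] / sample_size;
		entropy -= p * log2(p);
	}
	/* 8 bits/byte is the maximum; report a percentage like the kernel */
	return (uint32_t)(entropy * 100.0 / 8.0);
}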
1536 | ||
440c840c TT |
1537 | #define RADIX_BASE 4U |
1538 | #define COUNTERS_SIZE (1U << RADIX_BASE) | |
1539 | ||
1540 | static u8 get4bits(u64 num, int shift) { | |
1541 | u8 low4bits; | |
1542 | ||
1543 | num >>= shift; | |
1544 | /* Complement the digit so an ascending counting sort yields descending order */ |
1545 | low4bits = (COUNTERS_SIZE - 1) - (num % COUNTERS_SIZE); | |
1546 | return low4bits; | |
1547 | } | |
1548 | ||
440c840c TT |
1549 | /* |
1550 | * Use 4 bits as radix base | |
52042d8e | 1551 | * Use 16 u32 counters for calculating new position in buf array |
440c840c TT |
1552 | * |
1553 | * @array - array that will be sorted | |
1554 | * @array_buf - buffer array to store sorting results | |
1555 | * must be equal in size to @array | |
1556 | * @num - array size | |
440c840c | 1557 | */ |
23ae8c63 | 1558 | static void radix_sort(struct bucket_item *array, struct bucket_item *array_buf, |
36243c91 | 1559 | int num) |
858177d3 | 1560 | { |
440c840c TT |
1561 | u64 max_num; |
1562 | u64 buf_num; | |
1563 | u32 counters[COUNTERS_SIZE]; | |
1564 | u32 new_addr; | |
1565 | u32 addr; | |
1566 | int bitlen; | |
1567 | int shift; | |
1568 | int i; | |
858177d3 | 1569 | |
440c840c TT |
1570 | /* |
1571 | * Try to avoid useless loop iterations for small numbers stored in wide |
1572 | * counters. Example: 48 33 4 ... in a 64bit array |
1573 | */ | |
23ae8c63 | 1574 | max_num = array[0].count; |
440c840c | 1575 | for (i = 1; i < num; i++) { |
23ae8c63 | 1576 | buf_num = array[i].count; |
440c840c TT |
1577 | if (buf_num > max_num) |
1578 | max_num = buf_num; | |
1579 | } | |
1580 | ||
1581 | buf_num = ilog2(max_num); | |
1582 | bitlen = ALIGN(buf_num, RADIX_BASE * 2); | |
1583 | ||
1584 | shift = 0; | |
1585 | while (shift < bitlen) { | |
1586 | memset(counters, 0, sizeof(counters)); | |
1587 | ||
1588 | for (i = 0; i < num; i++) { | |
23ae8c63 | 1589 | buf_num = array[i].count; |
440c840c TT |
1590 | addr = get4bits(buf_num, shift); |
1591 | counters[addr]++; | |
1592 | } | |
1593 | ||
1594 | for (i = 1; i < COUNTERS_SIZE; i++) | |
1595 | counters[i] += counters[i - 1]; | |
1596 | ||
1597 | for (i = num - 1; i >= 0; i--) { | |
23ae8c63 | 1598 | buf_num = array[i].count; |
440c840c TT |
1599 | addr = get4bits(buf_num, shift); |
1600 | counters[addr]--; | |
1601 | new_addr = counters[addr]; | |
7add17be | 1602 | array_buf[new_addr] = array[i]; |
440c840c TT |
1603 | } |
1604 | ||
1605 | shift += RADIX_BASE; | |
1606 | ||
1607 | /* | |
1608 | * A normal radix sort would copy the data from the temporary array |
1609 | * back to the main one, which costs CPU time. Avoid the memcpy() by |
1610 | * running another sort iteration from the temporary array back into |
1611 | * the original one. |
1612 | */ | |
1613 | memset(counters, 0, sizeof(counters)); | |
1614 | ||
1615 | for (i = 0; i < num; i++) { |
23ae8c63 | 1616 | buf_num = array_buf[i].count; |
440c840c TT |
1617 | addr = get4bits(buf_num, shift); |
1618 | counters[addr]++; | |
1619 | } | |
1620 | ||
1621 | for (i = 1; i < COUNTERS_SIZE; i++) | |
1622 | counters[i] += counters[i - 1]; | |
1623 | ||
1624 | for (i = num - 1; i >= 0; i--) { | |
23ae8c63 | 1625 | buf_num = array_buf[i].count; |
440c840c TT |
1626 | addr = get4bits(buf_num, shift); |
1627 | counters[addr]--; | |
1628 | new_addr = counters[addr]; | |
7add17be | 1629 | array[new_addr] = array_buf[i]; |
440c840c TT |
1630 | } |
1631 | ||
1632 | shift += RADIX_BASE; | |
1633 | } | |
858177d3 TT |
1634 | } |
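A compressed user-space rendition of the same descending radix sort (4-bit digits, ping-ponging between two buffers instead of a final memcpy) factors the repeated counting pass into a helper; struct bucket_item and the constants are redeclared locally for the sketch, and unlike the kernel version the pass count here is not bounded by the largest value's bit length:

/*
 * User-space sketch of the descending 4-bit radix sort above.  Two
 * counting passes per iteration land the data back in the original
 * array, so no copy-back is needed.
 */
#include <stdint.h>

#define RADIX_BASE    4U
#define COUNTERS_SIZE (1U << RADIX_BASE)

struct bucket_item { uint32_t count; };

static uint8_t get4bits(uint64_t num, int shift)
{
	/* complemented digit => descending order from an ascending sort */
	return (COUNTERS_SIZE - 1) - ((num >> shift) % COUNTERS_SIZE);
}

static void counting_pass(const struct bucket_item *from,
			  struct bucket_item *to, int num, int shift)
{
	uint32_t counters[COUNTERS_SIZE] = { 0 };
	int i;

	for (i = 0; i < num; i++)
		counters[get4bits(from[i].count, shift)]++;
	for (i = 1; i < (int)COUNTERS_SIZE; i++)
		counters[i] += counters[i - 1];
	/* walk backwards to keep the sort stable */
	for (i = num - 1; i >= 0; i--)
		to[--counters[get4bits(from[i].count, shift)]] = from[i];
}

static void radix_sort_demo(struct bucket_item *array,
			    struct bucket_item *buf, int num)
{
	int shift;

	/* 32-bit counts => 8 digits; pairs of passes end up in array */
	for (shift = 0; shift < 32; shift += 2 * RADIX_BASE) {
		counting_pass(array, buf, num, shift);
		counting_pass(buf, array, num, shift + RADIX_BASE);
	}
}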
1635 | ||
1636 | /* | |
1637 | * Size of the core byte set - how many byte values cover 90% of the sample |
1638 | * | |
1639 | * There are several types of structured binary data that use nearly all byte | |
1640 | * values. The distribution can be uniform and counts in all buckets will be | |
1641 | * nearly the same (eg. encrypted data). Unlikely to be compressible. | |
1642 | * | |
1643 | * The other possibility is a normal (Gaussian) distribution, where the data |
1644 | * could be compressible, but we have to take a few more steps to decide |
1645 | * how much. |
1646 | * | |
1647 | * @BYTE_CORE_SET_LOW - a small set of byte values is repeated frequently, |
1648 | * which a compression algorithm can exploit easily |
1649 | * @BYTE_CORE_SET_HIGH - the data has a near-uniform distribution and with |
1650 | * high probability is not compressible |
1651 | */ | |
1652 | #define BYTE_CORE_SET_LOW (64) | |
1653 | #define BYTE_CORE_SET_HIGH (200) | |
1654 | ||
1655 | static int byte_core_set_size(struct heuristic_ws *ws) | |
1656 | { | |
1657 | u32 i; | |
1658 | u32 coreset_sum = 0; | |
1659 | const u32 core_set_threshold = ws->sample_size * 90 / 100; | |
1660 | struct bucket_item *bucket = ws->bucket; | |
1661 | ||
1662 | /* Sort in reverse order */ | |
36243c91 | 1663 | radix_sort(ws->bucket, ws->bucket_b, BUCKET_SIZE); |
858177d3 TT |
1664 | |
1665 | for (i = 0; i < BYTE_CORE_SET_LOW; i++) | |
1666 | coreset_sum += bucket[i].count; | |
1667 | ||
1668 | if (coreset_sum > core_set_threshold) | |
1669 | return i; | |
1670 | ||
1671 | for (; i < BYTE_CORE_SET_HIGH && bucket[i].count > 0; i++) { | |
1672 | coreset_sum += bucket[i].count; | |
1673 | if (coreset_sum > core_set_threshold) | |
1674 | break; | |
1675 | } | |
1676 | ||
1677 | return i; | |
1678 | } | |
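The same measurement can be phrased without the radix machinery: sort the histogram descending (qsort here, a hedged stand-in for the kernel's radix_sort) and count how many buckets it takes to reach 90% of the sample:

/*
 * Hedged stand-alone version of byte_core_set_size(): qsort stands in
 * for the kernel's radix_sort, and the index where the threshold is
 * crossed is returned directly.
 */
#include <stdlib.h>
#include <stdint.h>

static int cmp_desc(const void *a, const void *b)
{
	uint32_t ca = *(const uint32_t *)a, cb = *(const uint32_t *)b;

	return (ca < cb) - (ca > cb);	/* descending */
}

static int core_set_size(uint32_t *bucket, int nbuckets, uint32_t sample_size)
{
	const uint32_t threshold = sample_size * 90 / 100;
	uint32_t sum = 0;
	int i;

	qsort(bucket, nbuckets, sizeof(*bucket), cmp_desc);
	for (i = 0; i < nbuckets && bucket[i]; i++) {
		sum += bucket[i];
		if (sum > threshold)
			break;
	}
	return i;	/* small => a few byte values dominate the sample */
}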
1679 | ||
a288e92c TT |
1680 | /* |
1681 | * Count byte values in buckets. | |
1682 | * This heuristic can detect textual data (configs, xml, json, html, etc). | |
1684 | * In most text-like data the byte set is restricted to a limited number |
1685 | * of possible characters, and that restriction in most cases makes the |
1686 | * data easy to compress. |
1686 | * | |
1687 | * @BYTE_SET_THRESHOLD - consider all data within this byte set size: | |
1688 | * less - compressible | |
1689 | * more - need additional analysis | |
1690 | */ | |
1691 | #define BYTE_SET_THRESHOLD (64) | |
1692 | ||
1693 | static u32 byte_set_size(const struct heuristic_ws *ws) | |
1694 | { | |
1695 | u32 i; | |
1696 | u32 byte_set_size = 0; | |
1697 | ||
1698 | for (i = 0; i < BYTE_SET_THRESHOLD; i++) { | |
1699 | if (ws->bucket[i].count > 0) | |
1700 | byte_set_size++; | |
1701 | } | |
1702 | ||
1703 | /* | |
1704 | * Continue collecting the counts of byte values in buckets. If the byte |
1705 | * set size is bigger than the threshold, it's pointless to continue, |
1706 | * the detection technique would fail for this type of data. | |
1707 | */ | |
1708 | for (; i < BUCKET_SIZE; i++) { | |
1709 | if (ws->bucket[i].count > 0) { | |
1710 | byte_set_size++; | |
1711 | if (byte_set_size > BYTE_SET_THRESHOLD) | |
1712 | return byte_set_size; | |
1713 | } | |
1714 | } | |
1715 | ||
1716 | return byte_set_size; | |
1717 | } | |
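byte_set_size() is essentially "how many distinct byte values occur"; a trivial user-space equivalent over a raw buffer, without the early-exit optimization:

/*
 * Trivial user-space equivalent of byte_set_size(): count distinct byte
 * values in a sample buffer.
 */
#include <stddef.h>
#include <stdint.h>

static uint32_t distinct_bytes(const uint8_t *sample, size_t len)
{
	uint32_t seen[256] = { 0 };
	uint32_t n = 0;
	size_t i;

	for (i = 0; i < len; i++)
		if (!seen[sample[i]]++)
			n++;
	return n;	/* ASCII text typically stays well under 64 */
}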
1718 | ||
1fe4f6fa TT |
1719 | static bool sample_repeated_patterns(struct heuristic_ws *ws) |
1720 | { | |
1721 | const u32 half_of_sample = ws->sample_size / 2; | |
1722 | const u8 *data = ws->sample; | |
1723 | ||
1724 | return memcmp(&data[0], &data[half_of_sample], half_of_sample) == 0; | |
1725 | } | |
1726 | ||
a440d48c TT |
1727 | static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end, |
1728 | struct heuristic_ws *ws) | |
1729 | { | |
1730 | struct page *page; | |
1731 | u64 index, index_end; | |
1732 | u32 i, curr_sample_pos; | |
1733 | u8 *in_data; | |
1734 | ||
1735 | /* | |
1736 | * Compression handles the input data in chunks of 128KiB |
1737 | * (defined by BTRFS_MAX_UNCOMPRESSED) | |
1738 | * | |
1739 | * We do the same for the heuristic and loop over the whole range. | |
1740 | * | |
1741 | * MAX_SAMPLE_SIZE - calculated under the assumption that the heuristic |
1742 | * will process no more than BTRFS_MAX_UNCOMPRESSED at a time. |
1743 | */ | |
1744 | if (end - start > BTRFS_MAX_UNCOMPRESSED) | |
1745 | end = start + BTRFS_MAX_UNCOMPRESSED; | |
1746 | ||
1747 | index = start >> PAGE_SHIFT; | |
1748 | index_end = end >> PAGE_SHIFT; | |
1749 | ||
1750 | /* Don't miss unaligned end */ | |
1751 | if (!IS_ALIGNED(end, PAGE_SIZE)) | |
1752 | index_end++; | |
1753 | ||
1754 | curr_sample_pos = 0; | |
1755 | while (index < index_end) { | |
1756 | page = find_get_page(inode->i_mapping, index); | |
58c1a35c | 1757 | in_data = kmap_local_page(page); |
a440d48c TT |
1758 | /* Handle case where the start is not aligned to PAGE_SIZE */ |
1759 | i = start % PAGE_SIZE; | |
1760 | while (i < PAGE_SIZE - SAMPLING_READ_SIZE) { | |
1761 | /* Don't sample any garbage from the last page */ | |
1762 | if (start > end - SAMPLING_READ_SIZE) | |
1763 | break; | |
1764 | memcpy(&ws->sample[curr_sample_pos], &in_data[i], | |
1765 | SAMPLING_READ_SIZE); | |
1766 | i += SAMPLING_INTERVAL; | |
1767 | start += SAMPLING_INTERVAL; | |
1768 | curr_sample_pos += SAMPLING_READ_SIZE; | |
1769 | } | |
58c1a35c | 1770 | kunmap_local(in_data); |
a440d48c TT |
1771 | put_page(page); |
1772 | ||
1773 | index++; | |
1774 | } | |
1775 | ||
1776 | ws->sample_size = curr_sample_pos; | |
1777 | } | |
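Detached from the page machinery, the sampling schedule is simply "copy SAMPLING_READ_SIZE bytes every SAMPLING_INTERVAL bytes over at most 128KiB"; both constants are defined earlier in this file, and the values used below (16 and 256) are assumed from current kernels:

/*
 * Page-free sketch of heuristic_collect_sample()'s schedule.  The
 * constant values are assumptions, not taken from this listing.
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define DEMO_SAMPLING_READ_SIZE 16
#define DEMO_SAMPLING_INTERVAL  256
#define DEMO_MAX_UNCOMPRESSED   (128 * 1024)

static size_t collect_sample(const uint8_t *data, size_t len,
			     uint8_t *sample /* sized for the worst case */)
{
	size_t pos = 0, out = 0;

	if (len > DEMO_MAX_UNCOMPRESSED)
		len = DEMO_MAX_UNCOMPRESSED;
	while (pos + DEMO_SAMPLING_READ_SIZE <= len) {
		memcpy(sample + out, data + pos, DEMO_SAMPLING_READ_SIZE);
		out += DEMO_SAMPLING_READ_SIZE;
		pos += DEMO_SAMPLING_INTERVAL;
	}
	return out;	/* corresponds to ws->sample_size above */
}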
1778 | ||
c2fcdcdf TT |
1779 | /* |
1780 | * Compression heuristic. | |
1781 | * | |
1782 | * For now it's a naive and optimistic 'return true'; we'll extend the logic |
1783 | * to quickly (compared to direct compression) detect data characteristics |
1784 | * (compressible/incompressible) to avoid wasting CPU time on incompressible |
1785 | * data. |
1786 | * | |
1787 | * The following types of analysis can be performed: | |
1788 | * - detect mostly zero data | |
1789 | * - detect data with low "byte set" size (text, etc) | |
1790 | * - detect data with low/high "core byte" set | |
1791 | * | |
1792 | * Return non-zero if the compression should be done, 0 otherwise. | |
1793 | */ | |
1794 | int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end) | |
1795 | { | |
7bf49943 | 1796 | struct list_head *ws_list = get_workspace(0, 0); |
4e439a0b | 1797 | struct heuristic_ws *ws; |
a440d48c TT |
1798 | u32 i; |
1799 | u8 byte; | |
19562430 | 1800 | int ret = 0; |
c2fcdcdf | 1801 | |
4e439a0b TT |
1802 | ws = list_entry(ws_list, struct heuristic_ws, list); |
1803 | ||
a440d48c TT |
1804 | heuristic_collect_sample(inode, start, end, ws); |
1805 | ||
1fe4f6fa TT |
1806 | if (sample_repeated_patterns(ws)) { |
1807 | ret = 1; | |
1808 | goto out; | |
1809 | } | |
1810 | ||
a440d48c TT |
1811 | memset(ws->bucket, 0, sizeof(*ws->bucket)*BUCKET_SIZE); |
1812 | ||
1813 | for (i = 0; i < ws->sample_size; i++) { | |
1814 | byte = ws->sample[i]; | |
1815 | ws->bucket[byte].count++; | |
c2fcdcdf TT |
1816 | } |
1817 | ||
a288e92c TT |
1818 | i = byte_set_size(ws); |
1819 | if (i < BYTE_SET_THRESHOLD) { | |
1820 | ret = 2; | |
1821 | goto out; | |
1822 | } | |
1823 | ||
858177d3 TT |
1824 | i = byte_core_set_size(ws); |
1825 | if (i <= BYTE_CORE_SET_LOW) { | |
1826 | ret = 3; | |
1827 | goto out; | |
1828 | } | |
1829 | ||
1830 | if (i >= BYTE_CORE_SET_HIGH) { | |
1831 | ret = 0; | |
1832 | goto out; | |
1833 | } | |
1834 | ||
19562430 TT |
1835 | i = shannon_entropy(ws); |
1836 | if (i <= ENTROPY_LVL_ACEPTABLE) { | |
1837 | ret = 4; | |
1838 | goto out; | |
1839 | } | |
1840 | ||
1841 | /* | |
1842 | * For the levels below ENTROPY_LVL_HIGH, additional analysis would be | |
1843 | * needed to give the green light to compression. |
1844 | * | |
1845 | * For now just assume that compression at that level is not worth the | |
1846 | * resources because: | |
1847 | * | |
1848 | * 1. it is possible to defrag the data later | |
1849 | * | |
1850 | * 2. the data may turn out to be hardly compressible, eg. 150 distinct |
1851 | * byte values with every bucket count at a level of ~54, which would |
1852 | * confuse the heuristic. This can happen when the data has internal |
1853 | * repeated patterns like "abbacbbc...", detectable only by analyzing |
1854 | * pairs of bytes, which is too costly. |
1855 | */ | |
1856 | if (i < ENTROPY_LVL_HIGH) { | |
1857 | ret = 5; | |
1858 | goto out; | |
1859 | } else { | |
1860 | ret = 0; | |
1861 | goto out; | |
1862 | } | |
1863 | ||
1fe4f6fa | 1864 | out: |
929f4baf | 1865 | put_workspace(0, ws_list); |
c2fcdcdf TT |
1866 | return ret; |
1867 | } | |
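For context, the in-tree caller treats any non-zero return as "worth compressing"; a hedged sketch of such a call site (the real decision lives in inode_need_compress() in inode.c and also weighs mount options):

/*
 * Hedged sketch of how a caller consumes the heuristic: return values
 * 1..5 encode which detector fired, 0 means "don't bother".
 */
static bool want_compress(struct inode *inode, u64 start, u64 end)
{
	return btrfs_compress_heuristic(inode, start, end) != 0;
}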
f51d2b59 | 1868 | |
d0ab62ce DZ |
1869 | /* |
1870 | * Convert the compression suffix (eg. after "zlib" starting with ":") to | |
1871 | * a level; an unrecognized string sets the default level |
1872 | */ | |
1873 | unsigned int btrfs_compress_str2level(unsigned int type, const char *str) | |
f51d2b59 | 1874 | { |
d0ab62ce DZ |
1875 | unsigned int level = 0; |
1876 | int ret; | |
1877 | ||
1878 | if (!type) | |
f51d2b59 DS |
1879 | return 0; |
1880 | ||
d0ab62ce DZ |
1881 | if (str[0] == ':') { |
1882 | ret = kstrtouint(str + 1, 10, &level); | |
1883 | if (ret) | |
1884 | level = 0; | |
1885 | } | |
1886 | ||
b0c1fe1e DS |
1887 | level = btrfs_compress_set_level(type, level); |
1888 | ||
1889 | return level; | |
1890 | } |
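A user-space illustration of the same suffix parsing, using strtoul in place of the kernel's kstrtouint; the clamping helper is a hypothetical stand-in for btrfs_compress_set_level(), with a made-up default:

/*
 * User-space illustration of btrfs_compress_str2level()-style parsing:
 * ":9" -> 9, a bad or missing suffix -> 0, then clamped to the
 * algorithm's default/valid range by a stand-in helper.
 */
#include <stdlib.h>

static unsigned int clamp_level(unsigned int level, unsigned int max)
{
	/* stand-in for btrfs_compress_set_level(): 0 picks the default */
	if (level == 0)
		return 3;	/* hypothetical default level */
	return level > max ? max : level;
}

static unsigned int parse_level(const char *str, unsigned int max)
{
	unsigned int level = 0;
	char *end;

	if (str[0] == ':') {
		unsigned long v = strtoul(str + 1, &end, 10);

		/* accept only a fully numeric suffix, like kstrtouint */
		if (end != str + 1 && *end == '\0')
			level = (unsigned int)v;
	}
	return clamp_level(level, max);
}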