1 // SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle. All rights reserved.
 *
 * Based on jffs2 zlib code:
 * Copyright © 2001-2007 Red Hat, Inc.
 * Created by David Woodhouse <dwmw2@infradead.org>
 */
10 #include <linux/kernel.h>
11 #include <linux/slab.h>
12 #include <linux/zlib.h>
13 #include <linux/zutil.h>
15 #include <linux/init.h>
16 #include <linux/err.h>
17 #include <linux/sched.h>
18 #include <linux/pagemap.h>
19 #include <linux/bio.h>
20 #include <linux/refcount.h>
21 #include "compression.h"
26 struct list_head list;
30 static struct workspace_manager wsm;
/*
 * Grab a zlib workspace from the shared workspace manager and stamp the
 * requested compression level on it so the subsequent compress call knows
 * which level to pass to zlib_deflateInit().
 */
32 struct list_head *zlib_get_workspace(unsigned int level)
34 struct list_head *ws = btrfs_get_workspace(&wsm, level);
35 struct workspace *workspace = list_entry(ws, struct workspace, list);
/* remember the caller's level; read back in zlib_compress_pages() */
37 workspace->level = level;
/* Hand a workspace back to the shared manager for reuse by other callers. */
42 void zlib_put_workspace(struct list_head *ws)
44 btrfs_put_workspace(&wsm, ws);
/*
 * Tear down a workspace: release the zlib scratch area (kvfree pairs with
 * the kvmalloc in zlib_alloc_workspace()) and the one-page bounce buffer.
 */
47 void zlib_free_workspace(struct list_head *ws)
49 struct workspace *workspace = list_entry(ws, struct workspace, list);
51 kvfree(workspace->strm.workspace);
52 kfree(workspace->buf);
/*
 * Allocate and initialize a zlib workspace usable for both compression and
 * decompression.
 *
 * Returns the embedded list_head on success, or ERR_PTR(-ENOMEM) on any
 * allocation failure (partial allocations are cleaned up via
 * zlib_free_workspace(), which tolerates NULL members).
 */
56 struct list_head *zlib_alloc_workspace(unsigned int level)
58 struct workspace *workspace;
61 workspace = kzalloc(sizeof(*workspace), GFP_KERNEL);
63 return ERR_PTR(-ENOMEM);
/*
 * One scratch area is shared by deflate and inflate, so size it for
 * whichever of the two needs more.
 */
65 workspacesize = max(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
66 zlib_inflate_workspacesize());
/* kvmalloc: the zlib scratch area can be large; vmalloc fallback is fine */
67 workspace->strm.workspace = kvmalloc(workspacesize, GFP_KERNEL);
68 workspace->level = level;
/* one-page bounce buffer used by the decompress paths */
69 workspace->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
/* NOTE(review): failure branches to the cleanup path below — the goto/label
 * lines are not visible in this view; confirm against the full file. */
70 if (!workspace->strm.workspace || !workspace->buf)
73 INIT_LIST_HEAD(&workspace->list);
75 return &workspace->list;
/* cleanup path: safe on partially-initialized workspaces */
77 zlib_free_workspace(&workspace->list);
78 return ERR_PTR(-ENOMEM);
/*
 * Compress the page-cache range starting at @start into a set of freshly
 * allocated output pages.
 *
 * On entry *total_out holds the number of input bytes to consume and
 * *out_pages the maximum number of destination pages. On success the three
 * out-parameters are rewritten with bytes consumed, bytes produced, and the
 * number of pages actually filled into @pages.
 *
 * Compression is abandoned early if the output would exceed the page budget
 * or if the data is clearly incompressible (output outgrowing input).
 */
81 int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
82 u64 start, struct page **pages, unsigned long *out_pages,
83 unsigned long *total_in, unsigned long *total_out)
85 struct workspace *workspace = list_entry(ws, struct workspace, list);
90 struct page *in_page = NULL;
91 struct page *out_page = NULL;
92 unsigned long bytes_left;
/* *total_out carries the requested input length on entry */
93 unsigned long len = *total_out;
94 unsigned long nr_dest_pages = *out_pages;
/* hard cap on compressed output: the caller's page budget */
95 const unsigned long max_out = nr_dest_pages * PAGE_SIZE;
/* level was stashed on the workspace by zlib_get_workspace() */
101 if (Z_OK != zlib_deflateInit(&workspace->strm, workspace->level)) {
102 pr_warn("BTRFS: deflateInit failed\n");
107 workspace->strm.total_in = 0;
108 workspace->strm.total_out = 0;
/* map the first input page from the page cache */
110 in_page = find_get_page(mapping, start >> PAGE_SHIFT);
111 data_in = kmap(in_page);
/* GFP_NOFS: we may be called from writeback; avoid fs reentry */
113 out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
114 if (out_page == NULL) {
118 cpage_out = kmap(out_page);
/* prime the zlib stream with one page of input and one page of output */
122 workspace->strm.next_in = data_in;
123 workspace->strm.next_out = cpage_out;
124 workspace->strm.avail_out = PAGE_SIZE;
125 workspace->strm.avail_in = min(len, PAGE_SIZE);
/* main loop: feed input a page at a time, draining output as it fills */
127 while (workspace->strm.total_in < len) {
128 ret = zlib_deflate(&workspace->strm, Z_SYNC_FLUSH);
130 pr_debug("BTRFS: deflate in loop returned %d\n",
132 zlib_deflateEnd(&workspace->strm);
137 /* we're making it bigger, give up */
138 if (workspace->strm.total_in > 8192 &&
139 workspace->strm.total_in <
140 workspace->strm.total_out) {
144 /* we need another page for writing out. Test this
145 * before the total_in so we will pull in a new page for
146 * the stream end if required
148 if (workspace->strm.avail_out == 0) {
/* out of page budget: compression is not worth it, bail */
150 if (nr_pages == nr_dest_pages) {
155 out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
156 if (out_page == NULL) {
160 cpage_out = kmap(out_page);
161 pages[nr_pages] = out_page;
163 workspace->strm.avail_out = PAGE_SIZE;
164 workspace->strm.next_out = cpage_out;
/* all requested input consumed: leave the loop and finish the stream */
167 if (workspace->strm.total_in >= len)
170 /* we've read in a full page, get a new one */
171 if (workspace->strm.avail_in == 0) {
172 if (workspace->strm.total_out > max_out)
175 bytes_left = len - workspace->strm.total_in;
/* NOTE(review): the advance of start and release of the previous in_page
 * are not visible in this view; confirm against the full file. */
180 in_page = find_get_page(mapping,
181 start >> PAGE_SHIFT);
182 data_in = kmap(in_page);
183 workspace->strm.avail_in = min(bytes_left,
185 workspace->strm.next_in = data_in;
/* flush the stream tail: no more input, let zlib emit the trailer */
188 workspace->strm.avail_in = 0;
189 ret = zlib_deflate(&workspace->strm, Z_FINISH);
190 zlib_deflateEnd(&workspace->strm);
192 if (ret != Z_STREAM_END) {
/* compressed result is no smaller than the input: report failure */
197 if (workspace->strm.total_out >= workspace->strm.total_in) {
203 *total_out = workspace->strm.total_out;
204 *total_in = workspace->strm.total_in;
206 *out_pages = nr_pages;
/*
 * Inflate an entire compressed extent (@cb->compressed_pages, length
 * @cb->compressed_len) and copy the decompressed bytes into the pages of
 * @cb->orig_bio via btrfs_decompress_buf2page(), using the workspace's
 * one-page bounce buffer as the inflate output window.
 */
217 int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
219 struct workspace *workspace = list_entry(ws, struct workspace, list);
/* default: full zlib stream with header + adler32 */
221 int wbits = MAX_WBITS;
223 size_t total_out = 0;
224 unsigned long page_in_index = 0;
225 size_t srclen = cb->compressed_len;
226 unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_SIZE);
227 unsigned long buf_start;
228 struct page **pages_in = cb->compressed_pages;
229 u64 disk_start = cb->start;
230 struct bio *orig_bio = cb->orig_bio;
232 data_in = kmap(pages_in[page_in_index]);
/* feed at most one page of compressed input at a time */
233 workspace->strm.next_in = data_in;
234 workspace->strm.avail_in = min_t(size_t, srclen, PAGE_SIZE);
235 workspace->strm.total_in = 0;
237 workspace->strm.total_out = 0;
/* inflate into the workspace bounce buffer, then fan out to the bio */
238 workspace->strm.next_out = workspace->buf;
239 workspace->strm.avail_out = PAGE_SIZE;
241 /* If it's deflate, and it's got no preset dictionary, then
242 we can tell zlib to skip the adler32 check. */
243 if (srclen > 2 && !(data_in[1] & PRESET_DICT) &&
244 ((data_in[0] & 0x0f) == Z_DEFLATED) &&
245 !(((data_in[0]<<8) + data_in[1]) % 31)) {
/* negative wbits = raw deflate for zlib; skip the 2-byte header */
247 wbits = -((data_in[0] >> 4) + 8);
248 workspace->strm.next_in += 2;
249 workspace->strm.avail_in -= 2;
252 if (Z_OK != zlib_inflateInit2(&workspace->strm, wbits)) {
253 pr_warn("BTRFS: inflateInit failed\n");
254 kunmap(pages_in[page_in_index]);
257 while (workspace->strm.total_in < srclen) {
258 ret = zlib_inflate(&workspace->strm, Z_NO_FLUSH);
259 if (ret != Z_OK && ret != Z_STREAM_END)
262 buf_start = total_out;
263 total_out = workspace->strm.total_out;
265 /* we didn't make progress in this inflate call, we're done */
266 if (buf_start == total_out)
/* copy [buf_start, total_out) from the bounce buffer into the bio pages */
269 ret2 = btrfs_decompress_buf2page(workspace->buf, buf_start,
270 total_out, disk_start,
/* reset the output window for the next inflate round */
277 workspace->strm.next_out = workspace->buf;
278 workspace->strm.avail_out = PAGE_SIZE;
/* current input page exhausted: map the next compressed page */
280 if (workspace->strm.avail_in == 0) {
282 kunmap(pages_in[page_in_index]);
/* NOTE(review): page_in_index is advanced by an elided line in this view */
284 if (page_in_index >= total_pages_in) {
288 data_in = kmap(pages_in[page_in_index]);
289 workspace->strm.next_in = data_in;
290 tmp = srclen - workspace->strm.total_in;
291 workspace->strm.avail_in = min(tmp,
295 if (ret != Z_STREAM_END)
300 zlib_inflateEnd(&workspace->strm);
302 kunmap(pages_in[page_in_index]);
/* zero any tail of the bio that decompression did not fill */
304 zero_fill_bio(orig_bio);
/*
 * Inflate a single contiguous compressed buffer (@data_in, @srclen bytes)
 * into one destination page, delivering at most @destlen bytes and skipping
 * the first @start_byte bytes of decompressed output. Used for inline
 * extents and other single-page reads.
 */
308 int zlib_decompress(struct list_head *ws, unsigned char *data_in,
309 struct page *dest_page, unsigned long start_byte, size_t srclen,
312 struct workspace *workspace = list_entry(ws, struct workspace, list);
/* default: full zlib stream with header + adler32 */
314 int wbits = MAX_WBITS;
315 unsigned long bytes_left;
316 unsigned long total_out = 0;
317 unsigned long pg_offset = 0;
/* never deliver more than one page */
320 destlen = min_t(unsigned long, destlen, PAGE_SIZE);
321 bytes_left = destlen;
/* whole compressed source is contiguous: feed it in one shot */
323 workspace->strm.next_in = data_in;
324 workspace->strm.avail_in = srclen;
325 workspace->strm.total_in = 0;
/* inflate into the workspace bounce buffer, then memcpy to dest_page */
327 workspace->strm.next_out = workspace->buf;
328 workspace->strm.avail_out = PAGE_SIZE;
329 workspace->strm.total_out = 0;
330 /* If it's deflate, and it's got no preset dictionary, then
331 we can tell zlib to skip the adler32 check. */
332 if (srclen > 2 && !(data_in[1] & PRESET_DICT) &&
333 ((data_in[0] & 0x0f) == Z_DEFLATED) &&
334 !(((data_in[0]<<8) + data_in[1]) % 31)) {
/* negative wbits = raw deflate for zlib; skip the 2-byte header */
336 wbits = -((data_in[0] >> 4) + 8);
337 workspace->strm.next_in += 2;
338 workspace->strm.avail_in -= 2;
341 if (Z_OK != zlib_inflateInit2(&workspace->strm, wbits)) {
342 pr_warn("BTRFS: inflateInit failed\n");
346 while (bytes_left > 0) {
347 unsigned long buf_start;
348 unsigned long buf_offset;
351 ret = zlib_inflate(&workspace->strm, Z_NO_FLUSH);
352 if (ret != Z_OK && ret != Z_STREAM_END)
355 buf_start = total_out;
356 total_out = workspace->strm.total_out;
/* no forward progress from zlib: stop to avoid spinning */
358 if (total_out == buf_start) {
/* still before the requested offset: discard this window and continue */
363 if (total_out <= start_byte)
/* the window straddles start_byte: copy only the tail past it */
366 if (total_out > start_byte && buf_start < start_byte)
367 buf_offset = start_byte - buf_start;
/* clamp the copy to page space, window space, and bytes still wanted */
371 bytes = min(PAGE_SIZE - pg_offset,
372 PAGE_SIZE - buf_offset);
373 bytes = min(bytes, bytes_left);
375 kaddr = kmap_atomic(dest_page);
376 memcpy(kaddr + pg_offset, workspace->buf + buf_offset, bytes);
377 kunmap_atomic(kaddr);
/* reset the output window for the next inflate round */
382 workspace->strm.next_out = workspace->buf;
383 workspace->strm.avail_out = PAGE_SIZE;
/* stream ended early without satisfying the request: report error */
386 if (ret != Z_STREAM_END && bytes_left != 0)
391 zlib_inflateEnd(&workspace->strm);
394 * this should only happen if zlib returned fewer bytes than we
395 * expected. btrfs_get_block is responsible for zeroing from the
396 * end of the inline extent (destlen) to the end of the page
398 if (pg_offset < destlen) {
399 kaddr = kmap_atomic(dest_page);
400 memset(kaddr + pg_offset, 0, destlen - pg_offset);
401 kunmap_atomic(kaddr);
/*
 * Operations table wiring the zlib implementation into the generic btrfs
 * compression layer; all workspaces are pooled through the shared wsm.
 */
406 const struct btrfs_compress_op btrfs_zlib_compress = {
407 .workspace_manager = &wsm,
408 .get_workspace = zlib_get_workspace,
409 .put_workspace = zlib_put_workspace,
410 .alloc_workspace = zlib_alloc_workspace,
411 .free_workspace = zlib_free_workspace,
413 .default_level = BTRFS_ZLIB_DEFAULT_LEVEL,