fs/btrfs/lzo.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/bio.h>
#include <linux/lzo.h>
#include <linux/refcount.h>
#include "compression.h"

#define LZO_LEN 4

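/*
 * Layout of an lzo compressed extent, as produced by lzo_compress_pages()
 * and consumed by lzo_decompress_bio() and lzo_decompress():
 *
 *   4 bytes    little-endian total size of the compressed stream,
 *              including all headers and padding
 *   followed by one segment per compressed chunk (at most one page of
 *   input data each):
 *     4 bytes  little-endian size of the compressed segment that follows
 *     n bytes  the compressed segment itself
 *
 * Segment data may cross page boundaries, but a 4 byte header never does:
 * when fewer than 4 bytes are left in the current page, the remainder is
 * zero filled and the next header starts on the following page.
 */
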
struct workspace {
        void *mem;
        void *buf;      /* where decompressed data goes */
        void *cbuf;     /* where compressed data goes */
        struct list_head list;
};

static void lzo_free_workspace(struct list_head *ws)
{
        struct workspace *workspace = list_entry(ws, struct workspace, list);

        kvfree(workspace->buf);
        kvfree(workspace->cbuf);
        kvfree(workspace->mem);
        kfree(workspace);
}

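/*
 * Both working buffers are sized for the worst case lzo1x expansion of a
 * single page, since compression and decompression handle at most one page
 * of data per segment.
 */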
static struct list_head *lzo_alloc_workspace(void)
{
        struct workspace *workspace;

        workspace = kzalloc(sizeof(*workspace), GFP_KERNEL);
        if (!workspace)
                return ERR_PTR(-ENOMEM);

        workspace->mem = kvmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
        workspace->buf = kvmalloc(lzo1x_worst_compress(PAGE_SIZE), GFP_KERNEL);
        workspace->cbuf = kvmalloc(lzo1x_worst_compress(PAGE_SIZE), GFP_KERNEL);
        if (!workspace->mem || !workspace->buf || !workspace->cbuf)
                goto fail;

        INIT_LIST_HEAD(&workspace->list);

        return &workspace->list;
fail:
        lzo_free_workspace(&workspace->list);
        return ERR_PTR(-ENOMEM);
}

static inline void write_compress_length(char *buf, size_t len)
{
        __le32 dlen;

        dlen = cpu_to_le32(len);
        memcpy(buf, &dlen, LZO_LEN);
}

static inline size_t read_compress_length(const char *buf)
{
        __le32 dlen;

        memcpy(&dlen, buf, LZO_LEN);
        return le32_to_cpu(dlen);
}

static int lzo_compress_pages(struct list_head *ws,
                              struct address_space *mapping,
                              u64 start,
                              struct page **pages,
                              unsigned long *out_pages,
                              unsigned long *total_in,
                              unsigned long *total_out)
{
        struct workspace *workspace = list_entry(ws, struct workspace, list);
        int ret = 0;
        char *data_in;
        char *cpage_out;
        int nr_pages = 0;
        struct page *in_page = NULL;
        struct page *out_page = NULL;
        unsigned long bytes_left;
        unsigned long len = *total_out;
        unsigned long nr_dest_pages = *out_pages;
        const unsigned long max_out = nr_dest_pages * PAGE_SIZE;
        size_t in_len;
        size_t out_len;
        char *buf;
        unsigned long tot_in = 0;
        unsigned long tot_out = 0;
        unsigned long pg_bytes_left;
        unsigned long out_offset;
        unsigned long bytes;

        *out_pages = 0;
        *total_out = 0;
        *total_in = 0;

        in_page = find_get_page(mapping, start >> PAGE_SHIFT);
        data_in = kmap(in_page);

        /*
         * store the size of all chunks of compressed data in
         * the first 4 bytes
         */
        out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
        if (out_page == NULL) {
                ret = -ENOMEM;
                goto out;
        }
        cpage_out = kmap(out_page);
        out_offset = LZO_LEN;
        tot_out = LZO_LEN;
        pages[0] = out_page;
        nr_pages = 1;
        pg_bytes_left = PAGE_SIZE - LZO_LEN;

        /* compress at most one page of data each time */
        in_len = min(len, PAGE_SIZE);
        while (tot_in < len) {
                ret = lzo1x_1_compress(data_in, in_len, workspace->cbuf,
                                       &out_len, workspace->mem);
                if (ret != LZO_E_OK) {
                        pr_debug("BTRFS: lzo in loop returned %d\n", ret);
                        ret = -EIO;
                        goto out;
                }

                /* store the size of this chunk of compressed data */
                write_compress_length(cpage_out + out_offset, out_len);
                tot_out += LZO_LEN;
                out_offset += LZO_LEN;
                pg_bytes_left -= LZO_LEN;

                tot_in += in_len;
                tot_out += out_len;

                /* copy bytes from the working buffer into the pages */
                buf = workspace->cbuf;
                while (out_len) {
                        bytes = min_t(unsigned long, pg_bytes_left, out_len);

                        memcpy(cpage_out + out_offset, buf, bytes);

                        out_len -= bytes;
                        pg_bytes_left -= bytes;
                        buf += bytes;
                        out_offset += bytes;

                        /*
                         * we need another page for writing out.
                         *
                         * Note if there's less than 4 bytes left, we just
                         * skip to a new page.
                         */
                        if ((out_len == 0 && pg_bytes_left < LZO_LEN) ||
                            pg_bytes_left == 0) {
                                if (pg_bytes_left) {
                                        memset(cpage_out + out_offset, 0,
                                               pg_bytes_left);
                                        tot_out += pg_bytes_left;
                                }

                                /* we're done, don't allocate new page */
                                if (out_len == 0 && tot_in >= len)
                                        break;

                                kunmap(out_page);
                                if (nr_pages == nr_dest_pages) {
                                        out_page = NULL;
                                        ret = -E2BIG;
                                        goto out;
                                }

                                out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
                                if (out_page == NULL) {
                                        ret = -ENOMEM;
                                        goto out;
                                }
                                cpage_out = kmap(out_page);
                                pages[nr_pages++] = out_page;

                                pg_bytes_left = PAGE_SIZE;
                                out_offset = 0;
                        }
                }

                /* we're making it bigger, give up */
                if (tot_in > 8192 && tot_in < tot_out) {
                        ret = -E2BIG;
                        goto out;
                }

                /* we're all done */
                if (tot_in >= len)
                        break;

                if (tot_out > max_out)
                        break;

                bytes_left = len - tot_in;
                kunmap(in_page);
                put_page(in_page);

                start += PAGE_SIZE;
                in_page = find_get_page(mapping, start >> PAGE_SHIFT);
                data_in = kmap(in_page);
                in_len = min(bytes_left, PAGE_SIZE);
        }

        if (tot_out >= tot_in) {
                ret = -E2BIG;
                goto out;
        }

        /* store the size of all chunks of compressed data */
        cpage_out = kmap(pages[0]);
        write_compress_length(cpage_out, tot_out);

        kunmap(pages[0]);

        ret = 0;
        *total_out = tot_out;
        *total_in = tot_in;
out:
        *out_pages = nr_pages;
        if (out_page)
                kunmap(out_page);

        if (in_page) {
                kunmap(in_page);
                put_page(in_page);
        }

        return ret;
}

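/*
 * Decompress a whole compressed extent into the pages of the original bio.
 * The segment headers written by lzo_compress_pages() are walked one by
 * one; a segment that sits entirely within the current input page is
 * decompressed straight from the mapped page, otherwise it is first
 * reassembled into the workspace buffer.
 */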
static int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
{
        struct workspace *workspace = list_entry(ws, struct workspace, list);
        int ret = 0, ret2;
        char *data_in;
        unsigned long page_in_index = 0;
        size_t srclen = cb->compressed_len;
        unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_SIZE);
        unsigned long buf_start;
        unsigned long buf_offset = 0;
        unsigned long bytes;
        unsigned long working_bytes;
        size_t in_len;
        size_t out_len;
        unsigned long in_offset;
        unsigned long in_page_bytes_left;
        unsigned long tot_in;
        unsigned long tot_out;
        unsigned long tot_len;
        char *buf;
        bool may_late_unmap, need_unmap;
        struct page **pages_in = cb->compressed_pages;
        u64 disk_start = cb->start;
        struct bio *orig_bio = cb->orig_bio;

        data_in = kmap(pages_in[0]);
        tot_len = read_compress_length(data_in);

        tot_in = LZO_LEN;
        in_offset = LZO_LEN;
        tot_len = min_t(size_t, srclen, tot_len);
        in_page_bytes_left = PAGE_SIZE - LZO_LEN;

        tot_out = 0;

        while (tot_in < tot_len) {
                in_len = read_compress_length(data_in + in_offset);
                in_page_bytes_left -= LZO_LEN;
                in_offset += LZO_LEN;
                tot_in += LZO_LEN;

                tot_in += in_len;
                working_bytes = in_len;
                may_late_unmap = need_unmap = false;

                /* fast path: avoid using the working buffer */
                if (in_page_bytes_left >= in_len) {
                        buf = data_in + in_offset;
                        bytes = in_len;
                        may_late_unmap = true;
                        goto cont;
                }

                /* copy bytes from the pages into the working buffer */
                buf = workspace->cbuf;
                buf_offset = 0;
                while (working_bytes) {
                        bytes = min(working_bytes, in_page_bytes_left);

                        memcpy(buf + buf_offset, data_in + in_offset, bytes);
                        buf_offset += bytes;
cont:
                        working_bytes -= bytes;
                        in_page_bytes_left -= bytes;
                        in_offset += bytes;

                        /* check if we need to pick another page */
                        if ((working_bytes == 0 && in_page_bytes_left < LZO_LEN)
                            || in_page_bytes_left == 0) {
                                tot_in += in_page_bytes_left;

                                if (working_bytes == 0 && tot_in >= tot_len)
                                        break;

                                if (page_in_index + 1 >= total_pages_in) {
                                        ret = -EIO;
                                        goto done;
                                }

                                if (may_late_unmap)
                                        need_unmap = true;
                                else
                                        kunmap(pages_in[page_in_index]);

                                data_in = kmap(pages_in[++page_in_index]);

                                in_page_bytes_left = PAGE_SIZE;
                                in_offset = 0;
                        }
                }

                out_len = lzo1x_worst_compress(PAGE_SIZE);
                ret = lzo1x_decompress_safe(buf, in_len, workspace->buf,
                                            &out_len);
                if (need_unmap)
                        kunmap(pages_in[page_in_index - 1]);
                if (ret != LZO_E_OK) {
                        pr_warn("BTRFS: decompress failed\n");
                        ret = -EIO;
                        break;
                }

                buf_start = tot_out;
                tot_out += out_len;

                ret2 = btrfs_decompress_buf2page(workspace->buf, buf_start,
                                                 tot_out, disk_start, orig_bio);
                if (ret2 == 0)
                        break;
        }
done:
        kunmap(pages_in[page_in_index]);
        if (!ret)
                zero_fill_bio(orig_bio);
        return ret;
}

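/*
 * Decompress a single segment into one destination page: skip the 4 byte
 * total length, read the first segment header, decompress the segment into
 * the workspace buffer, then copy the bytes from start_byte onwards into
 * dest_page and zero fill whatever is left of destlen.
 */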
static int lzo_decompress(struct list_head *ws, unsigned char *data_in,
                          struct page *dest_page,
                          unsigned long start_byte,
                          size_t srclen, size_t destlen)
{
        struct workspace *workspace = list_entry(ws, struct workspace, list);
        size_t in_len;
        size_t out_len;
        int ret = 0;
        char *kaddr;
        unsigned long bytes;

        BUG_ON(srclen < LZO_LEN);

        data_in += LZO_LEN;

        in_len = read_compress_length(data_in);
        data_in += LZO_LEN;

        out_len = PAGE_SIZE;
        ret = lzo1x_decompress_safe(data_in, in_len, workspace->buf, &out_len);
        if (ret != LZO_E_OK) {
                pr_warn("BTRFS: decompress failed!\n");
                ret = -EIO;
                goto out;
        }

        if (out_len < start_byte) {
                ret = -EIO;
                goto out;
        }

        /*
         * the caller is already checking against PAGE_SIZE, but let's
         * move this check closer to the memcpy/memset
         */
        destlen = min_t(unsigned long, destlen, PAGE_SIZE);
        bytes = min_t(unsigned long, destlen, out_len - start_byte);

        kaddr = kmap_atomic(dest_page);
        memcpy(kaddr, workspace->buf + start_byte, bytes);

        /*
         * btrfs_getblock is doing a zero on the tail of the page too,
         * but this will cover anything missing from the decompressed
         * data.
         */
        if (bytes < destlen)
                memset(kaddr + bytes, 0, destlen - bytes);
        kunmap_atomic(kaddr);
out:
        return ret;
}

static void lzo_set_level(struct list_head *ws, unsigned int type)
{
}

const struct btrfs_compress_op btrfs_lzo_compress = {
        .alloc_workspace        = lzo_alloc_workspace,
        .free_workspace         = lzo_free_workspace,
        .compress_pages         = lzo_compress_pages,
        .decompress_bio         = lzo_decompress_bio,
        .decompress             = lzo_decompress,
        .set_level              = lzo_set_level,
};