// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2019 HUAWEI, Inc.
 *             https://www.huawei.com/
 */
#include "compress.h"
#include <linux/module.h>
#include <linux/lz4.h>

#ifndef LZ4_DISTANCE_MAX	/* history window size */
#define LZ4_DISTANCE_MAX	65535	/* set to maximum value by default */
#endif

#define LZ4_MAX_DISTANCE_PAGES	(DIV_ROUND_UP(LZ4_DISTANCE_MAX, PAGE_SIZE) + 1)
#ifndef LZ4_DECOMPRESS_INPLACE_MARGIN
#define LZ4_DECOMPRESS_INPLACE_MARGIN(srcsize)	(((srcsize) >> 8) + 32)
#endif
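
/*
 * The in-place margin above mirrors the LZ4_DECOMPRESS_INPLACE_MARGIN
 * definition used by the LZ4 library: decompressing within the same buffer
 * is safe as long as the compressed data ends at least this many bytes past
 * the end of the decompressed output, so the decompressor never overwrites
 * input bytes it has not consumed yet.
 */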

/* called during mount to parse the LZ4 on-disk configuration */
int z_erofs_load_lz4_config(struct super_block *sb,
			    struct erofs_super_block *dsb,
			    struct z_erofs_lz4_cfgs *lz4, int size)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	u16 distance;

	if (lz4) {
		if (size < sizeof(struct z_erofs_lz4_cfgs)) {
			erofs_err(sb, "invalid lz4 cfgs, size=%u", size);
			return -EINVAL;
		}
		distance = le16_to_cpu(lz4->max_distance);

		sbi->lz4.max_pclusterblks = le16_to_cpu(lz4->max_pclusterblks);
		if (!sbi->lz4.max_pclusterblks) {
			sbi->lz4.max_pclusterblks = 1;	/* reserved case */
		} else if (sbi->lz4.max_pclusterblks >
			   Z_EROFS_PCLUSTER_MAX_SIZE / EROFS_BLKSIZ) {
			erofs_err(sb, "too large lz4 pclusterblks %u",
				  sbi->lz4.max_pclusterblks);
			return -EINVAL;
		} else if (sbi->lz4.max_pclusterblks >= 2) {
			erofs_info(sb, "EXPERIMENTAL big pcluster feature in use. Use at your own risk!");
		}
	} else {
		/* no compression config found: fall back to the legacy field */
		distance = le16_to_cpu(dsb->u1.lz4_max_distance);
		sbi->lz4.max_pclusterblks = 1;
	}

	sbi->lz4.max_distance_pages = distance ?
					DIV_ROUND_UP(distance, PAGE_SIZE) + 1 :
					LZ4_MAX_DISTANCE_PAGES;
	/* grow the per-CPU buffers up front to fit the largest pcluster */
	return erofs_pcpubuf_growsize(sbi->lz4.max_pclusterblks);
}

/*
 * Fill all gaps with bounce pages if it's a sparse page list. Also check if
 * all physical pages are consecutive, which can be seen for moderate CR.
 */
static int z_erofs_lz4_prepare_dstpages(struct z_erofs_decompress_req *rq,
					struct page **pagepool)
{
	const unsigned int nr =
		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
	struct page *availables[LZ4_MAX_DISTANCE_PAGES] = { NULL };
	unsigned long bounced[DIV_ROUND_UP(LZ4_MAX_DISTANCE_PAGES,
					   BITS_PER_LONG)] = { 0 };
	unsigned int lz4_max_distance_pages =
				EROFS_SB(rq->sb)->lz4.max_distance_pages;
	void *kaddr = NULL;
	unsigned int i, j, top;

	top = 0;
	for (i = j = 0; i < nr; ++i, ++j) {
		struct page *const page = rq->out[i];
		struct page *victim;

		if (j >= lz4_max_distance_pages)
			j = 0;

		/* 'valid' bounced can only be tested after a complete round */
		if (test_bit(j, bounced)) {
			DBG_BUGON(i < lz4_max_distance_pages);
			DBG_BUGON(top >= lz4_max_distance_pages);
			/* this bounce page left the LZ4 window, reuse it */
			availables[top++] = rq->out[i - lz4_max_distance_pages];
		}

		if (page) {
			__clear_bit(j, bounced);
			/* check whether the pages stay physically consecutive */
			if (kaddr) {
				if (kaddr + PAGE_SIZE == page_address(page))
					kaddr += PAGE_SIZE;
				else
					kaddr = NULL;
			} else if (!i) {
				kaddr = page_address(page);
			}
			continue;
		}
		kaddr = NULL;
		__set_bit(j, bounced);

		if (top) {
			victim = availables[--top];
			get_page(victim);
		} else {
			victim = erofs_allocpage(pagepool,
						 GFP_KERNEL | __GFP_NOFAIL);
			set_page_private(victim, Z_EROFS_SHORTLIVED_PAGE);
		}
		rq->out[i] = victim;
	}
	return kaddr ? 1 : 0;	/* 1: all output pages are consecutive */
}
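
/*
 * The returned mapping type tells z_erofs_lz4_decompress_mem() how to
 * release the source buffer afterwards:
 *   0 - the (single) input page stays kmapped;
 *   1 - the input pages were mapped contiguously with erofs_vm_map_ram();
 *   2 - the compressed data was copied into a per-CPU buffer.
 */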
static void *z_erofs_lz4_handle_inplace_io(struct z_erofs_decompress_req *rq,
			void *inpage, unsigned int *inputmargin, int *maptype,
			bool support_0padding)
{
	unsigned int nrpages_in, nrpages_out;
	unsigned int ofull, oend, inputsize, total, i, j;
	struct page **in;
	void *src, *tmp;

	inputsize = rq->inputsize;
	nrpages_in = PAGE_ALIGN(inputsize) >> PAGE_SHIFT;
	oend = rq->pageofs_out + rq->outputsize;
	ofull = PAGE_ALIGN(oend);
	nrpages_out = ofull >> PAGE_SHIFT;

	if (rq->inplace_io) {
		if (rq->partial_decoding || !support_0padding ||
		    ofull - oend < LZ4_DECOMPRESS_INPLACE_MARGIN(inputsize))
			goto docopy;

		for (i = 0; i < nrpages_in; ++i) {
			DBG_BUGON(rq->in[i] == NULL);
			for (j = 0; j < nrpages_out - nrpages_in + i; ++j)
				if (rq->out[j] == rq->in[i])
					goto docopy;
		}
	}

	if (nrpages_in <= 1) {
		*maptype = 0;
		return inpage;
	}
	kunmap_atomic(inpage);
	might_sleep();
	src = erofs_vm_map_ram(rq->in, nrpages_in);
	if (!src)
		return ERR_PTR(-ENOMEM);
	*maptype = 1;
	return src;

docopy:
	/* Or copy compressed data which can be overlapped to per-CPU buffer */
	in = rq->in;
	src = erofs_get_pcpubuf(nrpages_in);
	if (!src) {
		DBG_BUGON(1);
		kunmap_atomic(inpage);
		return ERR_PTR(-EFAULT);
	}

	tmp = src;
	total = rq->inputsize;
	while (total) {
		unsigned int page_copycnt =
			min_t(unsigned int, total, PAGE_SIZE - *inputmargin);

		if (!inpage)
			inpage = kmap_atomic(*in);
		memcpy(tmp, inpage + *inputmargin, page_copycnt);
		kunmap_atomic(inpage);
		inpage = NULL;
		tmp += page_copycnt;
		total -= page_copycnt;
		++in;
		*inputmargin = 0;
	}
	*maptype = 2;
	return src;
}
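
/*
 * With the 0padding feature, compressed data is aligned to the end of its
 * pcluster and the head is zero-filled, so the real input starts after the
 * run of leading zero bytes scanned below. In-place decompression is only
 * attempted when 0padding is available.
 */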
static int z_erofs_lz4_decompress_mem(struct z_erofs_decompress_req *rq,
				      u8 *out)
{
	unsigned int inputmargin;
	u8 *headpage, *src;
	bool support_0padding;
	int ret, maptype;

	DBG_BUGON(*rq->in == NULL);
	headpage = kmap_atomic(*rq->in);
	inputmargin = 0;
	support_0padding = false;

	/* decompression inplace is only safe when 0padding is enabled */
	if (erofs_sb_has_lz4_0padding(EROFS_SB(rq->sb))) {
		support_0padding = true;

		while (!headpage[inputmargin & ~PAGE_MASK])
			if (!(++inputmargin & ~PAGE_MASK))
				break;

		if (inputmargin >= rq->inputsize) {
			kunmap_atomic(headpage);
			return -EIO;
		}
	}

	rq->inputsize -= inputmargin;
	src = z_erofs_lz4_handle_inplace_io(rq, headpage, &inputmargin,
					    &maptype, support_0padding);
	if (IS_ERR(src))
		return PTR_ERR(src);

	/* legacy format could compress extra data in a pcluster. */
	if (rq->partial_decoding || !support_0padding)
		ret = LZ4_decompress_safe_partial(src + inputmargin, out,
				rq->inputsize, rq->outputsize, rq->outputsize);
	else
		ret = LZ4_decompress_safe(src + inputmargin, out,
					  rq->inputsize, rq->outputsize);

	if (ret != rq->outputsize) {
		erofs_err(rq->sb, "failed to decompress %d in[%u, %u] out[%u]",
			  ret, rq->inputsize, inputmargin, rq->outputsize);

		print_hex_dump(KERN_DEBUG, "[ in]: ", DUMP_PREFIX_OFFSET,
			       16, 1, src + inputmargin, rq->inputsize, true);
		print_hex_dump(KERN_DEBUG, "[out]: ", DUMP_PREFIX_OFFSET,
			       16, 1, out, rq->outputsize, true);

		if (ret >= 0)
			memset(out + ret, 0, rq->outputsize - ret);
		ret = -EIO;
	}

	if (maptype == 0) {
		kunmap_atomic(src);
	} else if (maptype == 1) {
		vm_unmap_ram(src, PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT);
	} else if (maptype == 2) {
		erofs_put_pcpubuf(src);
	} else {
		DBG_BUGON(1);
		return -EFAULT;
	}
	return ret;
}
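
/*
 * Map the output either via the single-page fast path, directly when the
 * physical pages turn out to be consecutive, or with erofs_vm_map_ram()
 * otherwise, then run the actual decompression on the linear buffer.
 */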
static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq,
				  struct page **pagepool)
{
	const unsigned int nrpages_out =
		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
	unsigned int dst_maptype;
	void *dst;
	int ret;

	/* one optimized fast path only for non bigpcluster cases yet */
	if (rq->inputsize <= PAGE_SIZE && nrpages_out == 1 && !rq->inplace_io) {
		DBG_BUGON(!*rq->out);
		dst = kmap_atomic(*rq->out);
		dst_maptype = 0;
		goto dstmap_out;
	}

	/* general decoding path which can be used for all cases */
	ret = z_erofs_lz4_prepare_dstpages(rq, pagepool);
	if (ret < 0) {
		return ret;
	} else if (ret > 0) {
		dst = page_address(*rq->out);
		dst_maptype = 1;
		goto dstmap_out;
	}

	dst = erofs_vm_map_ram(rq->out, nrpages_out);
	if (!dst)
		return -ENOMEM;
	dst_maptype = 2;

dstmap_out:
	ret = z_erofs_lz4_decompress_mem(rq, dst + rq->pageofs_out);

	if (!dst_maptype)
		kunmap_atomic(dst);
	else if (dst_maptype == 2)
		vm_unmap_ram(dst, nrpages_out);
	return ret;
}
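
/*
 * "Shifted" pclusters store their data uncompressed; transforming them is a
 * plain copy split across at most two output pages at pageofs_out.
 */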
static int z_erofs_shifted_transform(struct z_erofs_decompress_req *rq,
				     struct page **pagepool)
{
	const unsigned int nrpages_out =
		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
	const unsigned int righthalf = PAGE_SIZE - rq->pageofs_out;
	unsigned char *src, *dst;

	if (nrpages_out > 2) {
		DBG_BUGON(1);
		return -EIO;
	}

	if (rq->out[0] == *rq->in) {
		/* the data is already in place */
		DBG_BUGON(nrpages_out != 1);
		return 0;
	}

	src = kmap_atomic(*rq->in);
	if (rq->out[0]) {
		dst = kmap_atomic(rq->out[0]);
		memcpy(dst + rq->pageofs_out, src, righthalf);
		kunmap_atomic(dst);
	}

	if (nrpages_out == 2) {
		DBG_BUGON(!rq->out[1]);
		if (rq->out[1] == *rq->in) {
			memmove(src, src + righthalf, rq->pageofs_out);
		} else {
			dst = kmap_atomic(rq->out[1]);
			memcpy(dst, src + righthalf, rq->pageofs_out);
			kunmap_atomic(dst);
		}
	}
	kunmap_atomic(src);
	return 0;
}
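
/* dispatch table indexed by the per-pcluster algorithm id (rq->alg) */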
static struct z_erofs_decompressor decompressors[] = {
	[Z_EROFS_COMPRESSION_SHIFTED] = {
		.decompress = z_erofs_shifted_transform,
		.name = "shifted"
	},
	[Z_EROFS_COMPRESSION_LZ4] = {
		.decompress = z_erofs_lz4_decompress,
		.name = "lz4"
	},
#ifdef CONFIG_EROFS_FS_ZIP_LZMA
	[Z_EROFS_COMPRESSION_LZMA] = {
		.decompress = z_erofs_lzma_decompress,
		.name = "lzma"
	},
#endif
};

int z_erofs_decompress(struct z_erofs_decompress_req *rq,
		       struct page **pagepool)
{
	return decompressors[rq->alg].decompress(rq, pagepool);
}