Commit | Line | Data |
---|---|---|
0d40d6e3 GX |
1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* | |
3 | * linux/drivers/staging/erofs/unzip_vle_lz4.c | |
4 | * | |
5 | * Copyright (C) 2018 HUAWEI, Inc. | |
6 | * http://www.huawei.com/ | |
7 | * Created by Gao Xiang <gaoxiang25@huawei.com> | |
8 | * | |
9 | * This file is subject to the terms and conditions of the GNU General Public | |
10 | * License. See the file COPYING in the main directory of the Linux | |
11 | * distribution for more details. | |
12 | */ | |
#include "unzip_vle.h"

/*
 * Size the per-CPU bounce buffer for the larger of the two users:
 * full compressed clusters and the inline pagevec area.
 */
#if Z_EROFS_CLUSTER_MAX_PAGES > Z_EROFS_VLE_INLINE_PAGEVECS
#define EROFS_PERCPU_NR_PAGES	Z_EROFS_CLUSTER_MAX_PAGES
#else
#define EROFS_PERCPU_NR_PAGES	Z_EROFS_VLE_INLINE_PAGEVECS
#endif

/*
 * Per-CPU scratch buffers, indexed by smp_processor_id().  Every user in
 * this file takes preempt_disable() before touching its slot so the buffer
 * cannot be migrated out from under it; no further locking is used.
 */
static struct {
	char data[PAGE_SIZE * EROFS_PERCPU_NR_PAGES];
} erofs_pcpubuf[NR_CPUS];
24 | ||
/*
 * Copy an uncompressed (plain) cluster into its destination pages, shifting
 * the data by @pageofs bytes.  Each destination page i is assembled from the
 * tail of source page i-1 (the first @pageofs bytes) and the head of source
 * page i (the remaining @righthalf bytes).
 *
 * A page may appear in both @compressed_pages and @pages (in-place reuse).
 * Before such a page is written as a destination it is first mirrored into
 * the per-CPU buffer (mirrored[j] tracks this), and later reads of that
 * source come from the mirror instead of the now-overwritten page.
 *
 * @compressed_pages: source pages of the cluster (clusterpages entries)
 * @clusterpages:     number of source pages
 * @pages:            destination pages; entries may be NULL (holes)
 * @nr_pages:         number of destination pages
 * @pageofs:          byte offset of the data within the first page
 *
 * Runs with preemption disabled for the whole copy (percpu buffer + atomic
 * kmaps).  Always returns 0.
 */
int z_erofs_vle_plain_copy(struct page **compressed_pages,
			   unsigned int clusterpages,
			   struct page **pages,
			   unsigned int nr_pages,
			   unsigned short pageofs)
{
	unsigned int i, j;
	void *src = NULL;
	/* bytes of destination page i that come from source page i */
	const unsigned int righthalf = PAGE_SIZE - pageofs;
	char *percpu_data;
	/* mirrored[j]: source page j has been copied into the percpu buffer */
	bool mirrored[Z_EROFS_CLUSTER_MAX_PAGES] = { 0 };

	preempt_disable();
	percpu_data = erofs_pcpubuf[smp_processor_id()].data;

	/* note the loop update: j trails i by one iteration (j = old i) */
	j = 0;
	for (i = 0; i < nr_pages; j = i++) {
		struct page *page = pages[i];
		void *dst;

		/* hole in the output: drop any mapping carried over for it */
		if (!page) {
			if (src) {
				if (!mirrored[j])
					kunmap_atomic(src);
				src = NULL;
			}
			continue;
		}

		dst = kmap_atomic(page);

		/*
		 * If this destination page is also a source page of the
		 * cluster, preserve its current contents in the percpu
		 * buffer before we overwrite it below.
		 */
		for (; j < clusterpages; ++j) {
			if (compressed_pages[j] != page)
				continue;

			BUG_ON(mirrored[j]);
			memcpy(percpu_data + j * PAGE_SIZE, dst, PAGE_SIZE);
			mirrored[j] = true;
			break;
		}

		/* left half of dst comes from the tail of source page i-1 */
		if (i) {
			if (!src)
				src = mirrored[i - 1] ?
					percpu_data + (i - 1) * PAGE_SIZE :
					kmap_atomic(compressed_pages[i - 1]);

			memcpy(dst, src + righthalf, pageofs);

			if (!mirrored[i - 1])
				kunmap_atomic(src);

			/* no source page i exists beyond the cluster end */
			if (unlikely(i >= clusterpages)) {
				kunmap_atomic(dst);
				break;
			}
		}

		/* right half of dst comes from the head of source page i */
		if (!righthalf) {
			src = NULL;
		} else {
			src = mirrored[i] ? percpu_data + i * PAGE_SIZE :
				kmap_atomic(compressed_pages[i]);

			memcpy(dst + pageofs, src, righthalf);
		}

		kunmap_atomic(dst);
	}

	/* release the mapping left over from the final iteration, if any */
	if (src && !mirrored[j])
		kunmap_atomic(src);

	preempt_enable();
	return 0;
}
101 | ||
/* LZ4 backend; returns decompressed length on success, negative on error. */
extern int z_erofs_unzip_lz4(void *in, void *out, size_t inlen, size_t outlen);

/*
 * Decompress a cluster through the per-CPU buffer, then scatter the result
 * into the destination pages and complete them via @endio.
 *
 * The compressed input is mapped contiguously (kmap_atomic for a single
 * page, erofs_vmap otherwise), decompressed into the percpu buffer at
 * @pageofs, and copied out page by page.  If a destination page is the
 * single compressed page itself (in-place reuse), the copy goes through the
 * still-mapped @vin instead of a fresh kmap.
 *
 * @outlen:  expected decompressed length
 * @pageofs: byte offset of the data within the first destination page
 * @endio:   called on every non-NULL destination page, error or not
 *
 * Returns 0 on success, -ENOTSUPP if the output exceeds the percpu buffer,
 * or the negative error from z_erofs_unzip_lz4 (pages get SetPageError).
 */
int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages,
				  unsigned int clusterpages,
				  struct page **pages,
				  unsigned int outlen,
				  unsigned short pageofs,
				  void (*endio)(struct page *))
{
	void *vin, *vout;
	unsigned int nr_pages, i, j;
	int ret;

	/* the whole (shifted) output must fit in one percpu buffer */
	if (outlen + pageofs > EROFS_PERCPU_NR_PAGES * PAGE_SIZE)
		return -ENOTSUPP;

	nr_pages = DIV_ROUND_UP(outlen + pageofs, PAGE_SIZE);

	if (clusterpages == 1)
		vin = kmap_atomic(compressed_pages[0]);
	else
		vin = erofs_vmap(compressed_pages, clusterpages);

	preempt_disable();
	vout = erofs_pcpubuf[smp_processor_id()].data;

	ret = z_erofs_unzip_lz4(vin, vout + pageofs,
				clusterpages * PAGE_SIZE, outlen);

	/* success: ret is the actual decompressed length */
	if (ret >= 0) {
		outlen = ret;
		ret = 0;
	}

	for (i = 0; i < nr_pages; ++i) {
		/* bytes of this page that carry payload */
		j = min((unsigned int)PAGE_SIZE - pageofs, outlen);

		if (pages[i]) {
			if (ret < 0) {
				SetPageError(pages[i]);
			} else if (clusterpages == 1 &&
				   pages[i] == compressed_pages[0]) {
				/* in-place page: reuse the vin mapping */
				memcpy(vin + pageofs, vout + pageofs, j);
			} else {
				void *dst = kmap_atomic(pages[i]);

				memcpy(dst + pageofs, vout + pageofs, j);
				kunmap_atomic(dst);
			}
			endio(pages[i]);
		}
		vout += PAGE_SIZE;
		outlen -= j;
		pageofs = 0;	/* only the first page is offset */
	}
	preempt_enable();

	if (clusterpages == 1)
		kunmap_atomic(vin);
	else
		erofs_vunmap(vin, clusterpages);

	return ret;
}
166 | ||
/*
 * Decompress a cluster directly into a caller-provided virtual mapping
 * @vout (at byte offset @pageofs).
 *
 * Input mapping strategy depends on @overlapped:
 *  - overlapped: the compressed pages may alias the output, so they are
 *    first copied into the per-CPU buffer (preemption disabled for the
 *    buffer's lifetime) and decompressed from there;
 *  - otherwise a single page is kmap_atomic'd, or multiple pages are
 *    mapped contiguously with erofs_vmap.
 *
 * @llen: expected decompressed (logical) length
 *
 * Returns 0 on success or the negative error from z_erofs_unzip_lz4
 * (positive success lengths are collapsed to 0).
 */
int z_erofs_vle_unzip_vmap(struct page **compressed_pages,
			   unsigned int clusterpages,
			   void *vout,
			   unsigned int llen,
			   unsigned short pageofs,
			   bool overlapped)
{
	void *vin;
	unsigned int i;
	int ret;

	if (overlapped) {
		/* stage the compressed data in the percpu bounce buffer */
		preempt_disable();
		vin = erofs_pcpubuf[smp_processor_id()].data;

		for (i = 0; i < clusterpages; ++i) {
			void *t = kmap_atomic(compressed_pages[i]);

			memcpy(vin + PAGE_SIZE * i, t, PAGE_SIZE);
			kunmap_atomic(t);
		}
	} else if (clusterpages == 1) {
		vin = kmap_atomic(compressed_pages[0]);
	} else {
		vin = erofs_vmap(compressed_pages, clusterpages);
	}

	ret = z_erofs_unzip_lz4(vin, vout + pageofs,
				clusterpages * PAGE_SIZE, llen);
	if (ret > 0)
		ret = 0;

	/* teardown mirrors the setup path above */
	if (!overlapped) {
		if (clusterpages == 1)
			kunmap_atomic(vin);
		else
			erofs_vunmap(vin, clusterpages);
	} else {
		preempt_enable();
	}
	return ret;
}