// SPDX-License-Identifier: GPL-2.0
/*
 * linux/drivers/staging/erofs/unzip_vle_lz4.c
 *
 * Copyright (C) 2018 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of the Linux
 * distribution for more details.
 */
#include "unzip_vle.h"

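/*
 * The per-CPU buffer must be able to hold the larger of a whole compressed
 * cluster and the inline pagevecs.
 */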
#if Z_EROFS_CLUSTER_MAX_PAGES > Z_EROFS_VLE_INLINE_PAGEVECS
#define EROFS_PERCPU_NR_PAGES   Z_EROFS_CLUSTER_MAX_PAGES
#else
#define EROFS_PERCPU_NR_PAGES   Z_EROFS_VLE_INLINE_PAGEVECS
#endif

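/*
 * Static per-CPU bounce buffers, always used with preemption disabled so
 * that each CPU has exclusive use of its own slot.
 */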
static struct {
        char data[PAGE_SIZE * EROFS_PERCPU_NR_PAGES];
} erofs_pcpubuf[NR_CPUS];

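/*
 * Copy a plain (stored rather than compressed) cluster to the output pages:
 * each output page is stitched from the tail of compressed page i - 1 and
 * the head of compressed page i, shifted by @pageofs. Compressed pages that
 * are reused as output pages are snapshotted ("mirrored") into the per-CPU
 * buffer before they get overwritten.
 */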
int z_erofs_vle_plain_copy(struct page **compressed_pages,
                           unsigned int clusterpages,
                           struct page **pages,
                           unsigned int nr_pages,
                           unsigned short pageofs)
{
        unsigned int i, j;
        void *src = NULL;
        const unsigned int righthalf = PAGE_SIZE - pageofs;
        char *percpu_data;
        bool mirrored[Z_EROFS_CLUSTER_MAX_PAGES] = { 0 };

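        /* take exclusive ownership of this CPU's bounce buffer */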
        preempt_disable();
        percpu_data = erofs_pcpubuf[smp_processor_id()].data;

        j = 0;
        for (i = 0; i < nr_pages; j = i++) {
                struct page *page = pages[i];
                void *dst;

                if (!page) {
                        if (src) {
                                if (!mirrored[j])
                                        kunmap_atomic(src);
                                src = NULL;
                        }
                        continue;
                }

                dst = kmap_atomic(page);

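                /*
                 * If this output page is also one of the compressed pages,
                 * snapshot it into the per-CPU buffer before it is
                 * overwritten below.
                 */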
                for (; j < clusterpages; ++j) {
                        if (compressed_pages[j] != page)
                                continue;

                        BUG_ON(mirrored[j]);
                        memcpy(percpu_data + j * PAGE_SIZE, dst, PAGE_SIZE);
                        mirrored[j] = true;
                        break;
                }

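                /* the last pageofs bytes of page i - 1 start this output page */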
                if (i) {
                        if (!src)
                                src = mirrored[i - 1] ?
                                        percpu_data + (i - 1) * PAGE_SIZE :
                                        kmap_atomic(compressed_pages[i - 1]);

                        memcpy(dst, src + righthalf, pageofs);

                        if (!mirrored[i - 1])
                                kunmap_atomic(src);

                        if (unlikely(i >= clusterpages)) {
                                kunmap_atomic(dst);
                                break;
                        }
                }

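                /* the first righthalf bytes of page i finish this output page */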
                if (!righthalf) {
                        src = NULL;
                } else {
                        src = mirrored[i] ? percpu_data + i * PAGE_SIZE :
                                kmap_atomic(compressed_pages[i]);

                        memcpy(dst + pageofs, src, righthalf);
                }

                kunmap_atomic(dst);
        }

        if (src && !mirrored[j])
                kunmap_atomic(src);

        preempt_enable();
        return 0;
}

extern int z_erofs_unzip_lz4(void *in, void *out, size_t inlen, size_t outlen);

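/*
 * Decompress a whole cluster into this CPU's bounce buffer with a single
 * LZ4 call, then copy the result out to the target pages. Returns
 * -ENOTSUPP when the decompressed data cannot fit into the per-CPU buffer.
 */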
int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages,
                                  unsigned int clusterpages,
                                  struct page **pages,
                                  unsigned int outlen,
                                  unsigned short pageofs,
                                  void (*endio)(struct page *))
{
        void *vin, *vout;
        unsigned int nr_pages, i, j;
        int ret;

        if (outlen + pageofs > EROFS_PERCPU_NR_PAGES * PAGE_SIZE)
                return -ENOTSUPP;

        nr_pages = DIV_ROUND_UP(outlen + pageofs, PAGE_SIZE);

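        /* a single compressed page can be kmapped directly; otherwise vmap the cluster */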
        if (clusterpages == 1)
                vin = kmap_atomic(compressed_pages[0]);
        else
                vin = erofs_vmap(compressed_pages, clusterpages);

        preempt_disable();
        vout = erofs_pcpubuf[smp_processor_id()].data;

        ret = z_erofs_unzip_lz4(vin, vout + pageofs,
                                clusterpages * PAGE_SIZE, outlen);

        if (ret >= 0) {
                outlen = ret;
                ret = 0;
        }

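        /* distribute the decompressed data to the output pages, one page at a time */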
        for (i = 0; i < nr_pages; ++i) {
                j = min((unsigned int)PAGE_SIZE - pageofs, outlen);

                if (pages[i]) {
                        if (ret < 0) {
                                SetPageError(pages[i]);
                        } else if (clusterpages == 1 &&
                                   pages[i] == compressed_pages[0]) {
                                memcpy(vin + pageofs, vout + pageofs, j);
                        } else {
                                void *dst = kmap_atomic(pages[i]);

                                memcpy(dst + pageofs, vout + pageofs, j);
                                kunmap_atomic(dst);
                        }
                        endio(pages[i]);
                }
                vout += PAGE_SIZE;
                outlen -= j;
                pageofs = 0;
        }
        preempt_enable();

        if (clusterpages == 1)
                kunmap_atomic(vin);
        else
                erofs_vunmap(vin, clusterpages);

        return ret;
}

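/*
 * Decompress a cluster into a caller-provided virtually-contiguous buffer.
 * If the input and output pages overlap, bounce the compressed data through
 * this CPU's buffer first so that decompression cannot clobber its input.
 */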
int z_erofs_vle_unzip_vmap(struct page **compressed_pages,
                           unsigned int clusterpages,
                           void *vout,
                           unsigned int llen,
                           unsigned short pageofs,
                           bool overlapped)
{
        void *vin;
        unsigned int i;
        int ret;

        if (overlapped) {
                preempt_disable();
                vin = erofs_pcpubuf[smp_processor_id()].data;

                for (i = 0; i < clusterpages; ++i) {
                        void *t = kmap_atomic(compressed_pages[i]);

                        memcpy(vin + PAGE_SIZE * i, t, PAGE_SIZE);
                        kunmap_atomic(t);
                }
        } else if (clusterpages == 1) {
                vin = kmap_atomic(compressed_pages[0]);
        } else {
                vin = erofs_vmap(compressed_pages, clusterpages);
        }

        ret = z_erofs_unzip_lz4(vin, vout + pageofs,
                                clusterpages * PAGE_SIZE, llen);
        if (ret > 0)
                ret = 0;

        if (!overlapped) {
                if (clusterpages == 1)
                        kunmap_atomic(vin);
                else
                        erofs_vunmap(vin, clusterpages);
        } else {
                preempt_enable();
        }
        return ret;
}