1 // SPDX-License-Identifier: GPL-2.0
3 * linux/drivers/staging/erofs/unzip_vle_lz4.c
5 * Copyright (C) 2018 HUAWEI, Inc.
6 * http://www.huawei.com/
7 * Created by Gao Xiang <gaoxiang25@huawei.com>
9 * This file is subject to the terms and conditions of the GNU General Public
10 * License. See the file COPYING in the main directory of the Linux
11 * distribution for more details.
13 #include "unzip_vle.h"
/*
 * EROFS_PERCPU_NR_PAGES: page capacity of the per-CPU scratch buffer
 * below — chosen as the larger of the two compile-time limits so the
 * buffer can hold either a full cluster or a full inline pagevec run.
 *
 * NOTE(review): this excerpt is elided (the embedded original line
 * numbers jump): the #else/#endif lines and the struct opener
 * (presumably "static struct { ... }") are not visible here.
 */
15 #if Z_EROFS_CLUSTER_MAX_PAGES > Z_EROFS_VLE_INLINE_PAGEVECS
16 #define EROFS_PERCPU_NR_PAGES Z_EROFS_CLUSTER_MAX_PAGES
18 #define EROFS_PERCPU_NR_PAGES Z_EROFS_VLE_INLINE_PAGEVECS
/* one fixed-size bounce buffer per possible CPU (indexed by CPU id) */
22 char data[PAGE_SIZE * EROFS_PERCPU_NR_PAGES];
23 } erofs_pcpubuf[NR_CPUS];
/*
 * z_erofs_vle_plain_copy - copy a "plain" (stored-uncompressed) cluster
 * from compressed_pages[] into the target pages, re-aligning the data by
 * pageofs bytes: each output page is stitched from the tail (righthalf
 * bytes) of one input page and the head (pageofs bytes) of the next.
 *
 * NOTE(review): this excerpt is elided (the embedded original line
 * numbers jump), so the declarations of i/j/src/dst/percpu_data, the
 * pages parameter, several branches and the function epilogue
 * (kunmap_atomic calls, return) are NOT visible; comments below only
 * describe what the visible lines establish.
 */
25 int z_erofs_vle_plain_copy(struct page **compressed_pages,
26 unsigned int clusterpages,
28 unsigned int nr_pages,
29 unsigned short pageofs)
/* bytes of a page that remain after the starting offset */
33 const unsigned int righthalf = PAGE_SIZE - pageofs;
/* tracks which compressed pages were already copied ("mirrored")
 * into the per-CPU buffer, so later iterations read the safe copy */
35 bool mirrored[Z_EROFS_CLUSTER_MAX_PAGES] = { 0 };
/* per-CPU scratch buffer; smp_processor_id() implies the caller runs
 * with preemption disabled — TODO confirm against the full file */
38 percpu_data = erofs_pcpubuf[smp_processor_id()].data;
/* walk output pages; j trails i by one iteration (j = previous i) */
41 for (i = 0; i < nr_pages; j = i++) {
42 struct page *page = pages[i];
54 dst = kmap_atomic(page);
/* if this output page is also one of the compressed (input) pages,
 * preserve its current contents in the per-CPU buffer first */
56 for (; j < clusterpages; ++j) {
57 if (compressed_pages[j] != page)
61 memcpy(percpu_data + j * PAGE_SIZE, dst, PAGE_SIZE);
/* head of the output page comes from the tail of input page i-1,
 * read from the mirror copy when that input page was clobbered */
68 src = mirrored[i - 1] ?
69 percpu_data + (i - 1) * PAGE_SIZE :
70 kmap_atomic(compressed_pages[i - 1]);
72 memcpy(dst, src + righthalf, pageofs);
/* past the last input page: no more source data for this output */
77 if (unlikely(i >= clusterpages)) {
/* tail of the output page comes from the head of input page i */
86 src = mirrored[i] ? percpu_data + i * PAGE_SIZE :
87 kmap_atomic(compressed_pages[i]);
89 memcpy(dst + pageofs, src, righthalf);
/* only kunmap src when it was a live kmap, not the mirror buffer
 * (the kunmap_atomic call itself is elided from this excerpt) */
95 if (src && !mirrored[j])
/* LZ4 back-end decompressor (defined elsewhere); returns negative on
 * error — exact success-value semantics not visible in this excerpt */
102 extern int z_erofs_unzip_lz4(void *in, void *out, size_t inlen, size_t outlen);
/*
 * z_erofs_vle_unzip_fast_percpu - decompress one cluster into the
 * per-CPU bounce buffer, then distribute the result page-by-page into
 * the target pages, shifted by pageofs.
 *
 * NOTE(review): this excerpt is elided (the embedded original line
 * numbers jump): the pages/outlen parameters, declarations of
 * vin/vout/ret, error paths, kunmap/endio calls and the epilogue are
 * NOT visible; comments below only describe the visible lines.
 */
104 int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages,
105 unsigned int clusterpages,
108 unsigned short pageofs,
109 void (*endio)(struct page *))
112 unsigned int nr_pages, i, j;
/* refuse work that cannot fit in the per-CPU scratch buffer */
115 if (outlen + pageofs > EROFS_PERCPU_NR_PAGES * PAGE_SIZE)
118 nr_pages = DIV_ROUND_UP(outlen + pageofs, PAGE_SIZE);
/* single input page: cheap atomic kmap; otherwise vmap the cluster
 * into one contiguous virtual range */
120 if (clusterpages == 1)
121 vin = kmap_atomic(compressed_pages[0]);
123 vin = erofs_vmap(compressed_pages, clusterpages);
/* decompress destination: this CPU's scratch buffer */
126 vout = erofs_pcpubuf[smp_processor_id()].data;
/* decompress at an offset so output pages line up at pageofs */
128 ret = z_erofs_unzip_lz4(vin, vout + pageofs,
129 clusterpages * PAGE_SIZE, outlen);
/* scatter the decompressed bytes into the output pages */
136 for (i = 0; i < nr_pages; ++i) {
/* bytes belonging to this output page (first page is shortened
 * by pageofs; presumably outlen is decremented per iteration in
 * elided lines — TODO confirm against the full file) */
137 j = min((unsigned int)PAGE_SIZE - pageofs, outlen);
141 SetPageError(pages[i]);
/* output page aliases the single input page: copy back in place
 * through the still-mapped vin instead of remapping it */
142 } else if (clusterpages == 1 &&
143 pages[i] == compressed_pages[0]) {
144 memcpy(vin + pageofs, vout + pageofs, j);
146 void *dst = kmap_atomic(pages[i]);
148 memcpy(dst + pageofs, vout + pageofs, j);
/* undo whichever input mapping was taken above */
159 if (clusterpages == 1)
162 erofs_vunmap(vin, clusterpages);
/*
 * z_erofs_vle_unzip_vmap - decompress a cluster through a (v)mapped
 * input window; unlike the fast path above, the output buffer vout is
 * presumably supplied by the caller — TODO confirm, the parameter list
 * is partly elided.
 *
 * NOTE(review): this excerpt is elided (the embedded original line
 * numbers jump) and the function is cut off at the end of the excerpt:
 * the remaining parameters, declarations of vin/vout/i/ret, the
 * condition guarding the per-CPU copy branch, kunmap calls and the
 * return are NOT visible; comments below only describe visible lines.
 */
167 int z_erofs_vle_unzip_vmap(struct page **compressed_pages,
168 unsigned int clusterpages,
171 unsigned short pageofs,
/* (branch condition elided) copy the whole cluster into the per-CPU
 * scratch buffer so it can be read as one contiguous range */
180 vin = erofs_pcpubuf[smp_processor_id()].data;
182 for (i = 0; i < clusterpages; ++i) {
183 void *t = kmap_atomic(compressed_pages[i]);
185 memcpy(vin + PAGE_SIZE * i, t, PAGE_SIZE);
/* single page: direct atomic kmap; otherwise vmap the cluster */
188 } else if (clusterpages == 1) {
189 vin = kmap_atomic(compressed_pages[0]);
191 vin = erofs_vmap(compressed_pages, clusterpages);
/* decompress at an offset so the result lines up at pageofs */
194 ret = z_erofs_unzip_lz4(vin, vout + pageofs,
195 clusterpages * PAGE_SIZE, llen);
/* undo the single-page mapping; the vunmap path below handles the
 * multi-page case (epilogue continues past this excerpt) */
200 if (clusterpages == 1)
203 erofs_vunmap(vin, clusterpages);