// SPDX-License-Identifier: GPL-2.0
/*
 * linux/drivers/staging/erofs/zmap.c
 *
 * Copyright (C) 2018-2019 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 */
#include "internal.h"
#include <asm/unaligned.h>
#include <trace/events/erofs.h>

int z_erofs_fill_inode(struct inode *inode)
{
	struct erofs_vnode *const vi = EROFS_V(inode);
	struct super_block *const sb = inode->i_sb;

	/* legacy format: cluster parameters come from the superblock */
	if (vi->datamode == EROFS_INODE_FLAT_COMPRESSION_LEGACY) {
		vi->z_advise = 0;
		vi->z_algorithmtype[0] = 0;
		vi->z_algorithmtype[1] = 0;
		vi->z_logical_clusterbits = EROFS_SB(sb)->clusterbits;
		vi->z_physical_clusterbits[0] = vi->z_logical_clusterbits;
		vi->z_physical_clusterbits[1] = vi->z_logical_clusterbits;
		set_bit(EROFS_V_Z_INITED_BIT, &vi->flags);
	}

	inode->i_mapping->a_ops = &z_erofs_vle_normalaccess_aops;
	return 0;
}
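
/*
 * Lazily parse the on-disk z_erofs_map_header on the first mapping
 * request: take the per-inode EROFS_V_BL_Z_BIT lock, read the header
 * block and cache the decoded cluster parameters in the erofs_vnode.
 */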
static int fill_inode_lazy(struct inode *inode)
{
	struct erofs_vnode *const vi = EROFS_V(inode);
	struct super_block *const sb = inode->i_sb;
	int err;
	erofs_off_t pos;
	struct page *page;
	void *kaddr;
	struct z_erofs_map_header *h;

	if (test_bit(EROFS_V_Z_INITED_BIT, &vi->flags))
		return 0;

	if (wait_on_bit_lock(&vi->flags, EROFS_V_BL_Z_BIT, TASK_KILLABLE))
		return -ERESTARTSYS;

	err = 0;
	if (test_bit(EROFS_V_Z_INITED_BIT, &vi->flags))
		goto out_unlock;

	DBG_BUGON(vi->datamode == EROFS_INODE_FLAT_COMPRESSION_LEGACY);

	pos = ALIGN(iloc(EROFS_SB(sb), vi->nid) + vi->inode_isize +
		    vi->xattr_isize, 8);
	page = erofs_get_meta_page(sb, erofs_blknr(pos), false);
	if (IS_ERR(page)) {
		err = PTR_ERR(page);
		goto out_unlock;
	}

	kaddr = kmap_atomic(page);

	h = kaddr + erofs_blkoff(pos);
	vi->z_advise = le16_to_cpu(h->h_advise);
	vi->z_algorithmtype[0] = h->h_algorithmtype & 15;
	vi->z_algorithmtype[1] = h->h_algorithmtype >> 4;

	if (vi->z_algorithmtype[0] >= Z_EROFS_COMPRESSION_MAX) {
		errln("unknown compression format %u for nid %llu, please upgrade kernel",
		      vi->z_algorithmtype[0], vi->nid);
		err = -EOPNOTSUPP;
		goto unmap_done;
	}
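
	/*
	 * h_clusterbits layout, as implied by the shifts/masks below:
	 * bits 0-2 hold the logical clusterbits delta over
	 * LOG_BLOCK_SIZE, bits 3-4 the first physical clusterbits delta
	 * and bits 5-7 the second. E.g. h_clusterbits == 0 means all
	 * cluster sizes stay at the block size.
	 */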
	vi->z_logical_clusterbits = LOG_BLOCK_SIZE + (h->h_clusterbits & 7);
	vi->z_physical_clusterbits[0] = vi->z_logical_clusterbits +
					((h->h_clusterbits >> 3) & 3);

	if (vi->z_physical_clusterbits[0] != LOG_BLOCK_SIZE) {
		errln("unsupported physical clusterbits %u for nid %llu, please upgrade kernel",
		      vi->z_physical_clusterbits[0], vi->nid);
		err = -EOPNOTSUPP;
		goto unmap_done;
	}

	vi->z_physical_clusterbits[1] = vi->z_logical_clusterbits +
					((h->h_clusterbits >> 5) & 7);
unmap_done:
	kunmap_atomic(kaddr);
	unlock_page(page);
	put_page(page);

	set_bit(EROFS_V_Z_INITED_BIT, &vi->flags);
out_unlock:
	clear_and_wake_up_bit(EROFS_V_BL_Z_BIT, &vi->flags);
	return err;
}
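
/*
 * Per-lookup cursor: tracks the inode being mapped, the kmapped index
 * page and the information decoded from the current lcluster.
 */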
struct z_erofs_maprecorder {
	struct inode *inode;
	struct erofs_map_blocks *map;
	void *kaddr;

	unsigned long lcn;
	/* compression extent information gathered */
	u8  type;
	u16 clusterofs;
	u16 delta[2];
	erofs_blk_t pblk;
};

static int z_erofs_reload_indexes(struct z_erofs_maprecorder *m,
				  erofs_blk_t eblk)
{
	struct super_block *const sb = m->inode->i_sb;
	struct erofs_map_blocks *const map = m->map;
	struct page *mpage = map->mpage;

	if (mpage) {
		if (mpage->index == eblk) {
			if (!m->kaddr)
				m->kaddr = kmap_atomic(mpage);
			return 0;
		}

		if (m->kaddr) {
			kunmap_atomic(m->kaddr);
			m->kaddr = NULL;
		}
		put_page(mpage);
	}

	mpage = erofs_get_meta_page(sb, eblk, false);
	if (IS_ERR(mpage)) {
		map->mpage = NULL;
		return PTR_ERR(mpage);
	}
	m->kaddr = kmap_atomic(mpage);
	unlock_page(mpage);
	map->mpage = mpage;
	return 0;
}
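
/*
 * In the legacy (non-compacted) format, every lcluster owns one
 * fixed-size z_erofs_vle_decompressed_index on disk, so the index of
 * lcluster lcn sits at a fixed offset past the inode base.
 */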
static int vle_legacy_load_cluster_from_disk(struct z_erofs_maprecorder *m,
					     unsigned long lcn)
{
	struct inode *const inode = m->inode;
	struct erofs_vnode *const vi = EROFS_V(inode);
	const erofs_off_t ibase = iloc(EROFS_I_SB(inode), vi->nid);
	const erofs_off_t pos =
		Z_EROFS_VLE_LEGACY_INDEX_ALIGN(ibase + vi->inode_isize +
					       vi->xattr_isize) +
		lcn * sizeof(struct z_erofs_vle_decompressed_index);
	struct z_erofs_vle_decompressed_index *di;
	unsigned int advise, type;
	int err;

	err = z_erofs_reload_indexes(m, erofs_blknr(pos));
	if (err)
		return err;

	m->lcn = lcn;
	di = m->kaddr + erofs_blkoff(pos);

	advise = le16_to_cpu(di->di_advise);
	type = (advise >> Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT) &
		((1 << Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS) - 1);
	switch (type) {
	case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
		m->clusterofs = 1 << vi->z_logical_clusterbits;
		m->delta[0] = le16_to_cpu(di->di_u.delta[0]);
		m->delta[1] = le16_to_cpu(di->di_u.delta[1]);
		break;
	case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
		m->clusterofs = le16_to_cpu(di->di_clusterofs);
		m->pblk = le32_to_cpu(di->di_u.blkaddr);
		break;
	default:
		DBG_BUGON(1);
		return -EIO;
	}
	m->type = type;
	return 0;
}

static unsigned int decode_compactedbits(unsigned int lobits,
					 unsigned int lomask,
					 u8 *in, unsigned int pos, u8 *type)
{
	const unsigned int v = get_unaligned_le32(in + pos / 8) >> (pos & 7);
	const unsigned int lo = v & lomask;

	*type = (v >> lobits) & 3;
	return lo;
}
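
/*
 * Worked example, for reference: with lobits == 12 (lomask == 0xfff)
 * in the 2-byte compacted variant, entry i starts at bit offset
 * pos == 14 * i. decode_compactedbits() loads 32 little-endian bits
 * at byte pos / 8, shifts by pos & 7 so the entry begins at bit 0,
 * then returns the low 12 bits as lo and bits 12-13 as the cluster
 * type.
 */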
static int unpack_compacted_index(struct z_erofs_maprecorder *m,
				  unsigned int amortizedshift,
				  unsigned int eofs)
{
	struct erofs_vnode *const vi = EROFS_V(m->inode);
	const unsigned int lclusterbits = vi->z_logical_clusterbits;
	const unsigned int lomask = (1 << lclusterbits) - 1;
	unsigned int vcnt, base, lo, encodebits, nblk;
	int i;
	u8 *in, type;

	if (1 << amortizedshift == 4)
		vcnt = 2;
	else if (1 << amortizedshift == 2 && lclusterbits == 12)
		vcnt = 16;
	else
		return -EOPNOTSUPP;

	encodebits = ((vcnt << amortizedshift) - sizeof(__le32)) * 8 / vcnt;
	base = round_down(eofs, vcnt << amortizedshift);
	in = m->kaddr + base;
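
	/*
	 * Pack geometry, for reference: each pack of vcnt lclusters
	 * occupies vcnt << amortizedshift bytes and ends with a 4-byte
	 * blkaddr, so each entry gets encodebits bits. For the 2-byte
	 * variant (vcnt == 16): (32 - 4) * 8 / 16 == 14 bits; for the
	 * 4-byte variant (vcnt == 2): (8 - 4) * 8 / 2 == 16 bits.
	 */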
	i = (eofs - base) >> amortizedshift;

	lo = decode_compactedbits(lclusterbits, lomask,
				  in, encodebits * i, &type);
	m->type = type;
	if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) {
		m->clusterofs = 1 << lclusterbits;
		if (i + 1 != vcnt) {
			m->delta[0] = lo;
			return 0;
		}
		/*
		 * The last lcluster in the pack is special: its lo field
		 * stores delta[1] rather than delta[0], so derive
		 * delta[0] from the previous lcluster instead.
		 */
		lo = decode_compactedbits(lclusterbits, lomask,
					  in, encodebits * (i - 1), &type);
		if (type != Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD)
			lo = 0;
		m->delta[0] = lo + 1;
		return 0;
	}
	m->clusterofs = lo;
	m->delta[0] = 0;
	/* figure out blkaddr (pblk) for HEAD lclusters */
	nblk = 1;
	while (i > 0) {
		--i;
		lo = decode_compactedbits(lclusterbits, lomask,
					  in, encodebits * i, &type);
		if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD)
			i -= lo;

		if (i >= 0)
			++nblk;
	}
	/* the 4-byte blkaddr at the end of the pack is the first pblk */
	in += (vcnt << amortizedshift) - sizeof(__le32);
	m->pblk = le32_to_cpu(*(__le32 *)in) + nblk;
	return 0;
}
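
/*
 * On-disk layout of the compacted index area, as handled below: the
 * map header, then compacted_4b_initial entries to reach a 32-byte
 * boundary, then the compacted_2b region if advised, then trailing
 * 4-byte entries.
 */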
static int compacted_load_cluster_from_disk(struct z_erofs_maprecorder *m,
					    unsigned long lcn)
{
	struct inode *const inode = m->inode;
	struct erofs_vnode *const vi = EROFS_V(inode);
	const unsigned int lclusterbits = vi->z_logical_clusterbits;
	const erofs_off_t ebase = ALIGN(iloc(EROFS_I_SB(inode), vi->nid) +
					vi->inode_isize + vi->xattr_isize, 8) +
		sizeof(struct z_erofs_map_header);
	const unsigned int totalidx = DIV_ROUND_UP(inode->i_size, EROFS_BLKSIZ);
	unsigned int compacted_4b_initial, compacted_2b;
	unsigned int amortizedshift;
	erofs_off_t pos;
	int err;

	if (lclusterbits != 12)
		return -EOPNOTSUPP;

	if (lcn >= totalidx)
		return -EINVAL;

	m->lcn = lcn;
	/* align to the next 32-byte boundary, where compacted_2b starts */
	compacted_4b_initial = (32 - ebase % 32) / 4;
	if (compacted_4b_initial == 32 / 4)
		compacted_4b_initial = 0;

	if (vi->z_advise & Z_EROFS_ADVISE_COMPACTED_2B)
		compacted_2b = rounddown(totalidx - compacted_4b_initial, 16);
	else
		compacted_2b = 0;

	pos = ebase;
	if (lcn < compacted_4b_initial) {
		amortizedshift = 2;
		goto out;
	}
	pos += compacted_4b_initial * 4;
	lcn -= compacted_4b_initial;

	if (lcn < compacted_2b) {
		amortizedshift = 1;
		goto out;
	}
	pos += compacted_2b * 2;
	lcn -= compacted_2b;
	amortizedshift = 2;
out:
	pos += lcn * (1 << amortizedshift);
	err = z_erofs_reload_indexes(m, erofs_blknr(pos));
	if (err)
		return err;
	return unpack_compacted_index(m, amortizedshift, erofs_blkoff(pos));
}

static int vle_load_cluster_from_disk(struct z_erofs_maprecorder *m,
				      unsigned int lcn)
{
	const unsigned int datamode = EROFS_V(m->inode)->datamode;

	if (datamode == EROFS_INODE_FLAT_COMPRESSION_LEGACY)
		return vle_legacy_load_cluster_from_disk(m, lcn);

	if (datamode == EROFS_INODE_FLAT_COMPRESSION)
		return compacted_load_cluster_from_disk(m, lcn);

	return -EINVAL;
}
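
/*
 * NONHEAD lclusters only record how far back their HEAD lcluster is
 * (delta[0]), so resolving an extent means walking backwards until a
 * PLAIN or HEAD lcluster is reached.
 */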
static int vle_extent_lookback(struct z_erofs_maprecorder *m,
			       unsigned int lookback_distance)
{
	struct erofs_vnode *const vi = EROFS_V(m->inode);
	struct erofs_map_blocks *const map = m->map;
	const unsigned int lclusterbits = vi->z_logical_clusterbits;
	unsigned long lcn = m->lcn;
	int err;

	if (lcn < lookback_distance) {
		DBG_BUGON(1);
		return -EIO;
	}

	/* load extent head logical cluster if needed */
	lcn -= lookback_distance;
	err = vle_load_cluster_from_disk(m, lcn);
	if (err)
		return err;

	switch (m->type) {
	case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
		return vle_extent_lookback(m, m->delta[0]);
	case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
		map->m_flags &= ~EROFS_MAP_ZIPPED;
		/* fallthrough */
	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
		map->m_la = (lcn << lclusterbits) | m->clusterofs;
		break;
	default:
		errln("unknown type %u at lcn %lu of nid %llu",
		      m->type, lcn, vi->nid);
		DBG_BUGON(1);
		return -EIO;
	}
	return 0;
}
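
/*
 * The entry point: translate the logical range starting at map->m_la
 * into the physical extent (m_pa, m_plen) that backs it and the
 * logical extent (m_la, m_llen) it decompresses to.
 */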
int z_erofs_map_blocks_iter(struct inode *inode,
			    struct erofs_map_blocks *map,
			    int flags)
{
	struct erofs_vnode *const vi = EROFS_V(inode);
	struct z_erofs_maprecorder m = {
		.inode = inode,
		.map = map,
	};
	int err = 0;
	unsigned int lclusterbits, endoff;
	unsigned long long ofs, end;

	trace_z_erofs_map_blocks_iter_enter(inode, map, flags);

	/* when trying to read beyond EOF, leave it unmapped */
	if (unlikely(map->m_la >= inode->i_size)) {
		map->m_llen = map->m_la + 1 - inode->i_size;
		map->m_la = inode->i_size;
		map->m_flags = 0;
		goto out;
	}

	err = fill_inode_lazy(inode);
	if (err)
		goto out;

	lclusterbits = vi->z_logical_clusterbits;
	ofs = map->m_la;
	m.lcn = ofs >> lclusterbits;
	endoff = ofs & ((1 << lclusterbits) - 1);
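
	/*
	 * E.g. with lclusterbits == 12, an offset of 0x3456 lands in
	 * lcluster 3 (lcn == 3) at intra-lcluster offset 0x456 (endoff).
	 */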
	err = vle_load_cluster_from_disk(&m, m.lcn);
	if (err)
		goto unmap_out;

	map->m_flags = EROFS_MAP_ZIPPED;	/* by default, compressed */
	end = (m.lcn + 1ULL) << lclusterbits;

	switch (m.type) {
	case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
		if (endoff >= m.clusterofs)
			map->m_flags &= ~EROFS_MAP_ZIPPED;
		/* fallthrough */
	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
		if (endoff >= m.clusterofs) {
			map->m_la = (m.lcn << lclusterbits) | m.clusterofs;
			break;
		}
		/* m.lcn should be >= 1 if endoff < m.clusterofs */
		if (unlikely(!m.lcn)) {
			errln("invalid logical cluster 0 at nid %llu",
			      vi->nid);
			err = -EIO;
			goto unmap_out;
		}
		end = (m.lcn << lclusterbits) | m.clusterofs;
		map->m_flags |= EROFS_MAP_FULL_MAPPED;
		m.delta[0] = 1;
		/* fallthrough */
	case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
		/* get the corresponding first chunk */
		err = vle_extent_lookback(&m, m.delta[0]);
		if (unlikely(err))
			goto unmap_out;
		break;
	default:
		errln("unknown type %u at offset %llu of nid %llu",
		      m.type, ofs, vi->nid);
		err = -EOPNOTSUPP;
		goto unmap_out;
	}

	map->m_llen = end - map->m_la;
	map->m_plen = 1 << lclusterbits;
	map->m_pa = blknr_to_addr(m.pblk);
	map->m_flags |= EROFS_MAP_MAPPED;

unmap_out:
	if (m.kaddr)
		kunmap_atomic(m.kaddr);

out:
	debugln("%s, m_la %llu m_pa %llu m_llen %llu m_plen %llu m_flags 0%o",
		__func__, map->m_la, map->m_pa,
		map->m_llen, map->m_plen, map->m_flags);

	trace_z_erofs_map_blocks_iter_exit(inode, map, flags, err);

	/* aggressively BUG_ON iff CONFIG_EROFS_FS_DEBUG is on */
	DBG_BUGON(err < 0 && err != -ENOMEM);
	return err;
}
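
/*
 * A minimal usage sketch, for reference only (the real caller is the
 * decompression frontend; the flow below is an illustrative
 * assumption, not part of this file):
 *
 *	struct erofs_map_blocks map = { .m_la = pos, .mpage = NULL };
 *	int err;
 *
 *	while (map.m_la < inode->i_size) {
 *		err = z_erofs_map_blocks_iter(inode, &map, 0);
 *		if (err)
 *			break;
 *		// [m_la, m_la + m_llen) decompresses from
 *		// [m_pa, m_pa + m_plen)
 *		map.m_la += map.m_llen;
 *	}
 *	if (map.mpage)
 *		put_page(map.mpage);
 */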