staging: erofs: drop multiref support temporarily
[linux-2.6-block.git] / drivers/staging/erofs/internal.h
/* SPDX-License-Identifier: GPL-2.0
 *
 * linux/drivers/staging/erofs/internal.h
 *
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of the Linux
 * distribution for more details.
 */
#ifndef __INTERNAL_H
#define __INTERNAL_H

#include <linux/fs.h>
#include <linux/dcache.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/cleancache.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "erofs_fs.h"

/* redefine pr_fmt "erofs: " */
#undef pr_fmt
#define pr_fmt(fmt) "erofs: " fmt

#define errln(x, ...)   pr_err(x "\n", ##__VA_ARGS__)
#define infoln(x, ...)  pr_info(x "\n", ##__VA_ARGS__)
#ifdef CONFIG_EROFS_FS_DEBUG
#define debugln(x, ...) pr_debug(x "\n", ##__VA_ARGS__)

#define dbg_might_sleep might_sleep
#define DBG_BUGON       BUG_ON
#else
#define debugln(x, ...) ((void)0)

#define dbg_might_sleep() ((void)0)
#define DBG_BUGON(...)  ((void)0)
#endif

enum {
        FAULT_KMALLOC,
        FAULT_MAX,
};

#ifdef CONFIG_EROFS_FAULT_INJECTION
extern char *erofs_fault_name[FAULT_MAX];
#define IS_FAULT_SET(fi, type)  ((fi)->inject_type & (1 << (type)))

struct erofs_fault_info {
        atomic_t inject_ops;
        unsigned int inject_rate;
        unsigned int inject_type;
};
#endif

#ifdef CONFIG_EROFS_FS_ZIP_CACHE_BIPOLAR
#define EROFS_FS_ZIP_CACHE_LVL  (2)
#elif defined(CONFIG_EROFS_FS_ZIP_CACHE_UNIPOLAR)
#define EROFS_FS_ZIP_CACHE_LVL  (1)
#else
#define EROFS_FS_ZIP_CACHE_LVL  (0)
#endif

#if (!defined(EROFS_FS_HAS_MANAGED_CACHE) && (EROFS_FS_ZIP_CACHE_LVL > 0))
#define EROFS_FS_HAS_MANAGED_CACHE
#endif

/* EROFS_SUPER_MAGIC_V1 to represent the whole file system */
#define EROFS_SUPER_MAGIC       EROFS_SUPER_MAGIC_V1

typedef u64 erofs_nid_t;

struct erofs_sb_info {
        /* list for all registered superblocks, mainly for shrinker */
        struct list_head list;
        struct mutex umount_mutex;

        u32 blocks;
        u32 meta_blkaddr;
#ifdef CONFIG_EROFS_FS_XATTR
        u32 xattr_blkaddr;
#endif

        /* inode slot unit size in bit shift */
        unsigned char islotbits;
#ifdef CONFIG_EROFS_FS_ZIP
        /* cluster size in bit shift */
        unsigned char clusterbits;

        /* the dedicated workstation for compression */
        struct radix_tree_root workstn_tree;

#ifdef EROFS_FS_HAS_MANAGED_CACHE
        struct inode *managed_cache;
#endif

#endif

        u32 build_time_nsec;
        u64 build_time;

        /* what we really care about is nid, rather than ino.. */
        erofs_nid_t root_nid;
        /* used for statfs, f_files - f_favail */
        u64 inos;

        u8 uuid[16];            /* 128-bit uuid for volume */
        u8 volume_name[16];     /* volume name */
        char *dev_name;

        unsigned int mount_opt;
        unsigned int shrinker_run_no;

#ifdef CONFIG_EROFS_FAULT_INJECTION
        struct erofs_fault_info fault_info;     /* For fault injection */
#endif
};

#ifdef CONFIG_EROFS_FAULT_INJECTION
#define erofs_show_injection_info(type)                                 \
        infoln("inject %s in %s of %pS", erofs_fault_name[type],        \
               __func__, __builtin_return_address(0))

static inline bool time_to_inject(struct erofs_sb_info *sbi, int type)
{
        struct erofs_fault_info *ffi = &sbi->fault_info;

        if (!ffi->inject_rate)
                return false;

        if (!IS_FAULT_SET(ffi, type))
                return false;

        atomic_inc(&ffi->inject_ops);
        if (atomic_read(&ffi->inject_ops) >= ffi->inject_rate) {
                atomic_set(&ffi->inject_ops, 0);
                return true;
        }
        return false;
}
#else
static inline bool time_to_inject(struct erofs_sb_info *sbi, int type)
{
        return false;
}

static inline void erofs_show_injection_info(int type)
{
}
#endif

static inline void *erofs_kmalloc(struct erofs_sb_info *sbi,
                                  size_t size, gfp_t flags)
{
        if (time_to_inject(sbi, FAULT_KMALLOC)) {
                erofs_show_injection_info(FAULT_KMALLOC);
                return NULL;
        }
        return kmalloc(size, flags);
}

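/*
 * Usage sketch (editor's illustration, not part of the original header):
 * a caller treats an injected failure exactly like a real allocation
 * failure, so error paths can be exercised without memory pressure.
 * erofs_alloc_foo() below is a hypothetical helper.
 *
 *      static int erofs_alloc_foo(struct erofs_sb_info *sbi)
 *      {
 *              void *p = erofs_kmalloc(sbi, 128, GFP_KERNEL);
 *
 *              if (!p)
 *                      return -ENOMEM;
 *              kfree(p);
 *              return 0;
 *      }
 */
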
#define EROFS_SB(sb) ((struct erofs_sb_info *)(sb)->s_fs_info)
#define EROFS_I_SB(inode) ((struct erofs_sb_info *)(inode)->i_sb->s_fs_info)

/* Mount flags set via mount options or defaults */
#define EROFS_MOUNT_XATTR_USER          0x00000010
#define EROFS_MOUNT_POSIX_ACL           0x00000020
#define EROFS_MOUNT_FAULT_INJECTION     0x00000040

#define clear_opt(sbi, option)  ((sbi)->mount_opt &= ~EROFS_MOUNT_##option)
#define set_opt(sbi, option)    ((sbi)->mount_opt |= EROFS_MOUNT_##option)
#define test_opt(sbi, option)   ((sbi)->mount_opt & EROFS_MOUNT_##option)

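/*
 * Note (editor's illustration): the ##option paste means callers pass only
 * the suffix of the flag name, e.g. set_opt(sbi, XATTR_USER) expands to
 * ((sbi)->mount_opt |= EROFS_MOUNT_XATTR_USER):
 *
 *      set_opt(sbi, XATTR_USER);
 *      if (test_opt(sbi, POSIX_ACL))
 *              sb->s_flags |= SB_POSIXACL;
 */
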
#ifdef CONFIG_EROFS_FS_ZIP
#define erofs_workstn_lock(sbi)         xa_lock(&(sbi)->workstn_tree)
#define erofs_workstn_unlock(sbi)       xa_unlock(&(sbi)->workstn_tree)

/* basic unit of the workstation of a super_block */
struct erofs_workgroup {
        /* the workgroup index in the workstation */
        pgoff_t index;

        /* overall workgroup reference count */
        atomic_t refcount;
};

#define EROFS_LOCKED_MAGIC      (INT_MIN | 0xE0F510CCL)

static inline bool erofs_workgroup_try_to_freeze(
        struct erofs_workgroup *grp, int v)
{
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
        if (v != atomic_cmpxchg(&grp->refcount,
                                v, EROFS_LOCKED_MAGIC))
                return false;
        preempt_disable();
#else
        preempt_disable();
        if (atomic_read(&grp->refcount) != v) {
                preempt_enable();
                return false;
        }
#endif
        return true;
}

static inline void erofs_workgroup_unfreeze(
        struct erofs_workgroup *grp, int v)
{
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
        atomic_set(&grp->refcount, v);
#endif
        preempt_enable();
}

static inline int erofs_workgroup_get(struct erofs_workgroup *grp, int *ocnt)
{
        const int locked = (int)EROFS_LOCKED_MAGIC;
        int o;

repeat:
        o = atomic_read(&grp->refcount);

        /* spin if it is temporarily locked at the reclaim path */
        if (unlikely(o == locked)) {
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
                do
                        cpu_relax();
                while (atomic_read(&grp->refcount) == locked);
#endif
                goto repeat;
        }

        if (unlikely(o <= 0))
                return -1;

        if (unlikely(atomic_cmpxchg(&grp->refcount, o, o + 1) != o))
                goto repeat;

        *ocnt = o;
        return 0;
}

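/*
 * Locking sketch (editor's illustration): the refcount doubles as a
 * spinlock. A reclaimer freezes the count at EROFS_LOCKED_MAGIC while
 * concurrent lookups spin inside erofs_workgroup_get(); each successful
 * freeze must be paired with an unfreeze restoring the observed value:
 *
 *      int cnt = atomic_read(&grp->refcount);
 *
 *      if (erofs_workgroup_try_to_freeze(grp, cnt)) {
 *              ... tear down or migrate the workgroup ...
 *              erofs_workgroup_unfreeze(grp, cnt);
 *      }
 */
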
#define __erofs_workgroup_get(grp)      atomic_inc(&(grp)->refcount)

extern int erofs_workgroup_put(struct erofs_workgroup *grp);

extern struct erofs_workgroup *erofs_find_workgroup(
        struct super_block *sb, pgoff_t index, bool *tag);

extern int erofs_register_workgroup(struct super_block *sb,
        struct erofs_workgroup *grp, bool tag);

extern unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
        unsigned long nr_shrink, bool cleanup);

static inline void erofs_workstation_cleanup_all(struct super_block *sb)
{
        erofs_shrink_workstation(EROFS_SB(sb), ~0UL, true);
}

#ifdef EROFS_FS_HAS_MANAGED_CACHE
#define EROFS_UNALLOCATED_CACHED_PAGE   ((void *)0x5F0EF00D)

extern int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
        struct erofs_workgroup *egrp);
extern int erofs_try_to_free_cached_page(struct address_space *mapping,
        struct page *page);
#endif

#endif

/* we strictly follow PAGE_SIZE and no buffer head yet */
#define LOG_BLOCK_SIZE          PAGE_SHIFT

#undef LOG_SECTORS_PER_BLOCK
#define LOG_SECTORS_PER_BLOCK   (PAGE_SHIFT - 9)

#undef SECTORS_PER_BLOCK
#define SECTORS_PER_BLOCK       (1 << LOG_SECTORS_PER_BLOCK)

#define EROFS_BLKSIZ            (1 << LOG_BLOCK_SIZE)

#if (EROFS_BLKSIZ % 4096 || !EROFS_BLKSIZ)
#error erofs cannot be used on this platform
#endif

#define ROOT_NID(sb)            ((sb)->root_nid)

#ifdef CONFIG_EROFS_FS_ZIP
/* hard limit of pages per compressed cluster */
#define Z_EROFS_CLUSTER_MAX_PAGES       (CONFIG_EROFS_FS_CLUSTER_PAGE_LIMIT)

/* page count of a compressed cluster */
#define erofs_clusterpages(sbi) ((1 << (sbi)->clusterbits) / PAGE_SIZE)
#endif

typedef u64 erofs_off_t;

/* data type for filesystem-wide blocks number */
typedef u32 erofs_blk_t;

#define erofs_blknr(addr)       ((addr) / EROFS_BLKSIZ)
#define erofs_blkoff(addr)      ((addr) % EROFS_BLKSIZ)
#define blknr_to_addr(nr)       ((erofs_off_t)(nr) * EROFS_BLKSIZ)

static inline erofs_off_t iloc(struct erofs_sb_info *sbi, erofs_nid_t nid)
{
        return blknr_to_addr(sbi->meta_blkaddr) + (nid << sbi->islotbits);
}

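/*
 * Worked example (editor's illustration): with 4KiB blocks, meta_blkaddr
 * of 2 and 32-byte inode slots (islotbits == 5), nid 16 resolves to
 *
 *      iloc(sbi, 16) = 2 * 4096 + (16 << 5) = 8192 + 512 = 8704
 *
 * i.e. 512 bytes into the third metadata block.
 */
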
#define inode_set_inited_xattr(inode)   (EROFS_V(inode)->flags |= 1)
#define inode_has_inited_xattr(inode)   (EROFS_V(inode)->flags & 1)

struct erofs_vnode {
        erofs_nid_t nid;
        unsigned int flags;

        unsigned char data_mapping_mode;
        /* inline size in bytes */
        unsigned char inode_isize;
        unsigned short xattr_isize;

        unsigned int xattr_shared_count;
        unsigned int *xattr_shared_xattrs;

        erofs_blk_t raw_blkaddr;

        /* the corresponding vfs inode */
        struct inode vfs_inode;
};

#define EROFS_V(ptr)    \
        container_of(ptr, struct erofs_vnode, vfs_inode)

#define __inode_advise(x, bit, bits) \
        (((x) >> (bit)) & ((1 << (bits)) - 1))

#define __inode_version(advise) \
        __inode_advise(advise, EROFS_I_VERSION_BIT,     \
                EROFS_I_VERSION_BITS)

#define __inode_data_mapping(advise)    \
        __inode_advise(advise, EROFS_I_DATA_MAPPING_BIT,\
                EROFS_I_DATA_MAPPING_BITS)

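/*
 * Example (editor's illustration, assuming EROFS_I_VERSION_BIT == 0 and
 * EROFS_I_VERSION_BITS == 1 as defined in erofs_fs.h): __inode_advise()
 * extracts a bitfield from the on-disk i_advise word, so
 *
 *      __inode_version(advise)
 *
 * expands to ((advise >> 0) & ((1 << 1) - 1)), i.e. the low layout-version
 * bit of the inode.
 */
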
static inline unsigned long inode_datablocks(struct inode *inode)
{
        /* since i_size cannot be changed */
        return DIV_ROUND_UP(inode->i_size, EROFS_BLKSIZ);
}

static inline bool is_inode_layout_plain(struct inode *inode)
{
        return EROFS_V(inode)->data_mapping_mode == EROFS_INODE_LAYOUT_PLAIN;
}

static inline bool is_inode_layout_compression(struct inode *inode)
{
        return EROFS_V(inode)->data_mapping_mode ==
                EROFS_INODE_LAYOUT_COMPRESSION;
}

static inline bool is_inode_layout_inline(struct inode *inode)
{
        return EROFS_V(inode)->data_mapping_mode == EROFS_INODE_LAYOUT_INLINE;
}

extern const struct super_operations erofs_sops;
extern const struct inode_operations erofs_dir_iops;
extern const struct file_operations erofs_dir_fops;

extern const struct address_space_operations erofs_raw_access_aops;
#ifdef CONFIG_EROFS_FS_ZIP
extern const struct address_space_operations z_erofs_vle_normalaccess_aops;
#endif

/*
 * Logical to physical block mapping, used by erofs_map_blocks()
 *
 * Unlike other file systems, it is used for 2 access modes:
 *
 * 1) RAW access mode:
 *
 * Users pass a valid (m_lblk, m_lofs -- usually 0) pair,
 * and get the valid m_pblk, m_pofs and the longest m_len (in bytes).
 *
 * Note that m_lblk in the RAW access mode refers to the number of
 * the compressed ondisk block rather than the uncompressed
 * in-memory block for the compressed file.
 *
 * m_pofs equals m_lofs except for the inline data page.
 *
 * 2) Normal access mode:
 *
 * If the inode is not compressed, it is no different from
 * the RAW access mode. However, if the inode is compressed,
 * users should pass a valid (m_lblk, m_lofs) pair, and get
 * the needed m_pblk, m_pofs, m_len to get the compressed data
 * and the updated m_lblk, m_lofs which indicate the start
 * of the corresponding uncompressed data in the file.
 */
enum {
        BH_Zipped = BH_PrivateStart,
};

/* Has a disk mapping */
#define EROFS_MAP_MAPPED        (1 << BH_Mapped)
/* Located in metadata (could be copied from bd_inode) */
#define EROFS_MAP_META          (1 << BH_Meta)
/* The extent has been compressed */
#define EROFS_MAP_ZIPPED        (1 << BH_Zipped)

struct erofs_map_blocks {
        erofs_off_t m_pa, m_la;
        u64 m_plen, m_llen;

        unsigned int m_flags;
};

/* Flags used by erofs_map_blocks() */
#define EROFS_GET_BLOCKS_RAW    0x0001

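/*
 * Call sketch (editor's illustration): a raw-mode lookup fills in the
 * physical extent backing a logical offset; every member of `map` other
 * than m_la is an output here:
 *
 *      struct erofs_map_blocks map = { .m_la = pos };
 *      int err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
 *
 *      if (!err && (map.m_flags & EROFS_MAP_MAPPED))
 *              ... read map.m_plen bytes starting at map.m_pa ...
 */
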
/* data.c */
static inline struct bio *
erofs_grab_bio(struct super_block *sb,
               erofs_blk_t blkaddr, unsigned int nr_pages,
               bio_end_io_t endio, bool nofail)
{
        const gfp_t gfp = GFP_NOIO;
        struct bio *bio;

        do {
                if (nr_pages == 1) {
                        bio = bio_alloc(gfp | (nofail ? __GFP_NOFAIL : 0), 1);
                        if (unlikely(bio == NULL)) {
                                DBG_BUGON(nofail);
                                return ERR_PTR(-ENOMEM);
                        }
                        break;
                }
                bio = bio_alloc(gfp, nr_pages);
                nr_pages /= 2;
        } while (unlikely(bio == NULL));

        bio->bi_end_io = endio;
        bio_set_dev(bio, sb->s_bdev);
        bio->bi_iter.bi_sector = (sector_t)blkaddr << LOG_SECTORS_PER_BLOCK;
        return bio;
}

static inline void __submit_bio(struct bio *bio,
                                unsigned int op, unsigned int op_flags)
{
        bio_set_op_attrs(bio, op, op_flags);
        submit_bio(bio);
}

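/*
 * Usage sketch (editor's illustration): a typical reader grabs a bio,
 * attaches pages and submits it as a read; read_endio is a hypothetical
 * completion callback and error handling is elided:
 *
 *      struct bio *bio = erofs_grab_bio(sb, blkaddr, 1, read_endio, false);
 *
 *      if (IS_ERR(bio))
 *              return PTR_ERR(bio);
 *      bio_add_page(bio, page, PAGE_SIZE, 0);
 *      __submit_bio(bio, REQ_OP_READ, 0);
 */
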
#ifndef CONFIG_EROFS_FS_IO_MAX_RETRIES
#define EROFS_IO_MAX_RETRIES_NOFAIL     0
#else
#define EROFS_IO_MAX_RETRIES_NOFAIL     CONFIG_EROFS_FS_IO_MAX_RETRIES
#endif

extern struct page *__erofs_get_meta_page(struct super_block *sb,
        erofs_blk_t blkaddr, bool prio, bool nofail);

static inline struct page *erofs_get_meta_page(struct super_block *sb,
        erofs_blk_t blkaddr, bool prio)
{
        return __erofs_get_meta_page(sb, blkaddr, prio, false);
}

static inline struct page *erofs_get_meta_page_nofail(struct super_block *sb,
        erofs_blk_t blkaddr, bool prio)
{
        return __erofs_get_meta_page(sb, blkaddr, prio, true);
}

extern int erofs_map_blocks(struct inode *, struct erofs_map_blocks *, int);
extern int erofs_map_blocks_iter(struct inode *, struct erofs_map_blocks *,
        struct page **, int);

struct erofs_map_blocks_iter {
        struct erofs_map_blocks map;
        struct page *mpage;
};

static inline struct page *
erofs_get_inline_page(struct inode *inode,
                      erofs_blk_t blkaddr)
{
        return erofs_get_meta_page(inode->i_sb,
                                   blkaddr, S_ISDIR(inode->i_mode));
}

/* inode.c */
extern struct inode *erofs_iget(struct super_block *sb,
        erofs_nid_t nid, bool dir);

/* dir.c */
int erofs_namei(struct inode *dir, struct qstr *name,
        erofs_nid_t *nid, unsigned int *d_type);

#ifdef CONFIG_EROFS_FS_XATTR
/* xattr.c */
extern const struct xattr_handler *erofs_xattr_handlers[];

/* symlink and special inode */
extern const struct inode_operations erofs_symlink_xattr_iops;
extern const struct inode_operations erofs_fast_symlink_xattr_iops;
extern const struct inode_operations erofs_special_inode_operations;
#endif

static inline void set_inode_fast_symlink(struct inode *inode)
{
#ifdef CONFIG_EROFS_FS_XATTR
        inode->i_op = &erofs_fast_symlink_xattr_iops;
#else
        inode->i_op = &simple_symlink_inode_operations;
#endif
}

static inline bool is_inode_fast_symlink(struct inode *inode)
{
#ifdef CONFIG_EROFS_FS_XATTR
        return inode->i_op == &erofs_fast_symlink_xattr_iops;
#else
        return inode->i_op == &simple_symlink_inode_operations;
#endif
}

static inline void *erofs_vmap(struct page **pages, unsigned int count)
{
#ifdef CONFIG_EROFS_FS_USE_VM_MAP_RAM
        int i = 0;

        while (1) {
                void *addr = vm_map_ram(pages, count, -1, PAGE_KERNEL);

                /* retry two more times (3 times in total) */
                if (addr != NULL || ++i >= 3)
                        return addr;
                vm_unmap_aliases();
        }
#else
        return vmap(pages, count, VM_MAP, PAGE_KERNEL);
#endif
}

static inline void erofs_vunmap(const void *mem, unsigned int count)
{
#ifdef CONFIG_EROFS_FS_USE_VM_MAP_RAM
        vm_unmap_ram(mem, count);
#else
        vunmap(mem);
#endif
}

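/*
 * Pairing note (editor's illustration): erofs_vmap()/erofs_vunmap() must
 * stay symmetric, since the CONFIG_EROFS_FS_USE_VM_MAP_RAM variants need
 * vm_unmap_ram() rather than vunmap():
 *
 *      void *addr = erofs_vmap(pages, nr);
 *
 *      if (addr) {
 *              ... access the virtually contiguous buffer ...
 *              erofs_vunmap(addr, nr);
 *      }
 */
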
/* utils.c */
extern struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp);

extern void erofs_register_super(struct super_block *sb);
extern void erofs_unregister_super(struct super_block *sb);

extern unsigned long erofs_shrink_count(struct shrinker *shrink,
        struct shrink_control *sc);
extern unsigned long erofs_shrink_scan(struct shrinker *shrink,
        struct shrink_control *sc);

#ifndef lru_to_page
#define lru_to_page(head) (list_entry((head)->prev, struct page, lru))
#endif

#endif