// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2021, Alibaba Cloud
 */
#include <linux/module.h>
#include <linux/statfs.h>
#include <linux/parser.h>
#include <linux/seq_file.h>
#include <linux/crc32c.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/dax.h>
#include <linux/exportfs.h>
#include "xattr.h"

#define CREATE_TRACE_POINTS
#include <trace/events/erofs.h>

static struct kmem_cache *erofs_inode_cachep __read_mostly;
struct file_system_type erofs_fs_type;

void _erofs_err(struct super_block *sb, const char *function,
		const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	pr_err("(device %s): %s: %pV", sb->s_id, function, &vaf);
	va_end(args);
}

void _erofs_info(struct super_block *sb, const char *function,
		 const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	pr_info("(device %s): %pV", sb->s_id, &vaf);
	va_end(args);
}

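/* verify the crc32c checksum stored in the on-disk superblock */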
static int erofs_superblock_csum_verify(struct super_block *sb, void *sbdata)
{
	size_t len = 1 << EROFS_SB(sb)->blkszbits;
	struct erofs_super_block *dsb;
	u32 expected_crc, crc;

	if (len > EROFS_SUPER_OFFSET)
		len -= EROFS_SUPER_OFFSET;

	dsb = kmemdup(sbdata + EROFS_SUPER_OFFSET, len, GFP_KERNEL);
	if (!dsb)
		return -ENOMEM;

	expected_crc = le32_to_cpu(dsb->checksum);
	dsb->checksum = 0;
	/* to allow for x86 boot sectors and other oddities. */
	crc = crc32c(~0, dsb, len);
	kfree(dsb);

	if (crc != expected_crc) {
		erofs_err(sb, "invalid checksum 0x%08x, 0x%08x expected",
			  crc, expected_crc);
		return -EBADMSG;
	}
	return 0;
}

static void erofs_inode_init_once(void *ptr)
{
	struct erofs_inode *vi = ptr;

	inode_init_once(&vi->vfs_inode);
}

static struct inode *erofs_alloc_inode(struct super_block *sb)
{
	struct erofs_inode *vi =
		alloc_inode_sb(sb, erofs_inode_cachep, GFP_KERNEL);

	if (!vi)
		return NULL;

	/* zero out everything except vfs_inode */
	memset(vi, 0, offsetof(struct erofs_inode, vfs_inode));
	return &vi->vfs_inode;
}

static void erofs_free_inode(struct inode *inode)
{
	struct erofs_inode *vi = EROFS_I(inode);

	/* be careful of RCU symlink path */
	if (inode->i_op == &erofs_fast_symlink_iops)
		kfree(inode->i_link);
	kfree(vi->xattr_shared_xattrs);

	kmem_cache_free(erofs_inode_cachep, vi);
}

static bool check_layout_compatibility(struct super_block *sb,
				       struct erofs_super_block *dsb)
{
	const unsigned int feature = le32_to_cpu(dsb->feature_incompat);

	EROFS_SB(sb)->feature_incompat = feature;

	/* check if current kernel meets all mandatory requirements */
	if (feature & (~EROFS_ALL_FEATURE_INCOMPAT)) {
		erofs_err(sb,
			  "unidentified incompatible feature %x, please upgrade kernel version",
			  feature & ~EROFS_ALL_FEATURE_INCOMPAT);
		return false;
	}
	return true;
}

/* read variable-sized metadata, the offset will be aligned to a 4-byte boundary */
void *erofs_read_metadata(struct super_block *sb, struct erofs_buf *buf,
			  erofs_off_t *offset, int *lengthp)
{
	u8 *buffer, *ptr;
	int len, i, cnt;

	*offset = round_up(*offset, 4);
	ptr = erofs_bread(buf, erofs_blknr(sb, *offset), EROFS_KMAP);
	if (IS_ERR(ptr))
		return ptr;

	len = le16_to_cpu(*(__le16 *)&ptr[erofs_blkoff(sb, *offset)]);
	if (!len)
		len = U16_MAX + 1;
	buffer = kmalloc(len, GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);
	*offset += sizeof(__le16);
	*lengthp = len;

	for (i = 0; i < len; i += cnt) {
		cnt = min_t(int, sb->s_blocksize - erofs_blkoff(sb, *offset),
			    len - i);
		ptr = erofs_bread(buf, erofs_blknr(sb, *offset), EROFS_KMAP);
		if (IS_ERR(ptr)) {
			kfree(buffer);
			return ptr;
		}
		memcpy(buffer + i, ptr + erofs_blkoff(sb, *offset), cnt);
		*offset += cnt;
	}
	return buffer;
}

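/*
 * Load the on-disk compression configurations that follow the (extended)
 * superblock; without CONFIG_EROFS_FS_ZIP, only reject images which
 * actually carry compressed data.
 */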
#ifdef CONFIG_EROFS_FS_ZIP
static int erofs_load_compr_cfgs(struct super_block *sb,
				 struct erofs_super_block *dsb)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	unsigned int algs, alg;
	erofs_off_t offset;
	int size, ret = 0;

	sbi->available_compr_algs = le16_to_cpu(dsb->u1.available_compr_algs);
	if (sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS) {
		erofs_err(sb, "try to load compressed fs with unsupported algorithms %x",
			  sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS);
		return -EINVAL;
	}

	erofs_init_metabuf(&buf, sb);
	offset = EROFS_SUPER_OFFSET + sbi->sb_size;
	alg = 0;
	for (algs = sbi->available_compr_algs; algs; algs >>= 1, ++alg) {
		void *data;

		if (!(algs & 1))
			continue;

		data = erofs_read_metadata(sb, &buf, &offset, &size);
		if (IS_ERR(data)) {
			ret = PTR_ERR(data);
			break;
		}

		switch (alg) {
		case Z_EROFS_COMPRESSION_LZ4:
			ret = z_erofs_load_lz4_config(sb, dsb, data, size);
			break;
		case Z_EROFS_COMPRESSION_LZMA:
			ret = z_erofs_load_lzma_config(sb, dsb, data, size);
			break;
		default:
			DBG_BUGON(1);
			ret = -EFAULT;
		}
		kfree(data);
		if (ret)
			break;
	}
	erofs_put_metabuf(&buf);
	return ret;
}
#else
static int erofs_load_compr_cfgs(struct super_block *sb,
				 struct erofs_super_block *dsb)
{
	if (dsb->u1.available_compr_algs) {
		erofs_err(sb, "try to load compressed fs when compression is disabled");
		return -EINVAL;
	}
	return 0;
}
#endif

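/* set up one extra device from its on-disk device slot */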
static int erofs_init_device(struct erofs_buf *buf, struct super_block *sb,
			     struct erofs_device_info *dif, erofs_off_t *pos)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct erofs_fscache *fscache;
	struct erofs_deviceslot *dis;
	struct block_device *bdev;
	void *ptr;

	ptr = erofs_read_metabuf(buf, sb, erofs_blknr(sb, *pos), EROFS_KMAP);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);
	dis = ptr + erofs_blkoff(sb, *pos);

	if (!dif->path) {
		if (!dis->tag[0]) {
			erofs_err(sb, "empty device tag @ pos %llu", *pos);
			return -EINVAL;
		}
		dif->path = kmemdup_nul(dis->tag, sizeof(dis->tag), GFP_KERNEL);
		if (!dif->path)
			return -ENOMEM;
	}

	if (erofs_is_fscache_mode(sb)) {
		fscache = erofs_fscache_register_cookie(sb, dif->path, 0);
		if (IS_ERR(fscache))
			return PTR_ERR(fscache);
		dif->fscache = fscache;
	} else if (!sbi->devs->flatdev) {
		bdev = blkdev_get_by_path(dif->path, BLK_OPEN_READ, sb->s_type,
					  NULL);
		if (IS_ERR(bdev))
			return PTR_ERR(bdev);
		dif->bdev = bdev;
		dif->dax_dev = fs_dax_get_by_bdev(bdev, &dif->dax_part_off,
						  NULL, NULL);
	}

	dif->blocks = le32_to_cpu(dis->blocks);
	dif->mapped_blkaddr = le32_to_cpu(dis->mapped_blkaddr);
	sbi->total_blocks += dif->blocks;
	*pos += EROFS_DEVT_SLOT_SIZE;
	return 0;
}

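/* walk the on-disk device table and set up all extra devices */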
static int erofs_scan_devices(struct super_block *sb,
			      struct erofs_super_block *dsb)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	unsigned int ondisk_extradevs;
	erofs_off_t pos;
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	struct erofs_device_info *dif;
	int id, err = 0;

	sbi->total_blocks = sbi->primarydevice_blocks;
	if (!erofs_sb_has_device_table(sbi))
		ondisk_extradevs = 0;
	else
		ondisk_extradevs = le16_to_cpu(dsb->extra_devices);

	if (sbi->devs->extra_devices &&
	    ondisk_extradevs != sbi->devs->extra_devices) {
		erofs_err(sb, "extra devices don't match (ondisk %u, given %u)",
			  ondisk_extradevs, sbi->devs->extra_devices);
		return -EINVAL;
	}
	if (!ondisk_extradevs)
		return 0;

	if (!sbi->devs->extra_devices && !erofs_is_fscache_mode(sb))
		sbi->devs->flatdev = true;

	sbi->device_id_mask = roundup_pow_of_two(ondisk_extradevs + 1) - 1;
	pos = le16_to_cpu(dsb->devt_slotoff) * EROFS_DEVT_SLOT_SIZE;
	down_read(&sbi->devs->rwsem);
	if (sbi->devs->extra_devices) {
		idr_for_each_entry(&sbi->devs->tree, dif, id) {
			err = erofs_init_device(&buf, sb, dif, &pos);
			if (err)
				break;
		}
	} else {
		for (id = 0; id < ondisk_extradevs; id++) {
			dif = kzalloc(sizeof(*dif), GFP_KERNEL);
			if (!dif) {
				err = -ENOMEM;
				break;
			}

			err = idr_alloc(&sbi->devs->tree, dif, 0, 0, GFP_KERNEL);
			if (err < 0) {
				kfree(dif);
				break;
			}
			++sbi->devs->extra_devices;

			err = erofs_init_device(&buf, sb, dif, &pos);
			if (err)
				break;
		}
	}
	up_read(&sbi->devs->rwsem);
	erofs_put_metabuf(&buf);
	return err;
}

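/* read the on-disk superblock, validate it and fill in erofs_sb_info */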
static int erofs_read_superblock(struct super_block *sb)
{
	struct erofs_sb_info *sbi;
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	struct erofs_super_block *dsb;
	void *data;
	int ret;

	data = erofs_read_metabuf(&buf, sb, 0, EROFS_KMAP);
	if (IS_ERR(data)) {
		erofs_err(sb, "cannot read erofs superblock");
		return PTR_ERR(data);
	}

	sbi = EROFS_SB(sb);
	dsb = (struct erofs_super_block *)(data + EROFS_SUPER_OFFSET);

	ret = -EINVAL;
	if (le32_to_cpu(dsb->magic) != EROFS_SUPER_MAGIC_V1) {
		erofs_err(sb, "cannot find valid erofs superblock");
		goto out;
	}

	sbi->blkszbits = dsb->blkszbits;
	if (sbi->blkszbits < 9 || sbi->blkszbits > PAGE_SHIFT) {
		erofs_err(sb, "blkszbits %u isn't supported", sbi->blkszbits);
		goto out;
	}
	if (dsb->dirblkbits) {
		erofs_err(sb, "dirblkbits %u isn't supported", dsb->dirblkbits);
		goto out;
	}

	sbi->feature_compat = le32_to_cpu(dsb->feature_compat);
	if (erofs_sb_has_sb_chksum(sbi)) {
		ret = erofs_superblock_csum_verify(sb, data);
		if (ret)
			goto out;
	}

	ret = -EINVAL;
	if (!check_layout_compatibility(sb, dsb))
		goto out;

	sbi->sb_size = 128 + dsb->sb_extslots * EROFS_SB_EXTSLOT_SIZE;
	if (sbi->sb_size > PAGE_SIZE - EROFS_SUPER_OFFSET) {
		erofs_err(sb, "invalid sb_extslots %u (more than a fs block)",
			  sbi->sb_size);
		goto out;
	}
	sbi->primarydevice_blocks = le32_to_cpu(dsb->blocks);
	sbi->meta_blkaddr = le32_to_cpu(dsb->meta_blkaddr);
#ifdef CONFIG_EROFS_FS_XATTR
	sbi->xattr_blkaddr = le32_to_cpu(dsb->xattr_blkaddr);
	sbi->xattr_prefix_start = le32_to_cpu(dsb->xattr_prefix_start);
	sbi->xattr_prefix_count = dsb->xattr_prefix_count;
#endif
	sbi->islotbits = ilog2(sizeof(struct erofs_inode_compact));
	sbi->root_nid = le16_to_cpu(dsb->root_nid);
	sbi->packed_nid = le64_to_cpu(dsb->packed_nid);
	sbi->inos = le64_to_cpu(dsb->inos);

	sbi->build_time = le64_to_cpu(dsb->build_time);
	sbi->build_time_nsec = le32_to_cpu(dsb->build_time_nsec);

	memcpy(&sb->s_uuid, dsb->uuid, sizeof(dsb->uuid));

	ret = strscpy(sbi->volume_name, dsb->volume_name,
		      sizeof(dsb->volume_name));
	if (ret < 0) { /* -E2BIG */
		erofs_err(sb, "bad volume name without NIL terminator");
		ret = -EFSCORRUPTED;
		goto out;
	}

	/* parse on-disk compression configurations */
	if (erofs_sb_has_compr_cfgs(sbi))
		ret = erofs_load_compr_cfgs(sb, dsb);
	else
		ret = z_erofs_load_lz4_config(sb, dsb, NULL, 0);
	if (ret < 0)
		goto out;

	/* handle multiple devices */
	ret = erofs_scan_devices(sb, dsb);

	if (erofs_is_fscache_mode(sb))
		erofs_info(sb, "EXPERIMENTAL fscache-based on-demand read feature in use. Use at your own risk!");
	if (erofs_sb_has_fragments(sbi))
		erofs_info(sb, "EXPERIMENTAL compressed fragments feature in use. Use at your own risk!");
	if (erofs_sb_has_dedupe(sbi))
		erofs_info(sb, "EXPERIMENTAL global deduplication feature in use. Use at your own risk!");
out:
	erofs_put_metabuf(&buf);
	return ret;
}

/* set up default EROFS parameters */
static void erofs_default_options(struct erofs_fs_context *ctx)
{
#ifdef CONFIG_EROFS_FS_ZIP
	ctx->opt.cache_strategy = EROFS_ZIP_CACHE_READAROUND;
	ctx->opt.max_sync_decompress_pages = 3;
	ctx->opt.sync_decompress = EROFS_SYNC_DECOMPRESS_AUTO;
#endif
#ifdef CONFIG_EROFS_FS_XATTR
	set_opt(&ctx->opt, XATTR_USER);
#endif
#ifdef CONFIG_EROFS_FS_POSIX_ACL
	set_opt(&ctx->opt, POSIX_ACL);
#endif
}

enum {
	Opt_user_xattr,
	Opt_acl,
	Opt_cache_strategy,
	Opt_dax,
	Opt_dax_enum,
	Opt_device,
	Opt_fsid,
	Opt_domain_id,
	Opt_err
};

static const struct constant_table erofs_param_cache_strategy[] = {
	{"disabled", EROFS_ZIP_CACHE_DISABLED},
	{"readahead", EROFS_ZIP_CACHE_READAHEAD},
	{"readaround", EROFS_ZIP_CACHE_READAROUND},
	{}
};

static const struct constant_table erofs_dax_param_enums[] = {
	{"always", EROFS_MOUNT_DAX_ALWAYS},
	{"never", EROFS_MOUNT_DAX_NEVER},
	{}
};

static const struct fs_parameter_spec erofs_fs_parameters[] = {
	fsparam_flag_no("user_xattr", Opt_user_xattr),
	fsparam_flag_no("acl", Opt_acl),
	fsparam_enum("cache_strategy", Opt_cache_strategy,
		     erofs_param_cache_strategy),
	fsparam_flag("dax", Opt_dax),
	fsparam_enum("dax", Opt_dax_enum, erofs_dax_param_enums),
	fsparam_string("device", Opt_device),
	fsparam_string("fsid", Opt_fsid),
	fsparam_string("domain_id", Opt_domain_id),
	{}
};

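/* apply dax=always / dax=never; dax options are rejected without CONFIG_FS_DAX */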
static bool erofs_fc_set_dax_mode(struct fs_context *fc, unsigned int mode)
{
#ifdef CONFIG_FS_DAX
	struct erofs_fs_context *ctx = fc->fs_private;

	switch (mode) {
	case EROFS_MOUNT_DAX_ALWAYS:
		warnfc(fc, "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
		set_opt(&ctx->opt, DAX_ALWAYS);
		clear_opt(&ctx->opt, DAX_NEVER);
		return true;
	case EROFS_MOUNT_DAX_NEVER:
		set_opt(&ctx->opt, DAX_NEVER);
		clear_opt(&ctx->opt, DAX_ALWAYS);
		return true;
	default:
		DBG_BUGON(1);
		return false;
	}
#else
	errorfc(fc, "dax options not supported");
	return false;
#endif
}

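/* parse a single mount parameter into the per-fs_context private data */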
static int erofs_fc_parse_param(struct fs_context *fc,
				struct fs_parameter *param)
{
	struct erofs_fs_context *ctx = fc->fs_private;
	struct fs_parse_result result;
	struct erofs_device_info *dif;
	int opt, ret;

	opt = fs_parse(fc, erofs_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_user_xattr:
#ifdef CONFIG_EROFS_FS_XATTR
		if (result.boolean)
			set_opt(&ctx->opt, XATTR_USER);
		else
			clear_opt(&ctx->opt, XATTR_USER);
#else
		errorfc(fc, "{,no}user_xattr options not supported");
#endif
		break;
	case Opt_acl:
#ifdef CONFIG_EROFS_FS_POSIX_ACL
		if (result.boolean)
			set_opt(&ctx->opt, POSIX_ACL);
		else
			clear_opt(&ctx->opt, POSIX_ACL);
#else
		errorfc(fc, "{,no}acl options not supported");
#endif
		break;
	case Opt_cache_strategy:
#ifdef CONFIG_EROFS_FS_ZIP
		ctx->opt.cache_strategy = result.uint_32;
#else
		errorfc(fc, "compression not supported, cache_strategy ignored");
#endif
		break;
	case Opt_dax:
		if (!erofs_fc_set_dax_mode(fc, EROFS_MOUNT_DAX_ALWAYS))
			return -EINVAL;
		break;
	case Opt_dax_enum:
		if (!erofs_fc_set_dax_mode(fc, result.uint_32))
			return -EINVAL;
		break;
	case Opt_device:
		dif = kzalloc(sizeof(*dif), GFP_KERNEL);
		if (!dif)
			return -ENOMEM;
		dif->path = kstrdup(param->string, GFP_KERNEL);
		if (!dif->path) {
			kfree(dif);
			return -ENOMEM;
		}
		down_write(&ctx->devs->rwsem);
		ret = idr_alloc(&ctx->devs->tree, dif, 0, 0, GFP_KERNEL);
		up_write(&ctx->devs->rwsem);
		if (ret < 0) {
			kfree(dif->path);
			kfree(dif);
			return ret;
		}
		++ctx->devs->extra_devices;
		break;
#ifdef CONFIG_EROFS_FS_ONDEMAND
	case Opt_fsid:
		kfree(ctx->fsid);
		ctx->fsid = kstrdup(param->string, GFP_KERNEL);
		if (!ctx->fsid)
			return -ENOMEM;
		break;
	case Opt_domain_id:
		kfree(ctx->domain_id);
		ctx->domain_id = kstrdup(param->string, GFP_KERNEL);
		if (!ctx->domain_id)
			return -ENOMEM;
		break;
#else
	case Opt_fsid:
	case Opt_domain_id:
		errorfc(fc, "%s option not supported", erofs_fs_parameters[opt].name);
		break;
#endif
	default:
		return -ENOPARAM;
	}
	return 0;
}

static struct inode *erofs_nfs_get_inode(struct super_block *sb,
					 u64 ino, u32 generation)
{
	return erofs_iget(sb, ino);
}

static struct dentry *erofs_fh_to_dentry(struct super_block *sb,
		struct fid *fid, int fh_len, int fh_type)
{
	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
				    erofs_nfs_get_inode);
}

static struct dentry *erofs_fh_to_parent(struct super_block *sb,
		struct fid *fid, int fh_len, int fh_type)
{
	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
				    erofs_nfs_get_inode);
}

static struct dentry *erofs_get_parent(struct dentry *child)
{
	erofs_nid_t nid;
	unsigned int d_type;
	int err;

	err = erofs_namei(d_inode(child), &dotdot_name, &nid, &d_type);
	if (err)
		return ERR_PTR(err);
	return d_obtain_alias(erofs_iget(child->d_sb, nid));
}

static const struct export_operations erofs_export_ops = {
	.fh_to_dentry = erofs_fh_to_dentry,
	.fh_to_parent = erofs_fh_to_parent,
	.get_parent = erofs_get_parent,
};

static int erofs_fc_fill_pseudo_super(struct super_block *sb, struct fs_context *fc)
{
	static const struct tree_descr empty_descr = {""};

	return simple_fill_super(sb, EROFS_SUPER_MAGIC, &empty_descr);
}

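/*
 * Fill a new superblock: take over the parsed mount options, read the
 * on-disk superblock and set up the root (and packed) inodes.
 */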
static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
{
	struct inode *inode;
	struct erofs_sb_info *sbi;
	struct erofs_fs_context *ctx = fc->fs_private;
	int err;

	sb->s_magic = EROFS_SUPER_MAGIC;
	sb->s_flags |= SB_RDONLY | SB_NOATIME;
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_op = &erofs_sops;

	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	sb->s_fs_info = sbi;
	sbi->opt = ctx->opt;
	sbi->devs = ctx->devs;
	ctx->devs = NULL;
	sbi->fsid = ctx->fsid;
	ctx->fsid = NULL;
	sbi->domain_id = ctx->domain_id;
	ctx->domain_id = NULL;

	sbi->blkszbits = PAGE_SHIFT;
	if (erofs_is_fscache_mode(sb)) {
		sb->s_blocksize = PAGE_SIZE;
		sb->s_blocksize_bits = PAGE_SHIFT;

		err = erofs_fscache_register_fs(sb);
		if (err)
			return err;

		err = super_setup_bdi(sb);
		if (err)
			return err;
	} else {
		if (!sb_set_blocksize(sb, PAGE_SIZE)) {
			errorfc(fc, "failed to set initial blksize");
			return -EINVAL;
		}

		sbi->dax_dev = fs_dax_get_by_bdev(sb->s_bdev,
						  &sbi->dax_part_off,
						  NULL, NULL);
	}

	err = erofs_read_superblock(sb);
	if (err)
		return err;

	if (sb->s_blocksize_bits != sbi->blkszbits) {
		if (erofs_is_fscache_mode(sb)) {
			errorfc(fc, "unsupported blksize for fscache mode");
			return -EINVAL;
		}
		if (!sb_set_blocksize(sb, 1 << sbi->blkszbits)) {
			errorfc(fc, "failed to set erofs blksize");
			return -EINVAL;
		}
	}

	if (test_opt(&sbi->opt, DAX_ALWAYS)) {
		if (!sbi->dax_dev) {
			errorfc(fc, "DAX unsupported by block device. Turning off DAX.");
			clear_opt(&sbi->opt, DAX_ALWAYS);
		} else if (sbi->blkszbits != PAGE_SHIFT) {
			errorfc(fc, "unsupported blocksize for DAX");
			clear_opt(&sbi->opt, DAX_ALWAYS);
		}
	}

	sb->s_time_gran = 1;
	sb->s_xattr = erofs_xattr_handlers;
	sb->s_export_op = &erofs_export_ops;

	if (test_opt(&sbi->opt, POSIX_ACL))
		sb->s_flags |= SB_POSIXACL;
	else
		sb->s_flags &= ~SB_POSIXACL;

#ifdef CONFIG_EROFS_FS_ZIP
	xa_init(&sbi->managed_pslots);
#endif

	/* get the root inode */
	inode = erofs_iget(sb, ROOT_NID(sbi));
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	if (!S_ISDIR(inode->i_mode)) {
		erofs_err(sb, "rootino(nid %llu) is not a directory(i_mode %o)",
			  ROOT_NID(sbi), inode->i_mode);
		iput(inode);
		return -EINVAL;
	}

	sb->s_root = d_make_root(inode);
	if (!sb->s_root)
		return -ENOMEM;

	erofs_shrinker_register(sb);
	/* sb->s_umount is already locked, SB_ACTIVE and SB_BORN are not set */
	if (erofs_sb_has_fragments(sbi) && sbi->packed_nid) {
		sbi->packed_inode = erofs_iget(sb, sbi->packed_nid);
		if (IS_ERR(sbi->packed_inode)) {
			err = PTR_ERR(sbi->packed_inode);
			sbi->packed_inode = NULL;
			return err;
		}
	}
	err = erofs_init_managed_cache(sb);
	if (err)
		return err;

	err = erofs_xattr_prefixes_init(sb);
	if (err)
		return err;

	err = erofs_register_sysfs(sb);
	if (err)
		return err;

	erofs_info(sb, "mounted with root inode @ nid %llu.", ROOT_NID(sbi));
	return 0;
}

static int erofs_fc_anon_get_tree(struct fs_context *fc)
{
	return get_tree_nodev(fc, erofs_fc_fill_pseudo_super);
}

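/* fscache-based (fsid=) mounts are nodev; regular mounts need a block device */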
static int erofs_fc_get_tree(struct fs_context *fc)
{
	struct erofs_fs_context *ctx = fc->fs_private;

	if (IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND) && ctx->fsid)
		return get_tree_nodev(fc, erofs_fc_fill_super);

	return get_tree_bdev(fc, erofs_fc_fill_super);
}

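/* remount: EROFS stays read-only, only mount options are re-applied */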
static int erofs_fc_reconfigure(struct fs_context *fc)
{
	struct super_block *sb = fc->root->d_sb;
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct erofs_fs_context *ctx = fc->fs_private;

	DBG_BUGON(!sb_rdonly(sb));

	if (ctx->fsid || ctx->domain_id)
		erofs_info(sb, "ignoring reconfiguration for fsid|domain_id.");

	if (test_opt(&ctx->opt, POSIX_ACL))
		fc->sb_flags |= SB_POSIXACL;
	else
		fc->sb_flags &= ~SB_POSIXACL;

	sbi->opt = ctx->opt;

	fc->sb_flags |= SB_RDONLY;
	return 0;
}

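/* idr callback: release the resources held by one extra device */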
static int erofs_release_device_info(int id, void *ptr, void *data)
{
	struct erofs_device_info *dif = ptr;

	fs_put_dax(dif->dax_dev, NULL);
	if (dif->bdev)
		blkdev_put(dif->bdev, &erofs_fs_type);
	erofs_fscache_unregister_cookie(dif->fscache);
	dif->fscache = NULL;
	kfree(dif->path);
	kfree(dif);
	return 0;
}

static void erofs_free_dev_context(struct erofs_dev_context *devs)
{
	if (!devs)
		return;
	idr_for_each(&devs->tree, &erofs_release_device_info, NULL);
	idr_destroy(&devs->tree);
	kfree(devs);
}

static void erofs_fc_free(struct fs_context *fc)
{
	struct erofs_fs_context *ctx = fc->fs_private;

	erofs_free_dev_context(ctx->devs);
	kfree(ctx->fsid);
	kfree(ctx->domain_id);
	kfree(ctx);
}

static const struct fs_context_operations erofs_context_ops = {
	.parse_param = erofs_fc_parse_param,
	.get_tree = erofs_fc_get_tree,
	.reconfigure = erofs_fc_reconfigure,
	.free = erofs_fc_free,
};

static const struct fs_context_operations erofs_anon_context_ops = {
	.get_tree = erofs_fc_anon_get_tree,
};

static int erofs_init_fs_context(struct fs_context *fc)
{
	struct erofs_fs_context *ctx;

	/* pseudo mount for anon inodes */
	if (fc->sb_flags & SB_KERNMOUNT) {
		fc->ops = &erofs_anon_context_ops;
		return 0;
	}

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	ctx->devs = kzalloc(sizeof(struct erofs_dev_context), GFP_KERNEL);
	if (!ctx->devs) {
		kfree(ctx);
		return -ENOMEM;
	}
	fc->fs_private = ctx;

	idr_init(&ctx->devs->tree);
	init_rwsem(&ctx->devs->rwsem);
	erofs_default_options(ctx);
	fc->ops = &erofs_context_ops;
	return 0;
}

/*
 * This could be triggered after deactivate_locked_super() is called,
 * thus covering both umount and superblocks that failed to initialize.
 */
static void erofs_kill_sb(struct super_block *sb)
{
	struct erofs_sb_info *sbi;

	/* pseudo mount for anon inodes */
	if (sb->s_flags & SB_KERNMOUNT) {
		kill_anon_super(sb);
		return;
	}

	if (erofs_is_fscache_mode(sb))
		kill_anon_super(sb);
	else
		kill_block_super(sb);

	sbi = EROFS_SB(sb);
	if (!sbi)
		return;

	erofs_free_dev_context(sbi->devs);
	fs_put_dax(sbi->dax_dev, NULL);
	erofs_fscache_unregister_fs(sb);
	kfree(sbi->fsid);
	kfree(sbi->domain_id);
	kfree(sbi);
	sb->s_fs_info = NULL;
}

/* called when ->s_root is non-NULL */
static void erofs_put_super(struct super_block *sb)
{
	struct erofs_sb_info *const sbi = EROFS_SB(sb);

	DBG_BUGON(!sbi);

	erofs_unregister_sysfs(sb);
	erofs_shrinker_unregister(sb);
	erofs_xattr_prefixes_cleanup(sb);
#ifdef CONFIG_EROFS_FS_ZIP
	iput(sbi->managed_cache);
	sbi->managed_cache = NULL;
#endif
	iput(sbi->packed_inode);
	sbi->packed_inode = NULL;
	erofs_free_dev_context(sbi->devs);
	sbi->devs = NULL;
	erofs_fscache_unregister_fs(sb);
}

struct file_system_type erofs_fs_type = {
	.owner = THIS_MODULE,
	.name = "erofs",
	.init_fs_context = erofs_init_fs_context,
	.kill_sb = erofs_kill_sb,
	.fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
};
MODULE_ALIAS_FS("erofs");

static int __init erofs_module_init(void)
{
	int err;

	erofs_check_ondisk_layout_definitions();

	erofs_inode_cachep = kmem_cache_create("erofs_inode",
					       sizeof(struct erofs_inode), 0,
					       SLAB_RECLAIM_ACCOUNT,
					       erofs_inode_init_once);
	if (!erofs_inode_cachep)
		return -ENOMEM;

	err = erofs_init_shrinker();
	if (err)
		goto shrinker_err;

	err = z_erofs_lzma_init();
	if (err)
		goto lzma_err;

	erofs_pcpubuf_init();
	err = z_erofs_init_zip_subsystem();
	if (err)
		goto zip_err;

	err = erofs_init_sysfs();
	if (err)
		goto sysfs_err;

	err = register_filesystem(&erofs_fs_type);
	if (err)
		goto fs_err;

	return 0;

fs_err:
	erofs_exit_sysfs();
sysfs_err:
	z_erofs_exit_zip_subsystem();
zip_err:
	z_erofs_lzma_exit();
lzma_err:
	erofs_exit_shrinker();
shrinker_err:
	kmem_cache_destroy(erofs_inode_cachep);
	return err;
}

static void __exit erofs_module_exit(void)
{
	unregister_filesystem(&erofs_fs_type);

	/* Ensure all RCU free inodes / pclusters are safe to be destroyed. */
	rcu_barrier();

	erofs_exit_sysfs();
	z_erofs_exit_zip_subsystem();
	z_erofs_lzma_exit();
	erofs_exit_shrinker();
	kmem_cache_destroy(erofs_inode_cachep);
	erofs_pcpubuf_exit();
}

/* get filesystem statistics */
static int erofs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	u64 id = 0;

	if (!erofs_is_fscache_mode(sb))
		id = huge_encode_dev(sb->s_bdev->bd_dev);

	buf->f_type = sb->s_magic;
	buf->f_bsize = sb->s_blocksize;
	buf->f_blocks = sbi->total_blocks;
	buf->f_bfree = buf->f_bavail = 0;

	buf->f_files = ULLONG_MAX;
	buf->f_ffree = ULLONG_MAX - sbi->inos;

	buf->f_namelen = EROFS_NAME_LEN;

	buf->f_fsid = u64_to_fsid(id);
	return 0;
}

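/* show the effective mount options (for /proc/mounts and friends) */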
static int erofs_show_options(struct seq_file *seq, struct dentry *root)
{
	struct erofs_sb_info *sbi = EROFS_SB(root->d_sb);
	struct erofs_mount_opts *opt = &sbi->opt;

#ifdef CONFIG_EROFS_FS_XATTR
	if (test_opt(opt, XATTR_USER))
		seq_puts(seq, ",user_xattr");
	else
		seq_puts(seq, ",nouser_xattr");
#endif
#ifdef CONFIG_EROFS_FS_POSIX_ACL
	if (test_opt(opt, POSIX_ACL))
		seq_puts(seq, ",acl");
	else
		seq_puts(seq, ",noacl");
#endif
#ifdef CONFIG_EROFS_FS_ZIP
	if (opt->cache_strategy == EROFS_ZIP_CACHE_DISABLED)
		seq_puts(seq, ",cache_strategy=disabled");
	else if (opt->cache_strategy == EROFS_ZIP_CACHE_READAHEAD)
		seq_puts(seq, ",cache_strategy=readahead");
	else if (opt->cache_strategy == EROFS_ZIP_CACHE_READAROUND)
		seq_puts(seq, ",cache_strategy=readaround");
#endif
	if (test_opt(opt, DAX_ALWAYS))
		seq_puts(seq, ",dax=always");
	if (test_opt(opt, DAX_NEVER))
		seq_puts(seq, ",dax=never");
#ifdef CONFIG_EROFS_FS_ONDEMAND
	if (sbi->fsid)
		seq_printf(seq, ",fsid=%s", sbi->fsid);
	if (sbi->domain_id)
		seq_printf(seq, ",domain_id=%s", sbi->domain_id);
#endif
	return 0;
}

const struct super_operations erofs_sops = {
	.put_super = erofs_put_super,
	.alloc_inode = erofs_alloc_inode,
	.free_inode = erofs_free_inode,
	.statfs = erofs_statfs,
	.show_options = erofs_show_options,
};

module_init(erofs_module_init);
module_exit(erofs_module_exit);

MODULE_DESCRIPTION("Enhanced ROM File System");
MODULE_AUTHOR("Gao Xiang, Chao Yu, Miao Xie, CONSUMER BG, HUAWEI Inc.");
MODULE_LICENSE("GPL");