// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2021, Alibaba Cloud
 */
#include "xattr.h"

#include <trace/events/erofs.h>
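
/*
 * Read the on-disk inode (either the compact or the extended form) and fill
 * the in-memory erofs_inode / VFS inode fields accordingly.  The mapped
 * metadata buffer is returned so that the caller can keep parsing inline
 * data; an ERR_PTR() is returned on failure.
 */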
static void *erofs_read_inode(struct erofs_buf *buf,
			      struct inode *inode, unsigned int *ofs)
{
	struct super_block *sb = inode->i_sb;
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct erofs_inode *vi = EROFS_I(inode);
	const erofs_off_t inode_loc = erofs_iloc(inode);

	erofs_blk_t blkaddr, nblks = 0;
	void *kaddr;
	struct erofs_inode_compact *dic;
	struct erofs_inode_extended *die, *copied = NULL;
	unsigned int ifmt;
	int err;

	blkaddr = erofs_blknr(sb, inode_loc);
	*ofs = erofs_blkoff(sb, inode_loc);

	kaddr = erofs_read_metabuf(buf, sb, blkaddr, EROFS_KMAP);
	if (IS_ERR(kaddr)) {
		erofs_err(sb, "failed to get inode (nid: %llu) page, err %ld",
			  vi->nid, PTR_ERR(kaddr));
		return kaddr;
	}

	dic = kaddr + *ofs;
	ifmt = le16_to_cpu(dic->i_format);

	if (ifmt & ~EROFS_I_ALL) {
		erofs_err(inode->i_sb, "unsupported i_format %u of nid %llu",
			  ifmt, vi->nid);
		err = -EOPNOTSUPP;
		goto err_out;
	}

	vi->datalayout = erofs_inode_datalayout(ifmt);
	if (vi->datalayout >= EROFS_INODE_DATALAYOUT_MAX) {
		erofs_err(inode->i_sb, "unsupported datalayout %u of nid %llu",
			  vi->datalayout, vi->nid);
		err = -EOPNOTSUPP;
		goto err_out;
	}

	switch (erofs_inode_version(ifmt)) {
	case EROFS_INODE_LAYOUT_EXTENDED:
		vi->inode_isize = sizeof(struct erofs_inode_extended);
		/* check if the extended inode crosses the block boundary */
		if (*ofs + vi->inode_isize <= sb->s_blocksize) {
			*ofs += vi->inode_isize;
			die = (struct erofs_inode_extended *)dic;
		} else {
			const unsigned int gotten = sb->s_blocksize - *ofs;

			copied = kmalloc(vi->inode_isize, GFP_NOFS);
			if (!copied) {
				err = -ENOMEM;
				goto err_out;
			}
			memcpy(copied, dic, gotten);
			kaddr = erofs_read_metabuf(buf, sb, blkaddr + 1,
						   EROFS_KMAP);
			if (IS_ERR(kaddr)) {
				erofs_err(sb, "failed to get inode payload block (nid: %llu), err %ld",
					  vi->nid, PTR_ERR(kaddr));
				kfree(copied);
				return kaddr;
			}
			*ofs = vi->inode_isize - gotten;
			memcpy((u8 *)copied + gotten, kaddr, *ofs);
			die = copied;
		}
		vi->xattr_isize = erofs_xattr_ibody_size(die->i_xattr_icount);
		inode->i_mode = le16_to_cpu(die->i_mode);
		switch (inode->i_mode & S_IFMT) {
		case S_IFREG:
		case S_IFDIR:
		case S_IFLNK:
			vi->raw_blkaddr = le32_to_cpu(die->i_u.raw_blkaddr);
			break;
		case S_IFCHR:
		case S_IFBLK:
			inode->i_rdev =
				new_decode_dev(le32_to_cpu(die->i_u.rdev));
			break;
		case S_IFIFO:
		case S_IFSOCK:
			inode->i_rdev = 0;
			break;
		default:
			goto bogusimode;
		}
		i_uid_write(inode, le32_to_cpu(die->i_uid));
		i_gid_write(inode, le32_to_cpu(die->i_gid));
		set_nlink(inode, le32_to_cpu(die->i_nlink));
		/* extended inode has its own timestamp */
		inode->i_ctime.tv_sec = le64_to_cpu(die->i_mtime);
		inode->i_ctime.tv_nsec = le32_to_cpu(die->i_mtime_nsec);

		inode->i_size = le64_to_cpu(die->i_size);

		/* total blocks for compressed files */
		if (erofs_inode_is_data_compressed(vi->datalayout))
			nblks = le32_to_cpu(die->i_u.compressed_blocks);
		else if (vi->datalayout == EROFS_INODE_CHUNK_BASED)
			/* fill chunked inode summary info */
			vi->chunkformat = le16_to_cpu(die->i_u.c.format);
		kfree(copied);
		copied = NULL;
		break;
	case EROFS_INODE_LAYOUT_COMPACT:
		vi->inode_isize = sizeof(struct erofs_inode_compact);
		*ofs += vi->inode_isize;
		vi->xattr_isize = erofs_xattr_ibody_size(dic->i_xattr_icount);

		inode->i_mode = le16_to_cpu(dic->i_mode);
		switch (inode->i_mode & S_IFMT) {
		case S_IFREG:
		case S_IFDIR:
		case S_IFLNK:
			vi->raw_blkaddr = le32_to_cpu(dic->i_u.raw_blkaddr);
			break;
		case S_IFCHR:
		case S_IFBLK:
			inode->i_rdev =
				new_decode_dev(le32_to_cpu(dic->i_u.rdev));
			break;
		case S_IFIFO:
		case S_IFSOCK:
			inode->i_rdev = 0;
			break;
		default:
			goto bogusimode;
		}
		i_uid_write(inode, le16_to_cpu(dic->i_uid));
		i_gid_write(inode, le16_to_cpu(dic->i_gid));
		set_nlink(inode, le16_to_cpu(dic->i_nlink));

		/* use build time for compact inodes */
		inode->i_ctime.tv_sec = sbi->build_time;
		inode->i_ctime.tv_nsec = sbi->build_time_nsec;

		inode->i_size = le32_to_cpu(dic->i_size);
		if (erofs_inode_is_data_compressed(vi->datalayout))
			nblks = le32_to_cpu(dic->i_u.compressed_blocks);
		else if (vi->datalayout == EROFS_INODE_CHUNK_BASED)
			vi->chunkformat = le16_to_cpu(dic->i_u.c.format);
		break;
	default:
		erofs_err(inode->i_sb,
			  "unsupported on-disk inode version %u of nid %llu",
			  erofs_inode_version(ifmt), vi->nid);
		err = -EOPNOTSUPP;
		goto err_out;
	}

	if (vi->datalayout == EROFS_INODE_CHUNK_BASED) {
		if (vi->chunkformat & ~EROFS_CHUNK_FORMAT_ALL) {
			erofs_err(inode->i_sb,
				  "unsupported chunk format %x of nid %llu",
				  vi->chunkformat, vi->nid);
			err = -EOPNOTSUPP;
			goto err_out;
		}
		vi->chunkbits = sb->s_blocksize_bits +
			(vi->chunkformat & EROFS_CHUNK_FORMAT_BLKBITS_MASK);
	}
	inode->i_mtime.tv_sec = inode->i_ctime.tv_sec;
	inode->i_atime.tv_sec = inode->i_ctime.tv_sec;
	inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec;
	inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec;

	inode->i_flags &= ~S_DAX;
	if (test_opt(&sbi->opt, DAX_ALWAYS) && S_ISREG(inode->i_mode) &&
	    (vi->datalayout == EROFS_INODE_FLAT_PLAIN ||
	     vi->datalayout == EROFS_INODE_CHUNK_BASED))
		inode->i_flags |= S_DAX;

	if (!nblks)
		/* measure inode.i_blocks as generic filesystems do */
		inode->i_blocks = round_up(inode->i_size, sb->s_blocksize) >> 9;
	else
		inode->i_blocks = nblks << (sb->s_blocksize_bits - 9);
	return kaddr;

bogusimode:
	erofs_err(inode->i_sb, "bogus i_mode (%o) @ nid %llu",
		  inode->i_mode, vi->nid);
	err = -EFSCORRUPTED;
err_out:
	DBG_BUGON(1);
	kfree(copied);
	erofs_put_metabuf(buf);
	return ERR_PTR(err);
}
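
/*
 * Set up a symlink inode: short inline targets are copied out of the inode
 * metadata block into inode->i_link ("fast" symlinks); anything else goes
 * through the page cache via erofs_symlink_iops.
 */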
static int erofs_fill_symlink(struct inode *inode, void *kaddr,
			      unsigned int m_pofs)
{
	struct erofs_inode *vi = EROFS_I(inode);
	unsigned int bsz = i_blocksize(inode);
	char *lnk;

	/* if it cannot be handled with the fast symlink scheme */
	if (vi->datalayout != EROFS_INODE_FLAT_INLINE ||
	    inode->i_size >= bsz || inode->i_size < 0) {
		inode->i_op = &erofs_symlink_iops;
		return 0;
	}

	lnk = kmalloc(inode->i_size + 1, GFP_KERNEL);
	if (!lnk)
		return -ENOMEM;

	m_pofs += vi->xattr_isize;
	/* inline symlink data shouldn't cross block boundary */
	if (m_pofs + inode->i_size > bsz) {
		kfree(lnk);
		erofs_err(inode->i_sb,
			  "inline data cross block boundary @ nid %llu",
			  vi->nid);
		DBG_BUGON(1);
		return -EFSCORRUPTED;
	}
	memcpy(lnk, kaddr + m_pofs, inode->i_size);
	lnk[inode->i_size] = '\0';

	inode->i_link = lnk;
	inode->i_op = &erofs_fast_symlink_iops;
	return 0;
}
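
/*
 * Fill a freshly-allocated VFS inode: read the on-disk inode, then wire up
 * the inode/file operations and address_space operations matching its file
 * type and data layout (uncompressed, chunk-based, compressed, or
 * fscache-backed).
 */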
static int erofs_fill_inode(struct inode *inode)
{
	struct erofs_inode *vi = EROFS_I(inode);
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	void *kaddr;
	unsigned int ofs;
	int err = 0;

	trace_erofs_fill_inode(inode);

	/* read inode base data from disk */
	kaddr = erofs_read_inode(&buf, inode, &ofs);
	if (IS_ERR(kaddr))
		return PTR_ERR(kaddr);

	/* setup the new inode */
	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		inode->i_op = &erofs_generic_iops;
		if (erofs_inode_is_data_compressed(vi->datalayout))
			inode->i_fop = &generic_ro_fops;
		else
			inode->i_fop = &erofs_file_fops;
		break;
	case S_IFDIR:
		inode->i_op = &erofs_dir_iops;
		inode->i_fop = &erofs_dir_fops;
		inode_nohighmem(inode);
		break;
	case S_IFLNK:
		err = erofs_fill_symlink(inode, kaddr, ofs);
		if (err)
			goto out_unlock;
		inode_nohighmem(inode);
		break;
	case S_IFCHR:
	case S_IFBLK:
	case S_IFIFO:
	case S_IFSOCK:
		inode->i_op = &erofs_generic_iops;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
		goto out_unlock;
	default:
		err = -EFSCORRUPTED;
		goto out_unlock;
	}

	if (erofs_inode_is_data_compressed(vi->datalayout)) {
#ifdef CONFIG_EROFS_FS_ZIP
		if (!erofs_is_fscache_mode(inode->i_sb) &&
		    inode->i_sb->s_blocksize_bits == PAGE_SHIFT) {
			inode->i_mapping->a_ops = &z_erofs_aops;
			err = 0;
			goto out_unlock;
		}
#endif
		err = -EOPNOTSUPP;
		goto out_unlock;
	}
	inode->i_mapping->a_ops = &erofs_raw_access_aops;
	mapping_set_large_folios(inode->i_mapping);
#ifdef CONFIG_EROFS_FS_ONDEMAND
	if (erofs_is_fscache_mode(inode->i_sb))
		inode->i_mapping->a_ops = &erofs_fscache_access_aops;
#endif

out_unlock:
	erofs_put_metabuf(&buf);
	return err;
}

/*
 * ino_t is 32 bits on 32-bit arches. We have to squash the 64-bit value down
 * so that it will fit.
 */
static ino_t erofs_squash_ino(erofs_nid_t nid)
{
	ino_t ino = (ino_t)nid;

	if (sizeof(ino_t) < sizeof(erofs_nid_t))
		ino ^= nid >> (sizeof(erofs_nid_t) - sizeof(ino_t)) * 8;
	return ino;
}
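
/*
 * iget5_locked() callbacks: inodes are keyed by the full 64-bit nid rather
 * than the (possibly squashed) 32-bit i_ino, so ino collisions still resolve
 * to the correct inode.
 */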
static int erofs_iget5_eq(struct inode *inode, void *opaque)
{
	return EROFS_I(inode)->nid == *(erofs_nid_t *)opaque;
}

static int erofs_iget5_set(struct inode *inode, void *opaque)
{
	const erofs_nid_t nid = *(erofs_nid_t *)opaque;

	inode->i_ino = erofs_squash_ino(nid);
	EROFS_I(inode)->nid = nid;
	return 0;
}
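
/* obtain the in-memory inode for @nid, reading it from disk if necessary */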
struct inode *erofs_iget(struct super_block *sb, erofs_nid_t nid)
{
	struct inode *inode;

	inode = iget5_locked(sb, erofs_squash_ino(nid), erofs_iget5_eq,
			     erofs_iget5_set, &nid);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	if (inode->i_state & I_NEW) {
		int err = erofs_fill_inode(inode);

		if (err) {
			iget_failed(inode);
			return ERR_PTR(err);
		}
		unlock_new_inode(inode);
	}
	return inode;
}
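
/*
 * ->getattr(): report EROFS-specific attributes (the filesystem is read-only,
 * so everything is immutable; compressed layouts are flagged as such) on top
 * of generic_fillattr().
 */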
int erofs_getattr(struct mnt_idmap *idmap, const struct path *path,
		  struct kstat *stat, u32 request_mask,
		  unsigned int query_flags)
{
	struct inode *const inode = d_inode(path->dentry);

	if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout))
		stat->attributes |= STATX_ATTR_COMPRESSED;

	stat->attributes |= STATX_ATTR_IMMUTABLE;
	stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
				  STATX_ATTR_IMMUTABLE);

	generic_fillattr(idmap, inode, stat);
	return 0;
}

const struct inode_operations erofs_generic_iops = {
	.getattr = erofs_getattr,
	.listxattr = erofs_listxattr,
	.get_inode_acl = erofs_get_acl,
	.fiemap = erofs_fiemap,
};

const struct inode_operations erofs_symlink_iops = {
	.get_link = page_get_link,
	.getattr = erofs_getattr,
	.listxattr = erofs_listxattr,
	.get_inode_acl = erofs_get_acl,
};

const struct inode_operations erofs_fast_symlink_iops = {
	.get_link = simple_get_link,
	.getattr = erofs_getattr,
	.listxattr = erofs_listxattr,
	.get_inode_acl = erofs_get_acl,
};