linux-block.git: fs/erofs/data.c (Linux 6.10-rc6)

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2021, Alibaba Cloud
 */
#include "internal.h"
#include <linux/sched/mm.h>
#include <trace/events/erofs.h>

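/*
 * Metadata buffer helpers: erofs_unmap_metabuf() drops the kmap while
 * keeping the page reference, and erofs_put_metabuf() releases the
 * page reference as well.
 */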
void erofs_unmap_metabuf(struct erofs_buf *buf)
{
	if (buf->kmap_type == EROFS_KMAP)
		kunmap_local(buf->base);
	buf->base = NULL;
	buf->kmap_type = EROFS_NO_KMAP;
}

void erofs_put_metabuf(struct erofs_buf *buf)
{
	if (!buf->page)
		return;
	erofs_unmap_metabuf(buf);
	put_page(buf->page);
	buf->page = NULL;
}

/*
 * Derive the block size from inode->i_blkbits so that this stays
 * compatible with the anonymous inode used in fscache mode.
 */
void *erofs_bread(struct erofs_buf *buf, erofs_off_t offset,
		  enum erofs_kmap_type type)
{
	pgoff_t index = offset >> PAGE_SHIFT;
	struct page *page = buf->page;
	struct folio *folio;
	unsigned int nofs_flag;

	if (!page || page->index != index) {
		erofs_put_metabuf(buf);

		nofs_flag = memalloc_nofs_save();
		folio = read_cache_folio(buf->mapping, index, NULL, NULL);
		memalloc_nofs_restore(nofs_flag);
		if (IS_ERR(folio))
			return folio;

		/* should already be PageUptodate, no need to lock page */
		page = folio_file_page(folio, index);
		buf->page = page;
	}
	if (buf->kmap_type == EROFS_NO_KMAP) {
		if (type == EROFS_KMAP)
			buf->base = kmap_local_page(page);
		buf->kmap_type = type;
	} else if (buf->kmap_type != type) {
		DBG_BUGON(1);
		return ERR_PTR(-EFAULT);
	}
	if (type == EROFS_NO_KMAP)
		return NULL;
	return buf->base + (offset & ~PAGE_MASK);
}

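/*
 * Bind the buffer to the address_space used for metadata I/O: the
 * fscache pseudo inode's mapping in fscache mode, or the block
 * device's mapping otherwise.
 */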
void erofs_init_metabuf(struct erofs_buf *buf, struct super_block *sb)
{
	if (erofs_is_fscache_mode(sb))
		buf->mapping = EROFS_SB(sb)->s_fscache->inode->i_mapping;
	else
		buf->mapping = sb->s_bdev->bd_mapping;
}

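/*
 * Read metadata at an arbitrary byte offset.  A minimal usage sketch,
 * mirroring the erofs_map_blocks() caller below: keep an on-stack
 * erofs_buf, read, parse, then drop the reference:
 *
 *	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
 *	void *ptr = erofs_read_metabuf(&buf, sb, pos, EROFS_KMAP);
 *
 *	if (!IS_ERR(ptr)) {
 *		// ... parse the metadata at ptr ...
 *		erofs_put_metabuf(&buf);
 *	}
 */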
void *erofs_read_metabuf(struct erofs_buf *buf, struct super_block *sb,
			 erofs_off_t offset, enum erofs_kmap_type type)
{
	erofs_init_metabuf(buf, sb);
	return erofs_bread(buf, offset, type);
}

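/*
 * Map a logical offset of a flat (non-chunked) inode to its physical
 * position: either within the contiguous data blocks starting at
 * raw_blkaddr, or in the tail-packed inline area that sits right
 * after the inode metadata and xattrs.
 */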
static int erofs_map_blocks_flatmode(struct inode *inode,
				     struct erofs_map_blocks *map)
{
	erofs_blk_t nblocks, lastblk;
	u64 offset = map->m_la;
	struct erofs_inode *vi = EROFS_I(inode);
	struct super_block *sb = inode->i_sb;
	bool tailendpacking = (vi->datalayout == EROFS_INODE_FLAT_INLINE);

	nblocks = erofs_iblks(inode);
	lastblk = nblocks - tailendpacking;

	/* there are no holes in flat mode */
	map->m_flags = EROFS_MAP_MAPPED;
	if (offset < erofs_pos(sb, lastblk)) {
		map->m_pa = erofs_pos(sb, vi->raw_blkaddr) + map->m_la;
		map->m_plen = erofs_pos(sb, lastblk) - offset;
	} else if (tailendpacking) {
		map->m_pa = erofs_iloc(inode) + vi->inode_isize +
			vi->xattr_isize + erofs_blkoff(sb, offset);
		map->m_plen = inode->i_size - offset;

		/* inline data should be located in the same meta block */
		if (erofs_blkoff(sb, map->m_pa) + map->m_plen > sb->s_blocksize) {
			erofs_err(sb, "inline data cross block boundary @ nid %llu",
				  vi->nid);
			DBG_BUGON(1);
			return -EFSCORRUPTED;
		}
		map->m_flags |= EROFS_MAP_META;
	} else {
		erofs_err(sb, "internal error @ nid: %llu (size %llu), m_la 0x%llx",
			  vi->nid, inode->i_size, map->m_la);
		DBG_BUGON(1);
		return -EIO;
	}
	return 0;
}

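/*
 * Resolve the logical extent starting at map->m_la into a physical
 * extent.  Chunk-based inodes are looked up through the on-disk chunk
 * array (32-bit block-map entries or full chunk indexes); all other
 * uncompressed layouts are delegated to erofs_map_blocks_flatmode().
 */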
int erofs_map_blocks(struct inode *inode, struct erofs_map_blocks *map)
{
	struct super_block *sb = inode->i_sb;
	struct erofs_inode *vi = EROFS_I(inode);
	struct erofs_inode_chunk_index *idx;
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	u64 chunknr;
	unsigned int unit;
	erofs_off_t pos;
	void *kaddr;
	int err = 0;

	trace_erofs_map_blocks_enter(inode, map, 0);
	map->m_deviceid = 0;
	if (map->m_la >= inode->i_size) {
		/* leave out-of-bound access unmapped */
		map->m_flags = 0;
		map->m_plen = 0;
		goto out;
	}

	if (vi->datalayout != EROFS_INODE_CHUNK_BASED) {
		err = erofs_map_blocks_flatmode(inode, map);
		goto out;
	}

	if (vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)
		unit = sizeof(*idx);			/* chunk index */
	else
		unit = EROFS_BLOCK_MAP_ENTRY_SIZE;	/* block map */

	chunknr = map->m_la >> vi->chunkbits;
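	/*
	 * The chunk array is stored immediately after the inode base
	 * and xattrs, aligned to the entry size; index it by chunk
	 * number to locate this chunk's entry.
	 */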
	pos = ALIGN(erofs_iloc(inode) + vi->inode_isize +
		    vi->xattr_isize, unit) + unit * chunknr;

	kaddr = erofs_read_metabuf(&buf, sb, pos, EROFS_KMAP);
	if (IS_ERR(kaddr)) {
		err = PTR_ERR(kaddr);
		goto out;
	}
	map->m_la = chunknr << vi->chunkbits;
	map->m_plen = min_t(erofs_off_t, 1UL << vi->chunkbits,
			round_up(inode->i_size - map->m_la, sb->s_blocksize));

	/* handle block map */
	if (!(vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)) {
		__le32 *blkaddr = kaddr;

		if (le32_to_cpu(*blkaddr) == EROFS_NULL_ADDR) {
			map->m_flags = 0;
		} else {
			map->m_pa = erofs_pos(sb, le32_to_cpu(*blkaddr));
			map->m_flags = EROFS_MAP_MAPPED;
		}
		goto out_unlock;
	}
	/* parse chunk indexes */
	idx = kaddr;
	switch (le32_to_cpu(idx->blkaddr)) {
	case EROFS_NULL_ADDR:
		map->m_flags = 0;
		break;
	default:
		map->m_deviceid = le16_to_cpu(idx->device_id) &
			EROFS_SB(sb)->device_id_mask;
		map->m_pa = erofs_pos(sb, le32_to_cpu(idx->blkaddr));
		map->m_flags = EROFS_MAP_MAPPED;
		break;
	}
out_unlock:
	erofs_put_metabuf(&buf);
out:
	if (!err)
		map->m_llen = map->m_plen;
	trace_erofs_map_blocks_exit(inode, map, 0, err);
	return err;
}

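/*
 * Translate a (device id, physical offset) pair to the backing block
 * device / DAX device / fscache context.  For multi-device images
 * without a flattened address space, a zero device id is resolved by
 * scanning the device table for the extent covering m_pa.
 */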
int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map)
{
	struct erofs_dev_context *devs = EROFS_SB(sb)->devs;
	struct erofs_device_info *dif;
	int id;

	map->m_bdev = sb->s_bdev;
	map->m_daxdev = EROFS_SB(sb)->dax_dev;
	map->m_dax_part_off = EROFS_SB(sb)->dax_part_off;
	map->m_fscache = EROFS_SB(sb)->s_fscache;

	if (map->m_deviceid) {
		down_read(&devs->rwsem);
		dif = idr_find(&devs->tree, map->m_deviceid - 1);
		if (!dif) {
			up_read(&devs->rwsem);
			return -ENODEV;
		}
		if (devs->flatdev) {
			map->m_pa += erofs_pos(sb, dif->mapped_blkaddr);
			up_read(&devs->rwsem);
			return 0;
		}
		map->m_bdev = dif->bdev_file ? file_bdev(dif->bdev_file) : NULL;
		map->m_daxdev = dif->dax_dev;
		map->m_dax_part_off = dif->dax_part_off;
		map->m_fscache = dif->fscache;
		up_read(&devs->rwsem);
	} else if (devs->extra_devices && !devs->flatdev) {
		down_read(&devs->rwsem);
		idr_for_each_entry(&devs->tree, dif, id) {
			erofs_off_t startoff, length;

			if (!dif->mapped_blkaddr)
				continue;
			startoff = erofs_pos(sb, dif->mapped_blkaddr);
			length = erofs_pos(sb, dif->blocks);

			if (map->m_pa >= startoff &&
			    map->m_pa < startoff + length) {
				map->m_pa -= startoff;
				map->m_bdev = dif->bdev_file ?
					      file_bdev(dif->bdev_file) : NULL;
				map->m_daxdev = dif->dax_dev;
				map->m_dax_part_off = dif->dax_part_off;
				map->m_fscache = dif->fscache;
				break;
			}
		}
		up_read(&devs->rwsem);
	}
	return 0;
}

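/*
 * Bridge the iomap interface to erofs_map_blocks()/erofs_map_dev():
 * report holes, regular mapped extents, and tail-packed inline data
 * (exposed as IOMAP_INLINE).
 */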
static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
		unsigned int flags, struct iomap *iomap, struct iomap *srcmap)
{
	int ret;
	struct super_block *sb = inode->i_sb;
	struct erofs_map_blocks map;
	struct erofs_map_dev mdev;

	map.m_la = offset;
	map.m_llen = length;

	ret = erofs_map_blocks(inode, &map);
	if (ret < 0)
		return ret;

	mdev = (struct erofs_map_dev) {
		.m_deviceid = map.m_deviceid,
		.m_pa = map.m_pa,
	};
	ret = erofs_map_dev(sb, &mdev);
	if (ret)
		return ret;

	iomap->offset = map.m_la;
	if (flags & IOMAP_DAX)
		iomap->dax_dev = mdev.m_daxdev;
	else
		iomap->bdev = mdev.m_bdev;
	iomap->length = map.m_llen;
	iomap->flags = 0;
	iomap->private = NULL;

	if (!(map.m_flags & EROFS_MAP_MAPPED)) {
		iomap->type = IOMAP_HOLE;
		iomap->addr = IOMAP_NULL_ADDR;
		if (!iomap->length)
			iomap->length = length;
		return 0;
	}

	if (map.m_flags & EROFS_MAP_META) {
		void *ptr;
		struct erofs_buf buf = __EROFS_BUF_INITIALIZER;

		iomap->type = IOMAP_INLINE;
		ptr = erofs_read_metabuf(&buf, sb, mdev.m_pa, EROFS_KMAP);
		if (IS_ERR(ptr))
			return PTR_ERR(ptr);
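		/* keep the metadata page mapped until ->iomap_end() */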
		iomap->inline_data = ptr;
		iomap->private = buf.base;
	} else {
		iomap->type = IOMAP_MAPPED;
		iomap->addr = mdev.m_pa;
		if (flags & IOMAP_DAX)
			iomap->addr += mdev.m_dax_part_off;
	}
	return 0;
}

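/*
 * For IOMAP_INLINE mappings, rebuild the erofs_buf left pinned by
 * erofs_iomap_begin() (its base pointer was stashed in iomap->private)
 * and release it here.
 */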
static int erofs_iomap_end(struct inode *inode, loff_t pos, loff_t length,
		ssize_t written, unsigned int flags, struct iomap *iomap)
{
	void *ptr = iomap->private;

	if (ptr) {
		struct erofs_buf buf = {
			.page = kmap_to_page(ptr),
			.base = ptr,
			.kmap_type = EROFS_KMAP,
		};

		DBG_BUGON(iomap->type != IOMAP_INLINE);
		erofs_put_metabuf(&buf);
	} else {
		DBG_BUGON(iomap->type == IOMAP_INLINE);
	}
	return written;
}

static const struct iomap_ops erofs_iomap_ops = {
	.iomap_begin = erofs_iomap_begin,
	.iomap_end = erofs_iomap_end,
};

int erofs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		 u64 start, u64 len)
{
	if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout)) {
#ifdef CONFIG_EROFS_FS_ZIP
		return iomap_fiemap(inode, fieinfo, start, len,
				    &z_erofs_iomap_report_ops);
#else
		return -EOPNOTSUPP;
#endif
	}
	return iomap_fiemap(inode, fieinfo, start, len, &erofs_iomap_ops);
}

/*
 * Since we don't have write or truncate flows, no inode locking needs
 * to be held at the moment.
 */
static int erofs_read_folio(struct file *file, struct folio *folio)
{
	return iomap_read_folio(folio, &erofs_iomap_ops);
}

static void erofs_readahead(struct readahead_control *rac)
{
	return iomap_readahead(rac, &erofs_iomap_ops);
}

static sector_t erofs_bmap(struct address_space *mapping, sector_t block)
{
	return iomap_bmap(mapping, block, &erofs_iomap_ops);
}

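/*
 * Read paths: DAX inodes go through dax_iomap_rw(), O_DIRECT through
 * iomap_dio_rw() (with logical-block-size alignment enforced), and
 * everything else falls back to buffered filemap_read().
 */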
static ssize_t erofs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	/*
	 * No need to take the (shared) inode lock since it's a
	 * read-only filesystem.
	 */
	if (!iov_iter_count(to))
		return 0;

#ifdef CONFIG_FS_DAX
	if (IS_DAX(inode))
		return dax_iomap_rw(iocb, to, &erofs_iomap_ops);
#endif
	if (iocb->ki_flags & IOCB_DIRECT) {
		struct block_device *bdev = inode->i_sb->s_bdev;
		unsigned int blksize_mask;

		if (bdev)
			blksize_mask = bdev_logical_block_size(bdev) - 1;
		else
			blksize_mask = i_blocksize(inode) - 1;

		if ((iocb->ki_pos | iov_iter_count(to) |
		     iov_iter_alignment(to)) & blksize_mask)
			return -EINVAL;

		return iomap_dio_rw(iocb, to, &erofs_iomap_ops,
				    NULL, 0, NULL, 0);
	}
	return filemap_read(iocb, to, 0);
}

/* for uncompressed (aligned) files and raw access for other files */
const struct address_space_operations erofs_raw_access_aops = {
	.read_folio = erofs_read_folio,
	.readahead = erofs_readahead,
	.bmap = erofs_bmap,
	.direct_IO = noop_direct_IO,
	.release_folio = iomap_release_folio,
	.invalidate_folio = iomap_invalidate_folio,
};

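/*
 * DAX page-fault plumbing: both the base-page and huge-page fault
 * paths funnel into dax_iomap_fault() with the same iomap ops.
 */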
#ifdef CONFIG_FS_DAX
static vm_fault_t erofs_dax_huge_fault(struct vm_fault *vmf,
		unsigned int order)
{
	return dax_iomap_fault(vmf, order, NULL, NULL, &erofs_iomap_ops);
}

static vm_fault_t erofs_dax_fault(struct vm_fault *vmf)
{
	return erofs_dax_huge_fault(vmf, 0);
}

static const struct vm_operations_struct erofs_dax_vm_ops = {
	.fault		= erofs_dax_fault,
	.huge_fault	= erofs_dax_huge_fault,
};

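/*
 * Refuse writable shared mappings on DAX inodes since the filesystem
 * is read-only; private mappings may still write to their own CoW
 * copies.
 */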
static int erofs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!IS_DAX(file_inode(file)))
		return generic_file_readonly_mmap(file, vma);

	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
		return -EINVAL;

	vma->vm_ops = &erofs_dax_vm_ops;
	vm_flags_set(vma, VM_HUGEPAGE);
	return 0;
}
#else
#define erofs_file_mmap	generic_file_readonly_mmap
#endif

const struct file_operations erofs_file_fops = {
	.llseek		= generic_file_llseek,
	.read_iter	= erofs_file_read_iter,
	.mmap		= erofs_file_mmap,
	.get_unmapped_area = thp_get_unmapped_area,
	.splice_read	= filemap_splice_read,
};