// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 */
#include "internal.h"
#include <linux/prefetch.h>

#include <trace/events/erofs.h>
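
/*
 * bio completion callback for raw (uncompressed) reads: propagate
 * bi_status to every page in the bio, marking it uptodate on success,
 * then unlock it so that waiting readers can proceed.
 */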
static void erofs_readendio(struct bio *bio)
{
	struct bio_vec *bvec;
	blk_status_t err = bio->bi_status;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *page = bvec->bv_page;

		/* page is already locked */
		DBG_BUGON(PageUptodate(page));

		if (err)
			SetPageError(page);
		else
			SetPageUptodate(page);

		unlock_page(page);
		/* page could be reclaimed now */
	}
	bio_put(bio);
}
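
/*
 * build a read bio for @nr_pages pages starting at block @blkaddr;
 * metadata bios are tagged with REQ_META so the block layer can
 * account for them separately.
 */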
static struct bio *erofs_grab_raw_bio(struct super_block *sb,
				      erofs_blk_t blkaddr,
				      unsigned int nr_pages,
				      bool ismeta)
{
	struct bio *bio = bio_alloc(GFP_NOIO, nr_pages);

	bio->bi_end_io = erofs_readendio;
	bio_set_dev(bio, sb->s_bdev);
	bio->bi_iter.bi_sector = (sector_t)blkaddr << LOG_SECTORS_PER_BLOCK;
	if (ismeta)
		bio->bi_opf = REQ_OP_READ | REQ_META;
	else
		bio->bi_opf = REQ_OP_READ;
	return bio;
}
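
/*
 * grab the page caching the metadata block at @blkaddr from the bdev
 * mapping, reading it in synchronously if needed. On success the page
 * is returned locked and uptodate; otherwise an ERR_PTR is returned.
 */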
struct page *erofs_get_meta_page(struct super_block *sb, erofs_blk_t blkaddr)
{
	struct inode *const bd_inode = sb->s_bdev->bd_inode;
	struct address_space *const mapping = bd_inode->i_mapping;
	/* prefer retrying in the allocator to blindly looping below */
	const gfp_t gfp = mapping_gfp_constraint(mapping, ~__GFP_FS);
	struct page *page;
	int err;

repeat:
	page = find_or_create_page(mapping, blkaddr, gfp);
	if (!page)
		return ERR_PTR(-ENOMEM);

	DBG_BUGON(!PageLocked(page));

	if (!PageUptodate(page)) {
		struct bio *bio;

		bio = erofs_grab_raw_bio(sb, blkaddr, 1, true);

		if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE) {
			err = -EFAULT;
			goto err_out;
		}

		submit_bio(bio);
		lock_page(page);

		/* this page has been truncated by others */
		if (page->mapping != mapping) {
			unlock_page(page);
			put_page(page);
			goto repeat;
		}

		/* more likely a read error */
		if (!PageUptodate(page)) {
			err = -EIO;
			goto err_out;
		}
	}
	return page;

err_out:
	unlock_page(page);
	put_page(page);
	return ERR_PTR(err);
}
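
/*
 * map the logical offset in map->m_la to a physical extent for flat
 * (uncompressed) inodes: all blocks except a possible inline tail are
 * laid out contiguously from raw_blkaddr, while the FLAT_INLINE tail
 * block sits right after the on-disk inode (and its xattrs).
 */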
static int erofs_map_blocks_flatmode(struct inode *inode,
				     struct erofs_map_blocks *map,
				     int flags)
{
	int err = 0;
	erofs_blk_t nblocks, lastblk;
	u64 offset = map->m_la;
	struct erofs_inode *vi = EROFS_I(inode);
	bool tailendpacking = (vi->datalayout == EROFS_INODE_FLAT_INLINE);

	trace_erofs_map_blocks_flatmode_enter(inode, map, flags);

	nblocks = DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
	lastblk = nblocks - tailendpacking;

	if (offset >= inode->i_size) {
		/* leave out-of-bound access unmapped */
		map->m_flags = 0;
		map->m_plen = 0;
		goto out;
	}

	/* there is no hole in flatmode */
	map->m_flags = EROFS_MAP_MAPPED;

	if (offset < blknr_to_addr(lastblk)) {
		map->m_pa = blknr_to_addr(vi->raw_blkaddr) + map->m_la;
		map->m_plen = blknr_to_addr(lastblk) - offset;
	} else if (tailendpacking) {
		/* 2 - inode inline B: inode, [xattrs], inline last blk... */
		struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);

		map->m_pa = iloc(sbi, vi->nid) + vi->inode_isize +
			vi->xattr_isize + erofs_blkoff(map->m_la);
		map->m_plen = inode->i_size - offset;

		/* inline data should be located in one meta block */
		if (erofs_blkoff(map->m_pa) + map->m_plen > PAGE_SIZE) {
			errln("inline data crosses block boundary @ nid %llu",
			      vi->nid);
			DBG_BUGON(1);
			err = -EFSCORRUPTED;
			goto err_out;
		}

		map->m_flags |= EROFS_MAP_META;
	} else {
		errln("internal error @ nid: %llu (size %llu), m_la 0x%llx",
		      vi->nid, inode->i_size, map->m_la);
		DBG_BUGON(1);
		err = -EIO;
		goto err_out;
	}

out:
	map->m_llen = map->m_plen;

err_out:
	trace_erofs_map_blocks_flatmode_exit(inode, map, flags, 0);
	return err;
}
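
/*
 * dispatch block mapping according to the data layout: compressed
 * inodes go through the z_erofs iterator, everything else through
 * flatmode; the cached meta page (mpage) is always dropped on return.
 */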
int erofs_map_blocks(struct inode *inode,
		     struct erofs_map_blocks *map, int flags)
{
	if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout)) {
		int err = z_erofs_map_blocks_iter(inode, map, flags);

		if (map->mpage) {
			put_page(map->mpage);
			map->mpage = NULL;
		}
		return err;
	}
	return erofs_map_blocks_flatmode(inode, map, flags);
}
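
/*
 * read one page in raw (uncompressed) mode, merging physically
 * contiguous pages into the bio passed in. Returns the open bio for
 * the caller to continue with, NULL if the page was completed here
 * (hole, inline tail or already uptodate), or an ERR_PTR on failure.
 */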
static inline struct bio *erofs_read_raw_page(struct bio *bio,
					      struct address_space *mapping,
					      struct page *page,
					      erofs_off_t *last_block,
					      unsigned int nblocks,
					      bool ra)
{
	struct inode *const inode = mapping->host;
	struct super_block *const sb = inode->i_sb;
	erofs_off_t current_block = (erofs_off_t)page->index;
	int err;

	DBG_BUGON(!nblocks);

	if (PageUptodate(page)) {
		err = 0;
		goto has_updated;
	}

	/* note that for readpage case, bio also equals to NULL */
	if (bio &&
	    /* not continuous */
	    *last_block + 1 != current_block) {
submit_bio_retry:
		submit_bio(bio);
		bio = NULL;
	}

	if (!bio) {
		struct erofs_map_blocks map = {
			.m_la = blknr_to_addr(current_block),
		};
		erofs_blk_t blknr;
		unsigned int blkoff;

		err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
		if (err)
			goto err_out;

		/* zero out the holed page */
		if (!(map.m_flags & EROFS_MAP_MAPPED)) {
			zero_user_segment(page, 0, PAGE_SIZE);
			SetPageUptodate(page);

			/* imply err = 0, see erofs_map_blocks */
			goto has_updated;
		}

		/* for RAW access mode, m_plen must be equal to m_llen */
		DBG_BUGON(map.m_plen != map.m_llen);

		blknr = erofs_blknr(map.m_pa);
		blkoff = erofs_blkoff(map.m_pa);

		/* deal with inline page */
		if (map.m_flags & EROFS_MAP_META) {
			void *vsrc, *vto;
			struct page *ipage;

			DBG_BUGON(map.m_plen > PAGE_SIZE);

			ipage = erofs_get_meta_page(inode->i_sb, blknr);
			if (IS_ERR(ipage)) {
				err = PTR_ERR(ipage);
				goto err_out;
			}

			vsrc = kmap_atomic(ipage);
			vto = kmap_atomic(page);
			memcpy(vto, vsrc + blkoff, map.m_plen);
			memset(vto + map.m_plen, 0, PAGE_SIZE - map.m_plen);
			kunmap_atomic(vto);
			kunmap_atomic(vsrc);
			flush_dcache_page(page);

			SetPageUptodate(page);
			/* TODO: could we unlock the page earlier? */
			unlock_page(ipage);
			put_page(ipage);

			/* imply err = 0, see erofs_map_blocks */
			goto has_updated;
		}

		/* pa must be block-aligned for raw reading */
		DBG_BUGON(erofs_blkoff(map.m_pa));

		/* max # of continuous pages */
		if (nblocks > DIV_ROUND_UP(map.m_plen, PAGE_SIZE))
			nblocks = DIV_ROUND_UP(map.m_plen, PAGE_SIZE);
		if (nblocks > BIO_MAX_PAGES)
			nblocks = BIO_MAX_PAGES;

		bio = erofs_grab_raw_bio(sb, blknr, nblocks, false);
	}

	err = bio_add_page(bio, page, PAGE_SIZE, 0);
	/* out of the extent or bio is full */
	if (err < PAGE_SIZE)
		goto submit_bio_retry;

	*last_block = current_block;

	/* shift in advance in case it's followed by too many gaps */
	if (bio->bi_iter.bi_size >= bio->bi_max_vecs * PAGE_SIZE) {
		/* err should be reassigned to 0 after submitting */
		err = 0;
		goto submit_bio_out;
	}

	return bio;

err_out:
	/* for sync reading, set page error immediately */
	if (!ra) {
		SetPageError(page);
		ClearPageUptodate(page);
	}
has_updated:
	unlock_page(page);

	/* if updated manually, continuous pages have a gap */
	if (bio)
submit_bio_out:
		submit_bio(bio);

	return err ? ERR_PTR(err) : NULL;
}

/*
 * We don't have write or truncate flows, so no inode locking needs to
 * be held at the moment.
 */
static int erofs_raw_access_readpage(struct file *file, struct page *page)
{
	erofs_off_t last_block;
	struct bio *bio;

	trace_erofs_readpage(page, true);

	bio = erofs_read_raw_page(NULL, page->mapping,
				  page, &last_block, 1, false);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	DBG_BUGON(bio);	/* since we have only one bio -- must be NULL */
	return 0;
}
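
/*
 * readahead entry point: detach pages from the list in ascending index
 * order and batch physically contiguous ones into as few bios as
 * possible; errors are logged but otherwise ignored.
 */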
static int erofs_raw_access_readpages(struct file *filp,
				      struct address_space *mapping,
				      struct list_head *pages,
				      unsigned int nr_pages)
{
	erofs_off_t last_block;
	struct bio *bio = NULL;
	gfp_t gfp = readahead_gfp_mask(mapping);
	struct page *page = list_last_entry(pages, struct page, lru);

	trace_erofs_readpages(mapping->host, page, nr_pages, true);

	for (; nr_pages; --nr_pages) {
		page = list_entry(pages->prev, struct page, lru);

		prefetchw(&page->flags);
		list_del(&page->lru);

		if (!add_to_page_cache_lru(page, mapping, page->index, gfp)) {
			bio = erofs_read_raw_page(bio, mapping, page,
						  &last_block, nr_pages, true);

			/* all the page errors are ignored during readahead */
			if (IS_ERR(bio)) {
				pr_err("%s, readahead error at page %lu of nid %llu\n",
				       __func__, page->index,
				       EROFS_I(mapping->host)->nid);

				bio = NULL;
			}
		}

		/* pages could still be locked */
		put_page(page);
	}
	DBG_BUGON(!list_empty(pages));

	/* the rare case (end in gaps) */
	if (bio)
		submit_bio(bio);
	return 0;
}
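
/*
 * minimal get_block_t used by generic_block_bmap(); the filesystem is
 * read-only, so @create is ignored.
 */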
static int erofs_get_block(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh, int create)
{
	struct erofs_map_blocks map = {
		.m_la = iblock << 9,
	};
	int err;

	err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
	if (err)
		return err;

	if (map.m_flags & EROFS_MAP_MAPPED)
		bh->b_blocknr = erofs_blknr(map.m_pa);

	return err;
}
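
/*
 * FIBMAP cannot represent the inline tail block of a FLAT_INLINE
 * inode (it lives in the metadata area), so blocks at or beyond the
 * last full block are reported as unmapped.
 */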
static sector_t erofs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;

	if (EROFS_I(inode)->datalayout == EROFS_INODE_FLAT_INLINE) {
		erofs_blk_t blks = i_size_read(inode) >> LOG_BLOCK_SIZE;

		if (block >> LOG_SECTORS_PER_BLOCK >= blks)
			return 0;
	}

	return generic_block_bmap(mapping, block, erofs_get_block);
}

/* for uncompressed (aligned) files and raw access for other files */
const struct address_space_operations erofs_raw_access_aops = {
	.readpage = erofs_raw_access_readpage,
	.readpages = erofs_raw_access_readpages,
	.bmap = erofs_bmap,
};