erofs: fix erofs_get_meta_page locking due to a cleanup
[linux-2.6-block.git] / fs/erofs/data.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 */
#include "internal.h"
#include <linux/prefetch.h>

#include <trace/events/erofs.h>

static void erofs_readendio(struct bio *bio)
{
	struct bio_vec *bvec;
	blk_status_t err = bio->bi_status;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *page = bvec->bv_page;

		/* page is already locked */
		DBG_BUGON(PageUptodate(page));

		if (err)
			SetPageError(page);
		else
			SetPageUptodate(page);

		unlock_page(page);
		/* page could be reclaimed now */
	}
	bio_put(bio);
}

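/*
 * Return the page for a metadata block, locked and uptodate.
 * read_cache_page_gfp() hands the page back *unlocked*, so lock it
 * explicitly on success: callers such as the inline-data path in
 * erofs_read_raw_page() rely on holding the page lock, and are
 * expected to unlock_page() and put_page() once done with the block.
 */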
struct page *erofs_get_meta_page(struct super_block *sb, erofs_blk_t blkaddr)
{
	struct address_space *const mapping = sb->s_bdev->bd_inode->i_mapping;
	struct page *page;

	page = read_cache_page_gfp(mapping, blkaddr,
				   mapping_gfp_constraint(mapping, ~__GFP_FS));
	/* should already be PageUptodate */
	if (!IS_ERR(page))
		lock_page(page);
	return page;
}

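/*
 * Map a logical extent of a flat (uncompressed) inode to its physical
 * location.  Plain flat inodes occupy consecutive blocks starting at
 * vi->raw_blkaddr; with EROFS_INODE_FLAT_INLINE the last block is
 * instead tail-packed into the metadata area right after the on-disk
 * inode and xattrs, which the EROFS_MAP_META branch below handles.
 */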
static int erofs_map_blocks_flatmode(struct inode *inode,
				     struct erofs_map_blocks *map,
				     int flags)
{
	int err = 0;
	erofs_blk_t nblocks, lastblk;
	u64 offset = map->m_la;
	struct erofs_inode *vi = EROFS_I(inode);
	bool tailendpacking = (vi->datalayout == EROFS_INODE_FLAT_INLINE);

	trace_erofs_map_blocks_flatmode_enter(inode, map, flags);

	nblocks = DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
	lastblk = nblocks - tailendpacking;

	if (offset >= inode->i_size) {
		/* leave out-of-bound access unmapped */
		map->m_flags = 0;
		map->m_plen = 0;
		goto out;
	}

	/* there is no hole in flatmode */
	map->m_flags = EROFS_MAP_MAPPED;

	if (offset < blknr_to_addr(lastblk)) {
		map->m_pa = blknr_to_addr(vi->raw_blkaddr) + map->m_la;
		map->m_plen = blknr_to_addr(lastblk) - offset;
	} else if (tailendpacking) {
		/* 2 - inode inline B: inode, [xattrs], inline last blk... */
		struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);

		map->m_pa = iloc(sbi, vi->nid) + vi->inode_isize +
			vi->xattr_isize + erofs_blkoff(map->m_la);
		map->m_plen = inode->i_size - offset;

		/* inline data should be located in one meta block */
		if (erofs_blkoff(map->m_pa) + map->m_plen > PAGE_SIZE) {
			erofs_err(inode->i_sb,
				  "inline data cross block boundary @ nid %llu",
				  vi->nid);
			DBG_BUGON(1);
			err = -EFSCORRUPTED;
			goto err_out;
		}

		map->m_flags |= EROFS_MAP_META;
	} else {
		erofs_err(inode->i_sb,
			  "internal error @ nid: %llu (size %llu), m_la 0x%llx",
			  vi->nid, inode->i_size, map->m_la);
		DBG_BUGON(1);
		err = -EIO;
		goto err_out;
	}

out:
	map->m_llen = map->m_plen;

err_out:
	trace_erofs_map_blocks_flatmode_exit(inode, map, flags, 0);
	return err;
}

int erofs_map_blocks(struct inode *inode,
		     struct erofs_map_blocks *map, int flags)
{
	if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout)) {
		int err = z_erofs_map_blocks_iter(inode, map, flags);

		if (map->mpage) {
			put_page(map->mpage);
			map->mpage = NULL;
		}
		return err;
	}
	return erofs_map_blocks_flatmode(inode, map, flags);
}

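/*
 * Read one page of an uncompressed file, merging physically
 * contiguous pages into a single bio.  Returns the still-open bio to
 * be extended by the next page, NULL when nothing is left pending
 * (the page was completed in place or the bio was just submitted),
 * or an ERR_PTR() on failure.  *last_block remembers the last block
 * queued, so a discontiguous page submits the pending bio first.
 */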
static inline struct bio *erofs_read_raw_page(struct bio *bio,
					      struct address_space *mapping,
					      struct page *page,
					      erofs_off_t *last_block,
					      unsigned int nblocks,
					      bool ra)
{
	struct inode *const inode = mapping->host;
	struct super_block *const sb = inode->i_sb;
	erofs_off_t current_block = (erofs_off_t)page->index;
	int err;

	DBG_BUGON(!nblocks);

	if (PageUptodate(page)) {
		err = 0;
		goto has_updated;
	}

	/* note that for the readpage case, bio is also NULL */
	if (bio &&
	    /* not continuous */
	    *last_block + 1 != current_block) {
submit_bio_retry:
		submit_bio(bio);
		bio = NULL;
	}

	if (!bio) {
		struct erofs_map_blocks map = {
			.m_la = blknr_to_addr(current_block),
		};
		erofs_blk_t blknr;
		unsigned int blkoff;

		err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
		if (err)
			goto err_out;

		/* zero out the holed page */
		if (!(map.m_flags & EROFS_MAP_MAPPED)) {
			zero_user_segment(page, 0, PAGE_SIZE);
			SetPageUptodate(page);

			/* imply err = 0, see erofs_map_blocks */
			goto has_updated;
		}

		/* for RAW access mode, m_plen must be equal to m_llen */
		DBG_BUGON(map.m_plen != map.m_llen);

		blknr = erofs_blknr(map.m_pa);
		blkoff = erofs_blkoff(map.m_pa);

		/* deal with inline page */
		if (map.m_flags & EROFS_MAP_META) {
			void *vsrc, *vto;
			struct page *ipage;

			DBG_BUGON(map.m_plen > PAGE_SIZE);

			ipage = erofs_get_meta_page(inode->i_sb, blknr);

			if (IS_ERR(ipage)) {
				err = PTR_ERR(ipage);
				goto err_out;
			}

			vsrc = kmap_atomic(ipage);
			vto = kmap_atomic(page);
			memcpy(vto, vsrc + blkoff, map.m_plen);
			memset(vto + map.m_plen, 0, PAGE_SIZE - map.m_plen);
			kunmap_atomic(vto);
			kunmap_atomic(vsrc);
			flush_dcache_page(page);

			SetPageUptodate(page);
			/* TODO: could we unlock the page earlier? */
			unlock_page(ipage);
			put_page(ipage);

			/* imply err = 0, see erofs_map_blocks */
			goto has_updated;
		}

		/* pa must be block-aligned for raw reading */
		DBG_BUGON(erofs_blkoff(map.m_pa));

		/* max # of continuous pages */
		if (nblocks > DIV_ROUND_UP(map.m_plen, PAGE_SIZE))
			nblocks = DIV_ROUND_UP(map.m_plen, PAGE_SIZE);
		if (nblocks > BIO_MAX_PAGES)
			nblocks = BIO_MAX_PAGES;

		bio = bio_alloc(GFP_NOIO, nblocks);

		bio->bi_end_io = erofs_readendio;
		bio_set_dev(bio, sb->s_bdev);
		bio->bi_iter.bi_sector = (sector_t)blknr <<
			LOG_SECTORS_PER_BLOCK;
		bio->bi_opf = REQ_OP_READ;
	}

	err = bio_add_page(bio, page, PAGE_SIZE, 0);
	/* out of the extent or bio is full */
	if (err < PAGE_SIZE)
		goto submit_bio_retry;

	*last_block = current_block;

	/* submit in advance in case it is followed by too many gaps */
	if (bio->bi_iter.bi_size >= bio->bi_max_vecs * PAGE_SIZE) {
		/* err should be reassigned to 0 after submitting */
		err = 0;
		goto submit_bio_out;
	}

	return bio;

err_out:
	/* for sync reading, set the page error immediately */
	if (!ra) {
		SetPageError(page);
		ClearPageUptodate(page);
	}
has_updated:
	unlock_page(page);

	/* if updated manually, continuous pages have a gap */
	if (bio)
submit_bio_out:
		submit_bio(bio);
	return err ? ERR_PTR(err) : NULL;
}

/*
 * Since we don't have write or truncate flows, no inode
 * locking needs to be held at the moment.
 */
static int erofs_raw_access_readpage(struct file *file, struct page *page)
{
	erofs_off_t last_block;
	struct bio *bio;

	trace_erofs_readpage(page, true);

	bio = erofs_read_raw_page(NULL, page->mapping,
				  page, &last_block, 1, false);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	DBG_BUGON(bio);	/* since we have only one bio -- it must be NULL */
	return 0;
}

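/*
 * ->readpages() puts pages on the list in reverse index order, so
 * taking list_entry(pages->prev, ...) walks them in ascending order.
 * Read errors are only logged here: a failed readahead is not fatal,
 * the affected pages are simply left !PageUptodate.
 */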
static int erofs_raw_access_readpages(struct file *filp,
				      struct address_space *mapping,
				      struct list_head *pages,
				      unsigned int nr_pages)
{
	erofs_off_t last_block;
	struct bio *bio = NULL;
	gfp_t gfp = readahead_gfp_mask(mapping);
	struct page *page = list_last_entry(pages, struct page, lru);

	trace_erofs_readpages(mapping->host, page, nr_pages, true);

	for (; nr_pages; --nr_pages) {
		page = list_entry(pages->prev, struct page, lru);

		prefetchw(&page->flags);
		list_del(&page->lru);

		if (!add_to_page_cache_lru(page, mapping, page->index, gfp)) {
			bio = erofs_read_raw_page(bio, mapping, page,
						  &last_block, nr_pages, true);

			/* all page errors are ignored when reading ahead */
			if (IS_ERR(bio)) {
				pr_err("%s, readahead error at page %lu of nid %llu\n",
				       __func__, page->index,
				       EROFS_I(mapping->host)->nid);

				bio = NULL;
			}
		}

		/* pages could still be locked */
		put_page(page);
	}
	DBG_BUGON(!list_empty(pages));

	/* the rare case (end in gaps) */
	if (bio)
		submit_bio(bio);
	return 0;
}

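/*
 * get_block_t callback for generic_block_bmap(): resolve the block
 * through erofs_map_blocks() and report its physical block number.
 * @create is ignored (erofs is read-only); if the range is unmapped,
 * bh->b_blocknr is left at zero so bmap reports a hole.
 */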
static int erofs_get_block(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh, int create)
{
	struct erofs_map_blocks map = {
		.m_la = iblock << 9,
	};
	int err;

	err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
	if (err)
		return err;

	if (map.m_flags & EROFS_MAP_MAPPED)
		bh->b_blocknr = erofs_blknr(map.m_pa);

	return err;
}

static sector_t erofs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;

	if (EROFS_I(inode)->datalayout == EROFS_INODE_FLAT_INLINE) {
		erofs_blk_t blks = i_size_read(inode) >> LOG_BLOCK_SIZE;

		if (block >> LOG_SECTORS_PER_BLOCK >= blks)
			return 0;
	}

	return generic_block_bmap(mapping, block, erofs_get_block);
}

/* for uncompressed (aligned) files and raw access for other files */
const struct address_space_operations erofs_raw_access_aops = {
	.readpage = erofs_raw_access_readpage,
	.readpages = erofs_raw_access_readpages,
	.bmap = erofs_bmap,
};