// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/gfs2_ondisk.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <trace/events/writeback.h>
#include <linux/sched/signal.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "trans.h"
#include "rgrp.h"
#include "super.h"
#include "util.h"
#include "glops.h"
#include "aops.h"

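/**
 * gfs2_trans_add_databufs - Add a folio's buffers to the current transaction
 * @ip: The inode
 * @folio: The folio
 * @from: Offset of the first byte of the range within the folio
 * @len: Length of the range in bytes
 *
 * Marks each buffer head that overlaps the byte range [@from, @from + @len)
 * uptodate and adds it to the current transaction with gfs2_trans_add_data().
 */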
void gfs2_trans_add_databufs(struct gfs2_inode *ip, struct folio *folio,
			     size_t from, size_t len)
{
	struct buffer_head *head = folio_buffers(folio);
	unsigned int bsize = head->b_size;
	struct buffer_head *bh;
	size_t to = from + len;
	size_t start, end;

	for (bh = head, start = 0; bh != head || !start;
	     bh = bh->b_this_page, start = end) {
		end = start + bsize;
		if (end <= from)
			continue;
		if (start >= to)
			break;
		set_buffer_uptodate(bh);
		gfs2_trans_add_data(ip->i_gl, bh);
	}
}

/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file
 *
 * Returns: errno
 */

static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
				  struct buffer_head *bh_result, int create)
{
	int error;

	error = gfs2_block_map(inode, lblock, bh_result, 0);
	if (error)
		return error;
	if (!buffer_mapped(bh_result))
		return -ENODATA;
	return 0;
}

/**
 * gfs2_write_jdata_folio - gfs2 jdata-specific version of block_write_full_folio
 * @folio: The folio to write
 * @wbc: The writeback control
 *
 * This is the same as calling block_write_full_folio, but it also
 * writes pages outside of i_size
 */
static int gfs2_write_jdata_folio(struct folio *folio,
				  struct writeback_control *wbc)
{
	struct inode * const inode = folio->mapping->host;
	loff_t i_size = i_size_read(inode);

	/*
	 * The folio straddles i_size.  It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped.  "A file is mapped
	 * in multiples of the page size.  For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	if (folio_pos(folio) < i_size &&
	    i_size < folio_pos(folio) + folio_size(folio))
		folio_zero_segment(folio, offset_in_folio(folio, i_size),
				   folio_size(folio));

	return __block_write_full_folio(inode, folio, gfs2_get_block_noalloc,
					wbc);
}

/**
 * __gfs2_jdata_write_folio - The core of jdata writepage
 * @folio: The folio to write
 * @wbc: The writeback control
 *
 * This is shared between writepage and writepages and implements the
 * core of the writepage operation. If a transaction is required then
 * the checked flag will have been set and the transaction will have
 * already been started before this is called.
 */
static int __gfs2_jdata_write_folio(struct folio *folio,
				    struct writeback_control *wbc)
{
	struct inode *inode = folio->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);

	if (folio_test_checked(folio)) {
		folio_clear_checked(folio);
		if (!folio_buffers(folio)) {
			create_empty_buffers(folio,
					     inode->i_sb->s_blocksize,
					     BIT(BH_Dirty)|BIT(BH_Uptodate));
		}
		gfs2_trans_add_databufs(ip, folio, 0, folio_size(folio));
	}
	return gfs2_write_jdata_folio(folio, wbc);
}

/**
 * gfs2_jdata_writepage - Write complete page
 * @page: Page to write
 * @wbc: The writeback control
 *
 * Returns: errno
 *
 */

static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct folio *folio = page_folio(page);
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (gfs2_assert_withdraw(sdp, ip->i_gl->gl_state == LM_ST_EXCLUSIVE))
		goto out;
	if (folio_test_checked(folio) || current->journal_info)
		goto out_ignore;
	return __gfs2_jdata_write_folio(folio, wbc);

out_ignore:
	folio_redirty_for_writepage(wbc, folio);
out:
	folio_unlock(folio);
	return 0;
}

/**
 * gfs2_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 *
 * Used for both ordered and writeback modes.
 */
static int gfs2_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct iomap_writepage_ctx wpc = { };
	int ret;

	/*
	 * Even if we didn't write enough pages here, we might still be holding
	 * dirty pages in the ail. We forcibly flush the ail because we don't
	 * want balance_dirty_pages() to loop indefinitely trying to write out
	 * pages held in the ail that it can't find.
	 */
	ret = iomap_writepages(mapping, wbc, &wpc, &gfs2_writeback_ops);
	if (ret == 0 && wbc->nr_to_write > 0)
		set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);
	return ret;
}

/**
 * gfs2_write_jdata_batch - Write back a folio batch's worth of folios
 * @mapping: The mapping
 * @wbc: The writeback control
 * @fbatch: The batch of folios
 * @done_index: Updated to track how far writeback has progressed
 *
 * Returns: non-zero if loop should terminate, zero otherwise
 */

static int gfs2_write_jdata_batch(struct address_space *mapping,
				  struct writeback_control *wbc,
				  struct folio_batch *fbatch,
				  pgoff_t *done_index)
{
	struct inode *inode = mapping->host;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	unsigned nrblocks;
	int i;
	int ret;
	size_t size = 0;
	int nr_folios = folio_batch_count(fbatch);

	for (i = 0; i < nr_folios; i++)
		size += folio_size(fbatch->folios[i]);
	nrblocks = size >> inode->i_blkbits;

	ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
	if (ret < 0)
		return ret;

	for (i = 0; i < nr_folios; i++) {
		struct folio *folio = fbatch->folios[i];

		*done_index = folio->index;

		folio_lock(folio);

		if (unlikely(folio->mapping != mapping)) {
continue_unlock:
			folio_unlock(folio);
			continue;
		}

		if (!folio_test_dirty(folio)) {
			/* someone wrote it for us */
			goto continue_unlock;
		}

		if (folio_test_writeback(folio)) {
			if (wbc->sync_mode != WB_SYNC_NONE)
				folio_wait_writeback(folio);
			else
				goto continue_unlock;
		}

		BUG_ON(folio_test_writeback(folio));
		if (!folio_clear_dirty_for_io(folio))
			goto continue_unlock;

		trace_wbc_writepage(wbc, inode_to_bdi(inode));

		ret = __gfs2_jdata_write_folio(folio, wbc);
		if (unlikely(ret)) {
			if (ret == AOP_WRITEPAGE_ACTIVATE) {
				folio_unlock(folio);
				ret = 0;
			} else {

				/*
				 * done_index is set past this page,
				 * so media errors will not choke
				 * background writeout for the entire
				 * file. This has consequences for
				 * range_cyclic semantics (ie. it may
				 * not be suitable for data integrity
				 * writeout).
				 */
				*done_index = folio_next_index(folio);
				ret = 1;
				break;
			}
		}

		/*
		 * We stop writing back only if we are not doing
		 * integrity sync. In case of integrity sync we have to
		 * keep going until we have written all the pages
		 * we tagged for writeback prior to entering this loop.
		 */
		if (--wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) {
			ret = 1;
			break;
		}

	}
	gfs2_trans_end(sdp);
	return ret;
}

/**
 * gfs2_write_cache_jdata - Like write_cache_pages but different
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * The reason that we use our own function here is that we need to
 * start transactions before we grab page locks. This allows us
 * to get the ordering right.
 */

static int gfs2_write_cache_jdata(struct address_space *mapping,
				  struct writeback_control *wbc)
{
	int ret = 0;
	int done = 0;
	struct folio_batch fbatch;
	int nr_folios;
	pgoff_t writeback_index;
	pgoff_t index;
	pgoff_t end;
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	xa_mark_t tag;

	folio_batch_init(&fbatch);
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;

retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		nr_folios = filemap_get_folios_tag(mapping, &index, end,
						   tag, &fbatch);
		if (nr_folios == 0)
			break;

		ret = gfs2_write_jdata_batch(mapping, wbc, &fbatch,
					     &done_index);
		if (ret)
			done = 1;
		if (ret > 0)
			ret = 0;
		folio_batch_release(&fbatch);
		cond_resched();
	}

	if (!cycled && !done) {
		/*
		 * range_cyclic:
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}

/**
 * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 */

static int gfs2_jdata_writepages(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	int ret;

	ret = gfs2_write_cache_jdata(mapping, wbc);
	if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
		gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_JDATA_WPAGES);
		ret = gfs2_write_cache_jdata(mapping, wbc);
	}
	return ret;
}

/**
 * stuffed_read_folio - Fill in a Linux folio with stuffed file data
 * @ip: the inode
 * @folio: the folio
 *
 * Returns: errno
 */
static int stuffed_read_folio(struct gfs2_inode *ip, struct folio *folio)
{
	struct buffer_head *dibh = NULL;
	size_t dsize = i_size_read(&ip->i_inode);
	void *from = NULL;
	int error = 0;

	/*
	 * Due to the order of unstuffing files and ->fault(), we can be
	 * asked for a zero folio in the case of a stuffed file being extended,
	 * so we need to supply one here. It doesn't happen often.
	 */
	if (unlikely(folio->index)) {
		dsize = 0;
	} else {
		error = gfs2_meta_inode_buffer(ip, &dibh);
		if (error)
			goto out;
		from = dibh->b_data + sizeof(struct gfs2_dinode);
	}

	folio_fill_tail(folio, 0, from, dsize);
	brelse(dibh);
out:
	folio_end_read(folio, error == 0);

	return error;
}

/**
 * gfs2_read_folio - read a folio from a file
 * @file: The file to read
 * @folio: The folio in the file
 */
static int gfs2_read_folio(struct file *file, struct folio *folio)
{
	struct inode *inode = folio->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	int error;

	if (!gfs2_is_jdata(ip) ||
	    (i_blocksize(inode) == PAGE_SIZE && !folio_buffers(folio))) {
		error = iomap_read_folio(folio, &gfs2_iomap_ops);
	} else if (gfs2_is_stuffed(ip)) {
		error = stuffed_read_folio(ip, folio);
	} else {
		error = mpage_read_folio(folio, gfs2_block_map);
	}

	if (gfs2_withdrawing_or_withdrawn(sdp))
		return -EIO;

	return error;
}

/**
 * gfs2_internal_read - read an internal file
 * @ip: The gfs2 inode
 * @buf: The buffer to fill
 * @pos: The file position
 * @size: The amount to read
 *
 */

ssize_t gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
			   size_t size)
{
	struct address_space *mapping = ip->i_inode.i_mapping;
	unsigned long index = *pos >> PAGE_SHIFT;
	size_t copied = 0;

	do {
		size_t offset, chunk;
		struct folio *folio;

		folio = read_cache_folio(mapping, index, gfs2_read_folio, NULL);
		if (IS_ERR(folio)) {
			if (PTR_ERR(folio) == -EINTR)
				continue;
			return PTR_ERR(folio);
		}
		offset = *pos + copied - folio_pos(folio);
		chunk = min(size - copied, folio_size(folio) - offset);
		memcpy_from_folio(buf + copied, folio, offset, chunk);
		index = folio_next_index(folio);
		folio_put(folio);
		copied += chunk;
	} while (copied < size);
	(*pos) += size;
	return size;
}

/**
 * gfs2_readahead - Read a bunch of pages at once
 * @rac: Read-ahead control structure
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore any things
 *    which are slightly inconvenient (such as locking conflicts between
 *    the page lock and the glock) and return having done no I/O.  It's
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via readpage later.
 * 2. We don't handle stuffed files here; we let readpage do the honours.
 * 3. mpage_readahead() does most of the heavy lifting in the common case.
 * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
 */

static void gfs2_readahead(struct readahead_control *rac)
{
	struct inode *inode = rac->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);

	if (gfs2_is_stuffed(ip))
		;
	else if (gfs2_is_jdata(ip))
		mpage_readahead(rac, gfs2_block_map);
	else
		iomap_readahead(rac, &gfs2_iomap_ops);
}

/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */
void adjust_fs_space(struct inode *inode)
{
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *m_bh;
	u64 fs_total, new_free;

	if (gfs2_trans_begin(sdp, 2 * RES_STATFS, 0) != 0)
		return;

	/* Total up the file system space, according to the latest rindex. */
	fs_total = gfs2_ri_total(sdp);
	if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
		goto out;

	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	if (fs_total > (m_sc->sc_total + l_sc->sc_total))
		new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
	else
		new_free = 0;
	spin_unlock(&sdp->sd_statfs_spin);
	fs_warn(sdp, "File system extended by %llu blocks.\n",
		(unsigned long long)new_free);
	gfs2_statfs_change(sdp, new_free, new_free, 0);

	update_statfs(sdp, m_bh);
	brelse(m_bh);
out:
	sdp->sd_rindex_uptodate = 0;
	gfs2_trans_end(sdp);
}

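/**
 * jdata_dirty_folio - Mark a jdata folio as dirty
 * @mapping: The mapping this folio belongs to
 * @folio: The folio being dirtied
 *
 * When a folio is dirtied from within a transaction, it is flagged as
 * "checked" so that the jdata writeback code knows its buffers still
 * need to be added to a transaction before being written back.
 */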
static bool jdata_dirty_folio(struct address_space *mapping,
			      struct folio *folio)
{
	if (current->journal_info)
		folio_set_checked(folio);
	return block_dirty_folio(mapping, folio);
}

/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */

static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder i_gh;
	sector_t dblock = 0;
	int error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return 0;

	if (!gfs2_is_stuffed(ip))
		dblock = iomap_bmap(mapping, lblock, &gfs2_iomap_ops);

	gfs2_glock_dq_uninit(&i_gh);

	return dblock;
}

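/*
 * Detach a buffer from the journal before it is invalidated: take it off
 * any journal list it may be on (or pull it out of the ail), and clear
 * its dirty, mapped, req, and new state.
 */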
static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	lock_buffer(bh);
	gfs2_log_lock(sdp);
	clear_buffer_dirty(bh);
	bd = bh->b_private;
	if (bd) {
		if (!list_empty(&bd->bd_list) && !buffer_pinned(bh))
			list_del_init(&bd->bd_list);
		else {
			spin_lock(&sdp->sd_ail_lock);
			gfs2_remove_from_journal(bh, REMOVE_JDATA);
			spin_unlock(&sdp->sd_ail_lock);
		}
	}
	bh->b_bdev = NULL;
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	gfs2_log_unlock(sdp);
	unlock_buffer(bh);
}

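/**
 * gfs2_invalidate_folio - Invalidate part or all of a folio
 * @folio: The folio to invalidate
 * @offset: Start of the byte range being invalidated
 * @length: Length of the byte range
 *
 * Discards the buffers that lie entirely within the given range; when
 * the whole folio is invalidated, its private data is released as well.
 */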
static void gfs2_invalidate_folio(struct folio *folio, size_t offset,
				  size_t length)
{
	struct gfs2_sbd *sdp = GFS2_SB(folio->mapping->host);
	size_t stop = offset + length;
	int partial_page = (offset || length < folio_size(folio));
	struct buffer_head *bh, *head;
	unsigned long pos = 0;

	BUG_ON(!folio_test_locked(folio));
	if (!partial_page)
		folio_clear_checked(folio);
	head = folio_buffers(folio);
	if (!head)
		goto out;

	bh = head;
	do {
		if (pos + bh->b_size > stop)
			return;

		if (offset <= pos)
			gfs2_discard(sdp, bh);
		pos += bh->b_size;
		bh = bh->b_this_page;
	} while (bh != head);
out:
	if (!partial_page)
		filemap_release_folio(folio, 0);
}

/**
 * gfs2_release_folio - free the metadata associated with a folio
 * @folio: the folio that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Calls try_to_free_buffers() to free the buffers and put the folio if the
 * buffers can be released.
 *
 * Returns: true if the folio was put or else false
 */

bool gfs2_release_folio(struct folio *folio, gfp_t gfp_mask)
{
	struct address_space *mapping = folio->mapping;
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct buffer_head *bh, *head;
	struct gfs2_bufdata *bd;

	head = folio_buffers(folio);
	if (!head)
		return false;

	/*
	 * mm accommodates an old ext3 case where clean folios might
	 * not have had the dirty bit cleared.  Thus, it can send actual
	 * dirty folios to ->release_folio() via shrink_active_list().
	 *
	 * As a workaround, we skip folios that contain dirty buffers
	 * below.  Once ->release_folio isn't called on dirty folios
	 * anymore, we can warn on dirty buffers like we used to here
	 * again.
	 */

	gfs2_log_lock(sdp);
	bh = head;
	do {
		if (atomic_read(&bh->b_count))
			goto cannot_release;
		bd = bh->b_private;
		if (bd && bd->bd_tr)
			goto cannot_release;
		if (buffer_dirty(bh) || WARN_ON(buffer_pinned(bh)))
			goto cannot_release;
		bh = bh->b_this_page;
	} while (bh != head);

	bh = head;
	do {
		bd = bh->b_private;
		if (bd) {
			gfs2_assert_warn(sdp, bd->bd_bh == bh);
			bd->bd_bh = NULL;
			bh->b_private = NULL;
			/*
			 * The bd may still be queued as a revoke, in which
			 * case we must not dequeue nor free it.
			 */
			if (!bd->bd_blkno && !list_empty(&bd->bd_list))
				list_del_init(&bd->bd_list);
			if (list_empty(&bd->bd_list))
				kmem_cache_free(gfs2_bufdata_cachep, bd);
		}

		bh = bh->b_this_page;
	} while (bh != head);
	gfs2_log_unlock(sdp);

	return try_to_free_buffers(folio);

cannot_release:
	gfs2_log_unlock(sdp);
	return false;
}

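/*
 * Ordinary files use the iomap-based operations; journaled data (jdata)
 * files use the buffer-head based operations, so that their folios can
 * be tied to transactions.
 */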
static const struct address_space_operations gfs2_aops = {
	.writepages = gfs2_writepages,
	.read_folio = gfs2_read_folio,
	.readahead = gfs2_readahead,
	.dirty_folio = iomap_dirty_folio,
	.release_folio = iomap_release_folio,
	.invalidate_folio = iomap_invalidate_folio,
	.bmap = gfs2_bmap,
	.migrate_folio = filemap_migrate_folio,
	.is_partially_uptodate = iomap_is_partially_uptodate,
	.error_remove_folio = generic_error_remove_folio,
};

static const struct address_space_operations gfs2_jdata_aops = {
	.writepage = gfs2_jdata_writepage,
	.writepages = gfs2_jdata_writepages,
	.read_folio = gfs2_read_folio,
	.readahead = gfs2_readahead,
	.dirty_folio = jdata_dirty_folio,
	.bmap = gfs2_bmap,
	.invalidate_folio = gfs2_invalidate_folio,
	.release_folio = gfs2_release_folio,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_folio = generic_error_remove_folio,
};

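/**
 * gfs2_set_aops - Set the address space operations for an inode
 * @inode: The inode
 */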
void gfs2_set_aops(struct inode *inode)
{
	if (gfs2_is_jdata(GFS2_I(inode)))
		inode->i_mapping->a_ops = &gfs2_jdata_aops;
	else
		inode->i_mapping->a_ops = &gfs2_aops;
}