buffer: remove folio_create_empty_buffers()
[linux-block.git] / fs / gfs2 / aops.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/gfs2_ondisk.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <trace/events/writeback.h>
#include <linux/sched/signal.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "trans.h"
#include "rgrp.h"
#include "super.h"
#include "util.h"
#include "glops.h"
#include "aops.h"

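/**
 * gfs2_trans_add_databufs - add a folio's buffers to the current transaction
 * @ip: The inode
 * @folio: The folio
 * @from: Offset of the first affected byte within the folio
 * @len: Length of the affected byte range
 *
 * Walks the folio's buffers and, for each buffer overlapping the range
 * [@from, @from + @len), marks it uptodate and journals it as data via
 * gfs2_trans_add_data().
 */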
void gfs2_trans_add_databufs(struct gfs2_inode *ip, struct folio *folio,
			     size_t from, size_t len)
{
	struct buffer_head *head = folio_buffers(folio);
	unsigned int bsize = head->b_size;
	struct buffer_head *bh;
	size_t to = from + len;
	size_t start, end;

	for (bh = head, start = 0; bh != head || !start;
	     bh = bh->b_this_page, start = end) {
		end = start + bsize;
		if (end <= from)
			continue;
		if (start >= to)
			break;
		set_buffer_uptodate(bh);
		gfs2_trans_add_data(ip->i_gl, bh);
	}
}

/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file (unused: this helper
 *          never allocates)
 *
 * Returns: errno
 */

static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
				  struct buffer_head *bh_result, int create)
{
	int error;

	error = gfs2_block_map(inode, lblock, bh_result, 0);
	if (error)
		return error;
	if (!buffer_mapped(bh_result))
		return -ENODATA;
	return 0;
}

/**
 * gfs2_write_jdata_folio - gfs2 jdata-specific version of block_write_full_page
 * @folio: The folio to write
 * @wbc: The writeback control
 *
 * This is the same as calling block_write_full_page, but it also
 * writes pages outside of i_size
 */
static int gfs2_write_jdata_folio(struct folio *folio,
				  struct writeback_control *wbc)
{
	struct inode * const inode = folio->mapping->host;
	loff_t i_size = i_size_read(inode);

	/*
	 * The folio straddles i_size.  It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped.  "A file is mapped
	 * in multiples of the page size.  For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	if (folio_pos(folio) < i_size &&
	    i_size < folio_pos(folio) + folio_size(folio))
		folio_zero_segment(folio, offset_in_folio(folio, i_size),
				folio_size(folio));

	return __block_write_full_folio(inode, folio, gfs2_get_block_noalloc,
			wbc, end_buffer_async_write);
}

/**
 * __gfs2_jdata_write_folio - The core of jdata writepage
 * @folio: The folio to write
 * @wbc: The writeback control
 *
 * This is shared between writepage and writepages and implements the
 * core of the writepage operation. If a transaction is required then
 * the checked flag will have been set and the transaction will have
 * already been started before this is called.
 */
static int __gfs2_jdata_write_folio(struct folio *folio,
				    struct writeback_control *wbc)
{
	struct inode *inode = folio->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);

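	/*
	 * A checked folio was dirtied inside a transaction (see
	 * jdata_dirty_folio() below): make sure it has buffers and journal
	 * them before the folio goes to disk.
	 */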
	if (folio_test_checked(folio)) {
		folio_clear_checked(folio);
		if (!folio_buffers(folio)) {
			create_empty_buffers(folio,
					inode->i_sb->s_blocksize,
					BIT(BH_Dirty)|BIT(BH_Uptodate));
		}
		gfs2_trans_add_databufs(ip, folio, 0, folio_size(folio));
	}
	return gfs2_write_jdata_folio(folio, wbc);
}

/**
 * gfs2_jdata_writepage - Write complete page
 * @page: Page to write
 * @wbc: The writeback control
 *
 * Returns: errno
 *
 */

static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct folio *folio = page_folio(page);
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
	if (folio_test_checked(folio) || current->journal_info)
		goto out_ignore;
	return __gfs2_jdata_write_folio(folio, wbc);

out_ignore:
	folio_redirty_for_writepage(wbc, folio);
out:
	folio_unlock(folio);
	return 0;
}

/**
 * gfs2_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 *
 * Used for both ordered and writeback modes.
 */
static int gfs2_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct iomap_writepage_ctx wpc = { };
	int ret;

	/*
	 * Even if we didn't write enough pages here, we might still be holding
	 * dirty pages in the ail. We forcibly flush the ail because we don't
	 * want balance_dirty_pages() to loop indefinitely trying to write out
	 * pages held in the ail that it can't find.
	 */
	ret = iomap_writepages(mapping, wbc, &wpc, &gfs2_writeback_ops);
	if (ret == 0 && wbc->nr_to_write > 0)
		set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);
	return ret;
}

/**
 * gfs2_write_jdata_batch - Write back a folio batch's worth of folios
 * @mapping: The mapping
 * @wbc: The writeback control
 * @fbatch: The batch of folios
 * @done_index: The index to resume writeback from, passed back to the caller
 *
 * Returns: non-zero if loop should terminate, zero otherwise
 */

static int gfs2_write_jdata_batch(struct address_space *mapping,
				  struct writeback_control *wbc,
				  struct folio_batch *fbatch,
				  pgoff_t *done_index)
{
	struct inode *inode = mapping->host;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	unsigned nrblocks;
	int i;
	int ret;
	int nr_pages = 0;
	int nr_folios = folio_batch_count(fbatch);

	for (i = 0; i < nr_folios; i++)
		nr_pages += folio_nr_pages(fbatch->folios[i]);
	nrblocks = nr_pages * (PAGE_SIZE >> inode->i_blkbits);

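	/* Reserve journal space for every data block in the batch. */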
	ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
	if (ret < 0)
		return ret;

	for (i = 0; i < nr_folios; i++) {
		struct folio *folio = fbatch->folios[i];

		*done_index = folio->index;

		folio_lock(folio);

		if (unlikely(folio->mapping != mapping)) {
continue_unlock:
			folio_unlock(folio);
			continue;
		}

		if (!folio_test_dirty(folio)) {
			/* someone wrote it for us */
			goto continue_unlock;
		}

		if (folio_test_writeback(folio)) {
			if (wbc->sync_mode != WB_SYNC_NONE)
				folio_wait_writeback(folio);
			else
				goto continue_unlock;
		}

		BUG_ON(folio_test_writeback(folio));
		if (!folio_clear_dirty_for_io(folio))
			goto continue_unlock;

		trace_wbc_writepage(wbc, inode_to_bdi(inode));

		ret = __gfs2_jdata_write_folio(folio, wbc);
		if (unlikely(ret)) {
			if (ret == AOP_WRITEPAGE_ACTIVATE) {
				folio_unlock(folio);
				ret = 0;
			} else {

				/*
				 * done_index is set past this page,
				 * so media errors will not choke
				 * background writeout for the entire
				 * file. This has consequences for
				 * range_cyclic semantics (ie. it may
				 * not be suitable for data integrity
				 * writeout).
				 */
				*done_index = folio_next_index(folio);
				ret = 1;
				break;
			}
		}

		/*
		 * We stop writing back only if we are not doing
		 * integrity sync. In case of integrity sync we have to
		 * keep going until we have written all the pages
		 * we tagged for writeback prior to entering this loop.
		 */
		if (--wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) {
			ret = 1;
			break;
		}

	}
	gfs2_trans_end(sdp);
	return ret;
}

/**
 * gfs2_write_cache_jdata - Like write_cache_pages but different
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * The reason that we use our own function here is that we need to
 * start transactions before we grab page locks. This allows us
 * to get the ordering right.
 */

static int gfs2_write_cache_jdata(struct address_space *mapping,
				  struct writeback_control *wbc)
{
	int ret = 0;
	int done = 0;
	struct folio_batch fbatch;
	int nr_folios;
	pgoff_t writeback_index;
	pgoff_t index;
	pgoff_t end;
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	xa_mark_t tag;

	folio_batch_init(&fbatch);
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
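	/*
	 * For integrity sync, work from the TOWRITE tag so that folios
	 * dirtied after tagging are left for a later pass and cannot
	 * livelock us.
	 */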
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;

retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		nr_folios = filemap_get_folios_tag(mapping, &index, end,
				tag, &fbatch);
		if (nr_folios == 0)
			break;

		ret = gfs2_write_jdata_batch(mapping, wbc, &fbatch,
					     &done_index);
		if (ret)
			done = 1;
		if (ret > 0)
			ret = 0;
		folio_batch_release(&fbatch);
		cond_resched();
	}

	if (!cycled && !done) {
		/*
		 * range_cyclic:
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}

/**
 * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 */

static int gfs2_jdata_writepages(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	int ret;

	ret = gfs2_write_cache_jdata(mapping, wbc);
	if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
		gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_JDATA_WPAGES);
		ret = gfs2_write_cache_jdata(mapping, wbc);
	}
	return ret;
}

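/*
 * A "stuffed" file is small enough that its data lives directly in the
 * on-disk dinode block, after the struct gfs2_dinode header, instead of in
 * separate data blocks.
 */
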
/**
 * stuffed_readpage - Fill in a Linux page with stuffed file data
 * @ip: the inode
 * @page: the page
 *
 * Returns: errno
 */
static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
	struct buffer_head *dibh;
	u64 dsize = i_size_read(&ip->i_inode);
	void *kaddr;
	int error;

	/*
	 * Due to the order of unstuffing files and ->fault(), we can be
	 * asked for a zero page in the case of a stuffed file being extended,
	 * so we need to supply one here. It doesn't happen often.
	 */
	if (unlikely(page->index)) {
		zero_user(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		return 0;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	kaddr = kmap_local_page(page);
	memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
	memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
	kunmap_local(kaddr);
	flush_dcache_page(page);
	brelse(dibh);
	SetPageUptodate(page);

	return 0;
}

/**
 * gfs2_read_folio - read a folio from a file
 * @file: The file to read
 * @folio: The folio in the file
 */
static int gfs2_read_folio(struct file *file, struct folio *folio)
{
	struct inode *inode = folio->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	int error;

	if (!gfs2_is_jdata(ip) ||
	    (i_blocksize(inode) == PAGE_SIZE && !folio_buffers(folio))) {
		error = iomap_read_folio(folio, &gfs2_iomap_ops);
	} else if (gfs2_is_stuffed(ip)) {
		error = stuffed_readpage(ip, &folio->page);
		folio_unlock(folio);
	} else {
		error = mpage_read_folio(folio, gfs2_block_map);
	}

	if (unlikely(gfs2_withdrawn(sdp)))
		return -EIO;

	return error;
}

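/*
 * gfs2_internal_read() below is used for reading files internal to the
 * filesystem, such as the resource index (rindex); note that it simply
 * retries reads that come back -EINTR.
 */
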
/**
 * gfs2_internal_read - read an internal file
 * @ip: The gfs2 inode
 * @buf: The buffer to fill
 * @pos: The file position
 * @size: The amount to read
 *
 */

int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
		       unsigned size)
{
	struct address_space *mapping = ip->i_inode.i_mapping;
	unsigned long index = *pos >> PAGE_SHIFT;
	unsigned offset = *pos & (PAGE_SIZE - 1);
	unsigned copied = 0;
	unsigned amt;
	struct page *page;

	do {
		page = read_cache_page(mapping, index, gfs2_read_folio, NULL);
		if (IS_ERR(page)) {
			if (PTR_ERR(page) == -EINTR)
				continue;
			return PTR_ERR(page);
		}
		amt = size - copied;
		if (offset + size > PAGE_SIZE)
			amt = PAGE_SIZE - offset;
		memcpy_from_page(buf + copied, page, offset, amt);
		put_page(page);
		copied += amt;
		index++;
		offset = 0;
	} while(copied < size);
	(*pos) += size;
	return size;
}

/**
 * gfs2_readahead - Read a bunch of pages at once
 * @rac: Read-ahead control structure
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore any things
 *    which are slightly inconvenient (such as locking conflicts between
 *    the page lock and the glock) and return having done no I/O. It's
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via readpage later.
 * 2. We don't handle stuffed files here; we let readpage do the honours.
 * 3. mpage_readahead() does most of the heavy lifting in the common case.
 * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
 */

static void gfs2_readahead(struct readahead_control *rac)
{
	struct inode *inode = rac->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);

	if (gfs2_is_stuffed(ip))
		;
	else if (gfs2_is_jdata(ip))
		mpage_readahead(rac, gfs2_block_map);
	else
		iomap_readahead(rac, &gfs2_iomap_ops);
}

/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */
void adjust_fs_space(struct inode *inode)
{
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *m_bh;
	u64 fs_total, new_free;

	if (gfs2_trans_begin(sdp, 2 * RES_STATFS, 0) != 0)
		return;

	/* Total up the file system space, according to the latest rindex. */
	fs_total = gfs2_ri_total(sdp);
	if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
		goto out;

	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	if (fs_total > (m_sc->sc_total + l_sc->sc_total))
		new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
	else
		new_free = 0;
	spin_unlock(&sdp->sd_statfs_spin);
	fs_warn(sdp, "File system extended by %llu blocks.\n",
		(unsigned long long)new_free);
	gfs2_statfs_change(sdp, new_free, new_free, 0);

	update_statfs(sdp, m_bh);
	brelse(m_bh);
out:
	sdp->sd_rindex_uptodate = 0;
	gfs2_trans_end(sdp);
}

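/*
 * jdata_dirty_folio - mark a folio dirty, setting its checked flag when the
 * dirtying happens inside a transaction (current->journal_info is set), so
 * that jdata writeback knows to journal the folio's buffers.
 */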
static bool jdata_dirty_folio(struct address_space *mapping,
		struct folio *folio)
{
	if (current->journal_info)
		folio_set_checked(folio);
	return block_dirty_folio(mapping, folio);
}

/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */

static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder i_gh;
	sector_t dblock = 0;
	int error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return 0;

	if (!gfs2_is_stuffed(ip))
		dblock = iomap_bmap(mapping, lblock, &gfs2_iomap_ops);

	gfs2_glock_dq_uninit(&i_gh);

	return dblock;
}

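/* Detach a buffer from the journal structures so its folio can be freed. */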
static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	lock_buffer(bh);
	gfs2_log_lock(sdp);
	clear_buffer_dirty(bh);
	bd = bh->b_private;
	if (bd) {
		if (!list_empty(&bd->bd_list) && !buffer_pinned(bh))
			list_del_init(&bd->bd_list);
		else {
			spin_lock(&sdp->sd_ail_lock);
			gfs2_remove_from_journal(bh, REMOVE_JDATA);
			spin_unlock(&sdp->sd_ail_lock);
		}
	}
	bh->b_bdev = NULL;
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	gfs2_log_unlock(sdp);
	unlock_buffer(bh);
}

static void gfs2_invalidate_folio(struct folio *folio, size_t offset,
				  size_t length)
{
	struct gfs2_sbd *sdp = GFS2_SB(folio->mapping->host);
	size_t stop = offset + length;
	int partial_page = (offset || length < folio_size(folio));
	struct buffer_head *bh, *head;
	unsigned long pos = 0;

	BUG_ON(!folio_test_locked(folio));
	if (!partial_page)
		folio_clear_checked(folio);
	head = folio_buffers(folio);
	if (!head)
		goto out;

	bh = head;
	do {
		if (pos + bh->b_size > stop)
			return;

		if (offset <= pos)
			gfs2_discard(sdp, bh);
		pos += bh->b_size;
		bh = bh->b_this_page;
	} while (bh != head);
out:
	if (!partial_page)
		filemap_release_folio(folio, 0);
}

/**
 * gfs2_release_folio - free the metadata associated with a folio
 * @folio: the folio that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Calls try_to_free_buffers() to free the buffers and put the folio if the
 * buffers can be released.
 *
 * Returns: true if the folio was put or else false
 */

bool gfs2_release_folio(struct folio *folio, gfp_t gfp_mask)
{
	struct address_space *mapping = folio->mapping;
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct buffer_head *bh, *head;
	struct gfs2_bufdata *bd;

	head = folio_buffers(folio);
	if (!head)
		return false;

	/*
	 * mm accommodates an old ext3 case where clean folios might
	 * not have had the dirty bit cleared. Thus, it can send actual
	 * dirty folios to ->release_folio() via shrink_active_list().
	 *
	 * As a workaround, we skip folios that contain dirty buffers
	 * below. Once ->release_folio isn't called on dirty folios
	 * anymore, we can warn on dirty buffers like we used to here
	 * again.
	 */

	gfs2_log_lock(sdp);
	bh = head;
	do {
		if (atomic_read(&bh->b_count))
			goto cannot_release;
		bd = bh->b_private;
		if (bd && bd->bd_tr)
			goto cannot_release;
		if (buffer_dirty(bh) || WARN_ON(buffer_pinned(bh)))
			goto cannot_release;
		bh = bh->b_this_page;
	} while (bh != head);

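	/* Every buffer is releasable; detach their journal bookkeeping. */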
	bh = head;
	do {
		bd = bh->b_private;
		if (bd) {
			gfs2_assert_warn(sdp, bd->bd_bh == bh);
			bd->bd_bh = NULL;
			bh->b_private = NULL;
			/*
			 * The bd may still be queued as a revoke, in which
			 * case we must not dequeue nor free it.
			 */
			if (!bd->bd_blkno && !list_empty(&bd->bd_list))
				list_del_init(&bd->bd_list);
			if (list_empty(&bd->bd_list))
				kmem_cache_free(gfs2_bufdata_cachep, bd);
		}

		bh = bh->b_this_page;
	} while (bh != head);
	gfs2_log_unlock(sdp);

	return try_to_free_buffers(folio);

cannot_release:
	gfs2_log_unlock(sdp);
	return false;
}

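/*
 * Address space operations for regular and ordered-mode files; the data
 * path here goes through iomap rather than buffer heads.
 */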
static const struct address_space_operations gfs2_aops = {
	.writepages = gfs2_writepages,
	.read_folio = gfs2_read_folio,
	.readahead = gfs2_readahead,
	.dirty_folio = iomap_dirty_folio,
	.release_folio = iomap_release_folio,
	.invalidate_folio = iomap_invalidate_folio,
	.bmap = gfs2_bmap,
	.migrate_folio = filemap_migrate_folio,
	.is_partially_uptodate = iomap_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

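/*
 * Address space operations for journaled-data (jdata) files, which still
 * use buffer heads so that file data can be journaled.
 */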
static const struct address_space_operations gfs2_jdata_aops = {
	.writepage = gfs2_jdata_writepage,
	.writepages = gfs2_jdata_writepages,
	.read_folio = gfs2_read_folio,
	.readahead = gfs2_readahead,
	.dirty_folio = jdata_dirty_folio,
	.bmap = gfs2_bmap,
	.invalidate_folio = gfs2_invalidate_folio,
	.release_folio = gfs2_release_folio,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

void gfs2_set_aops(struct inode *inode)
{
	if (gfs2_is_jdata(GFS2_I(inode)))
		inode->i_mapping->a_ops = &gfs2_jdata_aops;
	else
		inode->i_mapping->a_ops = &gfs2_aops;
}