GFS2: Fix slab memory leak in gfs2_bufdata
fs/gfs2/aops.c

/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/gfs2_ondisk.h>
#include <linux/backing-dev.h>
#include <linux/aio.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "trans.h"
#include "rgrp.h"
#include "super.h"
#include "util.h"
#include "glops.h"

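/**
 * gfs2_page_add_databufs - Add each buffer overlapping [@from, @to) to the
 * current transaction
 * @ip: The inode
 * @page: The page holding the buffers
 * @from: Offset within the page of the start of the write
 * @to: Offset within the page of the end of the write
 */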
static void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
				   unsigned int from, unsigned int to)
{
	struct buffer_head *head = page_buffers(page);
	unsigned int bsize = head->b_size;
	struct buffer_head *bh;
	unsigned int start, end;

	for (bh = head, start = 0; bh != head || !start;
	     bh = bh->b_this_page, start = end) {
		end = start + bsize;
		if (end <= from || start >= to)
			continue;
		if (gfs2_is_jdata(ip))
			set_buffer_uptodate(bh);
		gfs2_trans_add_data(ip->i_gl, bh);
	}
}

/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file
 *
 * Returns: errno
 */

static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
				  struct buffer_head *bh_result, int create)
{
	int error;

	error = gfs2_block_map(inode, lblock, bh_result, 0);
	if (error)
		return error;
	if (!buffer_mapped(bh_result))
		return -EIO;
	return 0;
}

static int gfs2_get_block_direct(struct inode *inode, sector_t lblock,
				 struct buffer_head *bh_result, int create)
{
	return gfs2_block_map(inode, lblock, bh_result, 0);
}

/**
 * gfs2_writepage_common - Common bits of writepage
 * @page: The page to be written
 * @wbc: The writeback control
 *
 * Returns: 1 if writepage should go ahead, an error code on failure, or
 * zero if the page has already been dealt with here (no error).
 */

static int gfs2_writepage_common(struct page *page,
				 struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	unsigned offset;

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
	if (current->journal_info)
		goto redirty;
	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_CACHE_SIZE-1);
	if (page->index > end_index || (page->index == end_index && !offset)) {
		page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE);
		goto out;
	}
	return 1;
redirty:
	redirty_page_for_writepage(wbc, page);
out:
	unlock_page(page);
	return 0;
}

/**
 * gfs2_writepage - Write page for writeback mappings
 * @page: The page
 * @wbc: The writeback control
 *
 */

static int gfs2_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret;

	ret = gfs2_writepage_common(page, wbc);
	if (ret <= 0)
		return ret;

	return nobh_writepage(page, gfs2_get_block_noalloc, wbc);
}

/**
 * __gfs2_jdata_writepage - The core of jdata writepage
 * @page: The page to write
 * @wbc: The writeback control
 *
 * This is shared between writepage and writepages and implements the
 * core of the writepage operation. If a transaction is required then
 * PageChecked will have been set and the transaction will have
 * already been started before this is called.
 */

static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (PageChecked(page)) {
		ClearPageChecked(page);
		if (!page_has_buffers(page)) {
			create_empty_buffers(page, inode->i_sb->s_blocksize,
					     (1 << BH_Dirty)|(1 << BH_Uptodate));
		}
		gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize-1);
	}
	return block_write_full_page(page, gfs2_get_block_noalloc, wbc);
}

/**
 * gfs2_jdata_writepage - Write complete page
 * @page: Page to write
 * @wbc: The writeback control
 *
 * Returns: errno
 */

static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	int ret;
	int done_trans = 0;

	if (PageChecked(page)) {
		if (wbc->sync_mode != WB_SYNC_ALL)
			goto out_ignore;
		ret = gfs2_trans_begin(sdp, RES_DINODE + 1, 0);
		if (ret)
			goto out_ignore;
		done_trans = 1;
	}
	ret = gfs2_writepage_common(page, wbc);
	if (ret > 0)
		ret = __gfs2_jdata_writepage(page, wbc);
	if (done_trans)
		gfs2_trans_end(sdp);
	return ret;

out_ignore:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
}
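/*
 * The journal reservation (RES_DINODE + 1) above must be taken out before
 * __gfs2_jdata_writepage() runs: gfs2_page_add_databufs() may add the
 * page's buffers to the transaction, which is only legal once
 * gfs2_trans_begin() has succeeded.
 */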

/**
 * gfs2_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 *
 * Used for both ordered and writeback modes.
 */
static int gfs2_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	return mpage_writepages(mapping, wbc, gfs2_get_block_noalloc);
}

/**
 * gfs2_write_jdata_pagevec - Write back a pagevec's worth of pages
 * @mapping: The mapping
 * @wbc: The writeback control
 * @pvec: The vector of pages
 * @nr_pages: The number of pages to write
 * @end: The last page index to write to
 *
 * Returns: non-zero if loop should terminate, zero otherwise
 */

static int gfs2_write_jdata_pagevec(struct address_space *mapping,
				    struct writeback_control *wbc,
				    struct pagevec *pvec,
				    int nr_pages, pgoff_t end)
{
	struct inode *inode = mapping->host;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	unsigned offset = i_size & (PAGE_CACHE_SIZE-1);
	unsigned nrblocks = nr_pages * (PAGE_CACHE_SIZE/inode->i_sb->s_blocksize);
	int i;
	int ret;

	ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
	if (ret < 0)
		return ret;

	for(i = 0; i < nr_pages; i++) {
		struct page *page = pvec->pages[i];

		lock_page(page);

		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			continue;
		}

		if (!wbc->range_cyclic && page->index > end) {
			ret = 1;
			unlock_page(page);
			continue;
		}

		if (wbc->sync_mode != WB_SYNC_NONE)
			wait_on_page_writeback(page);

		if (PageWriteback(page) ||
		    !clear_page_dirty_for_io(page)) {
			unlock_page(page);
			continue;
		}

		/* Is the page fully outside i_size? (truncate in progress) */
		if (page->index > end_index || (page->index == end_index && !offset)) {
			page->mapping->a_ops->invalidatepage(page, 0,
							     PAGE_CACHE_SIZE);
			unlock_page(page);
			continue;
		}

		ret = __gfs2_jdata_writepage(page, wbc);

		if (ret || (--(wbc->nr_to_write) <= 0))
			ret = 1;
	}
	gfs2_trans_end(sdp);
	return ret;
}
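/*
 * Worth noting: nrblocks above reserves one journal block for every
 * file system block covered by the pagevec, so the single transaction
 * opened here is sized to cover every page that the loop may write.
 */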

/**
 * gfs2_write_cache_jdata - Like write_cache_pages but different
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * The reason that we use our own function here is that we need to
 * start transactions before we grab page locks. This allows us
 * to get the ordering right.
 */

static int gfs2_write_cache_jdata(struct address_space *mapping,
				  struct writeback_control *wbc)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t index;
	pgoff_t end;
	int scanned = 0;
	int range_whole = 0;

	pagevec_init(&pvec, 0);
	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		scanned = 1;
	}

retry:
	while (!done && (index <= end) &&
	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
					      PAGECACHE_TAG_DIRTY,
					      min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
		scanned = 1;
		ret = gfs2_write_jdata_pagevec(mapping, wbc, &pvec, nr_pages, end);
		if (ret)
			done = 1;
		if (ret > 0)
			ret = 0;

		pagevec_release(&pvec);
		cond_resched();
	}

	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = 1;
		index = 0;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;
	return ret;
}
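/*
 * The retry logic above mirrors write_cache_pages(): a range_cyclic scan
 * that started mid-file and found no dirty pages wraps back to index 0
 * exactly once; the scanned flag guards against looping forever.
 */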


/**
 * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 */

static int gfs2_jdata_writepages(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	int ret;

	ret = gfs2_write_cache_jdata(mapping, wbc);
	if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
		gfs2_log_flush(sdp, ip->i_gl);
		ret = gfs2_write_cache_jdata(mapping, wbc);
	}
	return ret;
}

/**
 * stuffed_readpage - Fill in a Linux page with stuffed file data
 * @ip: the inode
 * @page: the page
 *
 * Returns: errno
 */

static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
	struct buffer_head *dibh;
	u64 dsize = i_size_read(&ip->i_inode);
	void *kaddr;
	int error;

	/*
	 * Due to the order of unstuffing files and ->fault(), we can be
	 * asked for a zero page in the case of a stuffed file being extended,
	 * so we need to supply one here. It doesn't happen often.
	 */
	if (unlikely(page->index)) {
		zero_user(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
		return 0;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	kaddr = kmap_atomic(page);
	if (dsize > (dibh->b_size - sizeof(struct gfs2_dinode)))
		dsize = (dibh->b_size - sizeof(struct gfs2_dinode));
	memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
	memset(kaddr + dsize, 0, PAGE_CACHE_SIZE - dsize);
	kunmap_atomic(kaddr);
	flush_dcache_page(page);
	brelse(dibh);
	SetPageUptodate(page);

	return 0;
}
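/*
 * The dsize clamp above guards against an i_size that claims more data
 * than fits in the dinode block after its header; at most
 * b_size - sizeof(struct gfs2_dinode) bytes can ever be stuffed.
 */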


/**
 * __gfs2_readpage - readpage
 * @file: The file to read a page for
 * @page: The page to read
 *
 * This is the core of gfs2's readpage. It's used by the internal file
 * reading code, as in that case we already hold the glock. It's also
 * called by gfs2_readpage() once the required lock has been granted.
 */

static int __gfs2_readpage(void *file, struct page *page)
{
	struct gfs2_inode *ip = GFS2_I(page->mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
	int error;

	if (gfs2_is_stuffed(ip)) {
		error = stuffed_readpage(ip, page);
		unlock_page(page);
	} else {
		error = mpage_readpage(page, gfs2_block_map);
	}

	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;

	return error;
}

/**
 * gfs2_readpage - read a page of a file
 * @file: The file to read
 * @page: The page of the file
 *
 * This deals with the locking required. We have to unlock and
 * relock the page in order to get the locking in the right
 * order.
 */

static int gfs2_readpage(struct file *file, struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder gh;
	int error;

	unlock_page(page);
	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	error = gfs2_glock_nq(&gh);
	if (unlikely(error))
		goto out;
	error = AOP_TRUNCATED_PAGE;
	lock_page(page);
	if (page->mapping == mapping && !PageUptodate(page))
		error = __gfs2_readpage(file, page);
	else
		unlock_page(page);
	gfs2_glock_dq(&gh);
out:
	gfs2_holder_uninit(&gh);
	if (error && error != AOP_TRUNCATED_PAGE)
		lock_page(page);
	return error;
}

/**
 * gfs2_internal_read - read an internal file
 * @ip: The gfs2 inode
 * @buf: The buffer to fill
 * @pos: The file position
 * @size: The amount to read
 *
 * Returns: The amount of data actually copied or the error
 */

int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
		       unsigned size)
{
	struct address_space *mapping = ip->i_inode.i_mapping;
	unsigned long index = *pos / PAGE_CACHE_SIZE;
	unsigned offset = *pos & (PAGE_CACHE_SIZE - 1);
	unsigned copied = 0;
	unsigned amt;
	struct page *page;
	void *p;

	do {
		amt = size - copied;
		if (offset + size > PAGE_CACHE_SIZE)
			amt = PAGE_CACHE_SIZE - offset;
		page = read_cache_page(mapping, index, __gfs2_readpage, NULL);
		if (IS_ERR(page))
			return PTR_ERR(page);
		p = kmap_atomic(page);
		memcpy(buf + copied, p + offset, amt);
		kunmap_atomic(p);
		mark_page_accessed(page);
		page_cache_release(page);
		copied += amt;
		index++;
		offset = 0;
	} while(copied < size);
	(*pos) += size;
	return size;
}
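/*
 * A minimal usage sketch (hypothetical caller, error handling omitted):
 *
 *	char buf[64];
 *	loff_t pos = 0;
 *	int n = gfs2_internal_read(ip, buf, &pos, sizeof(buf));
 *
 * On success the full @size is copied, *pos is advanced by @size, and
 * @size is returned. The rindex code, for example, reads the resource
 * group index through this interface.
 */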

/**
 * gfs2_readpages - Read a bunch of pages at once
 * @file: The file to read from
 * @mapping: The address space
 * @pages: List of pages to read
 * @nr_pages: Number of pages to read
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore any things
 *    which are slightly inconvenient (such as locking conflicts between
 *    the page lock and the glock) and return having done no I/O. It's
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via readpage later.
 * 2. We don't handle stuffed files here; we let readpage do the honours.
 * 3. mpage_readpages() does most of the heavy lifting in the common case.
 * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
 */

static int gfs2_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_holder gh;
	int ret;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	ret = gfs2_glock_nq(&gh);
	if (unlikely(ret))
		goto out_uninit;
	if (!gfs2_is_stuffed(ip))
		ret = mpage_readpages(mapping, pages, nr_pages, gfs2_block_map);
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		ret = -EIO;
	return ret;
}

/**
 * gfs2_write_begin - Begin to write to a file
 * @file: The file to write to
 * @mapping: The mapping in which to write
 * @pos: The file offset at which to start writing
 * @len: Length of the write
 * @flags: Various flags
 * @pagep: Pointer to return the page
 * @fsdata: Pointer to return fs data (unused by GFS2)
 *
 * Returns: errno
 */

static int gfs2_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
	unsigned requested = 0;
	int alloc_required;
	int error = 0;
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	unsigned from = pos & (PAGE_CACHE_SIZE - 1);
	struct page *page;

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
	error = gfs2_glock_nq(&ip->i_gh);
	if (unlikely(error))
		goto out_uninit;
	if (&ip->i_inode == sdp->sd_rindex) {
		error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, &m_ip->i_gh);
		if (unlikely(error)) {
			gfs2_glock_dq(&ip->i_gh);
			goto out_uninit;
		}
	}

	alloc_required = gfs2_write_alloc_required(ip, pos, len);

	if (alloc_required || gfs2_is_jdata(ip))
		gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks);

	if (alloc_required) {
		struct gfs2_alloc_parms ap = { .aflags = 0, };
		error = gfs2_quota_lock_check(ip);
		if (error)
			goto out_unlock;

		requested = data_blocks + ind_blocks;
		ap.target = requested;
		error = gfs2_inplace_reserve(ip, &ap);
		if (error)
			goto out_qunlock;
	}

	rblocks = RES_DINODE + ind_blocks;
	if (gfs2_is_jdata(ip))
		rblocks += data_blocks ? data_blocks : 1;
	if (ind_blocks || data_blocks)
		rblocks += RES_STATFS + RES_QUOTA;
	if (&ip->i_inode == sdp->sd_rindex)
		rblocks += 2 * RES_STATFS;
	if (alloc_required)
		rblocks += gfs2_rg_blocks(ip, requested);

	error = gfs2_trans_begin(sdp, rblocks,
				 PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
	if (error)
		goto out_trans_fail;

	error = -ENOMEM;
	flags |= AOP_FLAG_NOFS;
	page = grab_cache_page_write_begin(mapping, index, flags);
	*pagep = page;
	if (unlikely(!page))
		goto out_endtrans;

	if (gfs2_is_stuffed(ip)) {
		error = 0;
		if (pos + len > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) {
			error = gfs2_unstuff_dinode(ip, page);
			if (error == 0)
				goto prepare_write;
		} else if (!PageUptodate(page)) {
			error = stuffed_readpage(ip, page);
		}
		goto out;
	}

prepare_write:
	error = __block_write_begin(page, from, len, gfs2_block_map);
out:
	if (error == 0)
		return 0;

	unlock_page(page);
	page_cache_release(page);

	gfs2_trans_end(sdp);
	if (pos + len > ip->i_inode.i_size)
		gfs2_trim_blocks(&ip->i_inode);
	goto out_trans_fail;

out_endtrans:
	gfs2_trans_end(sdp);
out_trans_fail:
	if (alloc_required) {
		gfs2_inplace_release(ip);
out_qunlock:
		gfs2_quota_unlock(ip);
	}
out_unlock:
	if (&ip->i_inode == sdp->sd_rindex) {
		gfs2_glock_dq(&m_ip->i_gh);
		gfs2_holder_uninit(&m_ip->i_gh);
	}
	gfs2_glock_dq(&ip->i_gh);
out_uninit:
	gfs2_holder_uninit(&ip->i_gh);
	return error;
}
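/*
 * Lock/reservation ordering in gfs2_write_begin() above: glock first,
 * then the quota lock, then the in-place block reservation, and only
 * then the transaction; the error exits unwind in exactly the reverse
 * order.
 */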

/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */
static void adjust_fs_space(struct inode *inode)
{
	struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *m_bh, *l_bh;
	u64 fs_total, new_free;

	/* Total up the file system space, according to the latest rindex. */
	fs_total = gfs2_ri_total(sdp);
	if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
		return;

	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	if (fs_total > (m_sc->sc_total + l_sc->sc_total))
		new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
	else
		new_free = 0;
	spin_unlock(&sdp->sd_statfs_spin);
	fs_warn(sdp, "File system extended by %llu blocks.\n",
		(unsigned long long)new_free);
	gfs2_statfs_change(sdp, new_free, new_free, 0);

	if (gfs2_meta_inode_buffer(l_ip, &l_bh) != 0)
		goto out;
	update_statfs(sdp, m_bh, l_bh);
	brelse(l_bh);
out:
	brelse(m_bh);
}

/**
 * gfs2_stuffed_write_end - Write end for stuffed files
 * @inode: The inode
 * @dibh: The buffer_head containing the on-disk inode
 * @pos: The file position
 * @len: The length of the write
 * @copied: How much was actually copied by the VFS
 * @page: The page
 *
 * This copies the data from the page into the inode block after
 * the inode data structure itself.
 *
 * Returns: errno
 */
static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
				  loff_t pos, unsigned len, unsigned copied,
				  struct page *page)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	u64 to = pos + copied;
	void *kaddr;
	unsigned char *buf = dibh->b_data + sizeof(struct gfs2_dinode);

	BUG_ON((pos + len) > (dibh->b_size - sizeof(struct gfs2_dinode)));
	kaddr = kmap_atomic(page);
	memcpy(buf + pos, kaddr + pos, copied);
	memset(kaddr + pos + copied, 0, len - copied);
	flush_dcache_page(page);
	kunmap_atomic(kaddr);

	if (!PageUptodate(page))
		SetPageUptodate(page);
	unlock_page(page);
	page_cache_release(page);

	if (copied) {
		if (inode->i_size < to)
			i_size_write(inode, to);
		mark_inode_dirty(inode);
	}

	if (inode == sdp->sd_rindex) {
		adjust_fs_space(inode);
		sdp->sd_rindex_uptodate = 0;
	}

	brelse(dibh);
	gfs2_trans_end(sdp);
	if (inode == sdp->sd_rindex) {
		gfs2_glock_dq(&m_ip->i_gh);
		gfs2_holder_uninit(&m_ip->i_gh);
	}
	gfs2_glock_dq(&ip->i_gh);
	gfs2_holder_uninit(&ip->i_gh);
	return copied;
}
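/*
 * Stuffed files keep their data in the dinode block itself, immediately
 * after the struct gfs2_dinode header; the BUG_ON above enforces that a
 * stuffed write can never extend past that block.
 */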

/**
 * gfs2_write_end
 * @file: The file to write to
 * @mapping: The address space to write to
 * @pos: The file position
 * @len: The length of the data
 * @copied: How much was actually copied by the VFS
 * @page: The page that has been written
 * @fsdata: The fsdata (unused in GFS2)
 *
 * The main write_end function for GFS2. We have a separate one for
 * stuffed files as they are slightly different, otherwise we just
 * put our locking around the VFS provided functions.
 *
 * Returns: errno
 */

static int gfs2_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct buffer_head *dibh;
	unsigned int from = pos & (PAGE_CACHE_SIZE - 1);
	unsigned int to = from + len;
	int ret;
	struct gfs2_trans *tr = current->journal_info;
	BUG_ON(!tr);

	BUG_ON(gfs2_glock_is_locked_by_me(ip->i_gl) == NULL);

	ret = gfs2_meta_inode_buffer(ip, &dibh);
	if (unlikely(ret)) {
		unlock_page(page);
		page_cache_release(page);
		goto failed;
	}

	if (gfs2_is_stuffed(ip))
		return gfs2_stuffed_write_end(inode, dibh, pos, len, copied, page);

	if (!gfs2_is_writeback(ip))
		gfs2_page_add_databufs(ip, page, from, to);

	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
	if (tr->tr_num_buf_new)
		__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
	else
		gfs2_trans_add_meta(ip->i_gl, dibh);

	if (inode == sdp->sd_rindex) {
		adjust_fs_space(inode);
		sdp->sd_rindex_uptodate = 0;
	}

	brelse(dibh);
failed:
	gfs2_trans_end(sdp);
	gfs2_inplace_release(ip);
	if (ip->i_res->rs_qa_qd_num)
		gfs2_quota_unlock(ip);
	if (inode == sdp->sd_rindex) {
		gfs2_glock_dq(&m_ip->i_gh);
		gfs2_holder_uninit(&m_ip->i_gh);
	}
	gfs2_glock_dq(&ip->i_gh);
	gfs2_holder_uninit(&ip->i_gh);
	return ret;
}

/**
 * gfs2_set_page_dirty - Page dirtying function
 * @page: The page to dirty
 *
 * Returns: 1 if it dirtied the page, or 0 otherwise
 */

static int gfs2_set_page_dirty(struct page *page)
{
	SetPageChecked(page);
	return __set_page_dirty_buffers(page);
}

/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */

static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder i_gh;
	sector_t dblock = 0;
	int error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return 0;

	if (!gfs2_is_stuffed(ip))
		dblock = generic_block_bmap(mapping, lblock, gfs2_block_map);

	gfs2_glock_dq_uninit(&i_gh);

	return dblock;
}

static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	lock_buffer(bh);
	gfs2_log_lock(sdp);
	clear_buffer_dirty(bh);
	bd = bh->b_private;
	if (bd) {
		if (!list_empty(&bd->bd_list) && !buffer_pinned(bh))
			list_del_init(&bd->bd_list);
		else
			gfs2_remove_from_journal(bh, current->journal_info, 0);
	}
	bh->b_bdev = NULL;
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	gfs2_log_unlock(sdp);
	unlock_buffer(bh);
}
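/*
 * If the bufdata sits on a log list and the buffer is not pinned, it can
 * simply be unlinked above; otherwise gfs2_remove_from_journal() is
 * needed to detach it from the journal properly.
 */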

static void gfs2_invalidatepage(struct page *page, unsigned int offset,
				unsigned int length)
{
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
	unsigned int stop = offset + length;
	int partial_page = (offset || length < PAGE_CACHE_SIZE);
	struct buffer_head *bh, *head;
	unsigned long pos = 0;

	BUG_ON(!PageLocked(page));
	if (!partial_page)
		ClearPageChecked(page);
	if (!page_has_buffers(page))
		goto out;

	bh = head = page_buffers(page);
	do {
		if (pos + bh->b_size > stop)
			return;

		if (offset <= pos)
			gfs2_discard(sdp, bh);
		pos += bh->b_size;
		bh = bh->b_this_page;
	} while (bh != head);
out:
	if (!partial_page)
		try_to_release_page(page, 0);
}

/**
 * gfs2_ok_for_dio - check that dio is valid on this file
 * @ip: The inode
 * @rw: READ or WRITE
 * @offset: The offset at which we are reading or writing
 *
 * Returns: 0 (to ignore the i/o request and thus fall back to buffered i/o)
 *          1 (to accept the i/o request)
 */
static int gfs2_ok_for_dio(struct gfs2_inode *ip, int rw, loff_t offset)
{
	/*
	 * Should we return an error here? I can't see that O_DIRECT for
	 * a stuffed file makes any sense. For now we'll silently fall
	 * back to buffered I/O
	 */
	if (gfs2_is_stuffed(ip))
		return 0;

	if (offset >= i_size_read(&ip->i_inode))
		return 0;
	return 1;
}

static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
			      const struct iovec *iov, loff_t offset,
			      unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int rv;

	/*
	 * Deferred lock, even if it's a write, since we do no allocation
	 * on this path. All we need change is atime, and this lock mode
	 * ensures that other nodes have flushed their buffered read caches
	 * (i.e. their page cache entries for this inode). We do not,
	 * unfortunately, have the option of only flushing a range like
	 * the VFS does.
	 */
	gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, &gh);
	rv = gfs2_glock_nq(&gh);
	if (rv)
		return rv;
	rv = gfs2_ok_for_dio(ip, rw, offset);
	if (rv != 1)
		goto out; /* dio not valid, fall back to buffered i/o */

	rv = __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
				  offset, nr_segs, gfs2_get_block_direct,
				  NULL, NULL, 0);
out:
	gfs2_glock_dq(&gh);
	gfs2_holder_uninit(&gh);
	return rv;
}
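/*
 * Note that LM_ST_DEFERRED is what makes O_DIRECT cluster-coherent here:
 * holding the glock in deferred mode is (roughly speaking) incompatible
 * with the shared/exclusive modes used for buffered I/O, so other nodes
 * must drop their cached pages for this inode before the direct I/O can
 * proceed.
 */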

/**
 * gfs2_releasepage - free the metadata associated with a page
 * @page: the page that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Call try_to_free_buffers() if the buffers in this page can be
 * released.
 *
 * Returns: 1 if the buffers were released, otherwise 0
 */

int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
{
	struct address_space *mapping = page->mapping;
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct buffer_head *bh, *head;
	struct gfs2_bufdata *bd;

	if (!page_has_buffers(page))
		return 0;

	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	head = bh = page_buffers(page);
	do {
		if (atomic_read(&bh->b_count))
			goto cannot_release;
		bd = bh->b_private;
		if (bd && bd->bd_tr)
			goto cannot_release;
		if (buffer_pinned(bh) || buffer_dirty(bh))
			goto not_possible;
		bh = bh->b_this_page;
	} while(bh != head);
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);

	head = bh = page_buffers(page);
	do {
		gfs2_log_lock(sdp);
		bd = bh->b_private;
		if (bd) {
			gfs2_assert_warn(sdp, bd->bd_bh == bh);
			if (!list_empty(&bd->bd_list)) {
				if (!buffer_pinned(bh))
					list_del_init(&bd->bd_list);
				else
					bd = NULL;
			}
			if (bd)
				bd->bd_bh = NULL;
			bh->b_private = NULL;
		}
		gfs2_log_unlock(sdp);
		if (bd)
			kmem_cache_free(gfs2_bufdata_cachep, bd);

		bh = bh->b_this_page;
	} while (bh != head);

	return try_to_free_buffers(page);

not_possible: /* Should never happen */
	WARN_ON(buffer_dirty(bh));
	WARN_ON(buffer_pinned(bh));
cannot_release:
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);
	return 0;
}
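/*
 * Note the two-pass structure above: the first pass, under both the log
 * lock and the AIL lock, only verifies that every buffer on the page can
 * be released; the second pass retakes the log lock per buffer, detaches
 * each gfs2_bufdata, and frees it back to the slab cache.
 */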

static const struct address_space_operations gfs2_writeback_aops = {
	.writepage = gfs2_writepage,
	.writepages = gfs2_writepages,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.write_begin = gfs2_write_begin,
	.write_end = gfs2_write_end,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.direct_IO = gfs2_direct_IO,
	.migratepage = buffer_migrate_page,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations gfs2_ordered_aops = {
	.writepage = gfs2_writepage,
	.writepages = gfs2_writepages,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.write_begin = gfs2_write_begin,
	.write_end = gfs2_write_end,
	.set_page_dirty = gfs2_set_page_dirty,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.direct_IO = gfs2_direct_IO,
	.migratepage = buffer_migrate_page,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations gfs2_jdata_aops = {
	.writepage = gfs2_jdata_writepage,
	.writepages = gfs2_jdata_writepages,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.write_begin = gfs2_write_begin,
	.write_end = gfs2_write_end,
	.set_page_dirty = gfs2_set_page_dirty,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

void gfs2_set_aops(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);

	if (gfs2_is_writeback(ip))
		inode->i_mapping->a_ops = &gfs2_writeback_aops;
	else if (gfs2_is_ordered(ip))
		inode->i_mapping->a_ops = &gfs2_ordered_aops;
	else if (gfs2_is_jdata(ip))
		inode->i_mapping->a_ops = &gfs2_jdata_aops;
	else
		BUG();
}
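/*
 * Roughly, the gfs2_is_* predicates above map mount data modes to the
 * three tables: data=writeback selects gfs2_writeback_aops, data=ordered
 * (the default) selects gfs2_ordered_aops, and inodes carrying the jdata
 * flag get gfs2_jdata_aops (which has no ->direct_IO, since journaled
 * data cannot bypass the journal).
 */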