[GFS2] Clean up the glock core
[linux-2.6-block.git] fs/gfs2/ops_address.c
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/gfs2_ondisk.h>
#include <linux/lm_interface.h>
#include <linux/backing-dev.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "ops_address.h"
#include "quota.h"
#include "trans.h"
#include "rgrp.h"
#include "super.h"
#include "util.h"
#include "glops.h"

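/**
 * gfs2_page_add_databufs - add a page's data buffers to the current transaction
 * @ip: The inode
 * @page: The page whose buffers are to be added
 * @from: Start offset within the page
 * @to: End offset within the page
 *
 * Each buffer head overlapping the byte range is added to the
 * transaction via gfs2_trans_add_bh(); for jdata inodes the buffers
 * are marked uptodate first.
 */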
static void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
				   unsigned int from, unsigned int to)
{
	struct buffer_head *head = page_buffers(page);
	unsigned int bsize = head->b_size;
	struct buffer_head *bh;
	unsigned int start, end;

	for (bh = head, start = 0; bh != head || !start;
	     bh = bh->b_this_page, start = end) {
		end = start + bsize;
		if (end <= from || start >= to)
			continue;
		if (gfs2_is_jdata(ip))
			set_buffer_uptodate(bh);
		gfs2_trans_add_bh(ip->i_gl, bh, 0);
	}
}

/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add a block to the file
 *
 * Returns: errno
 */

static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
				  struct buffer_head *bh_result, int create)
{
	int error;

	error = gfs2_block_map(inode, lblock, bh_result, 0);
	if (error)
		return error;
	if (!buffer_mapped(bh_result))
		return -EIO;
	return 0;
}

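/*
 * gfs2_get_block_direct - block lookup for direct I/O
 *
 * Maps a block via gfs2_block_map() without allocating; unlike
 * gfs2_get_block_noalloc(), an unmapped block is not treated as an error.
 */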
static int gfs2_get_block_direct(struct inode *inode, sector_t lblock,
				 struct buffer_head *bh_result, int create)
{
	return gfs2_block_map(inode, lblock, bh_result, 0);
}

/**
 * gfs2_writepage_common - Common bits of writepage
 * @page: The page to be written
 * @wbc: The writeback control
 *
 * Returns: 1 if writepage can proceed, an error code on error, or zero
 *          if the page was handled here without error.
 */

static int gfs2_writepage_common(struct page *page,
				 struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	unsigned offset;

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
	if (current->journal_info)
		goto redirty;
	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_CACHE_SIZE-1);
	if (page->index > end_index || (page->index == end_index && !offset)) {
		page->mapping->a_ops->invalidatepage(page, 0);
		goto out;
	}
	return 1;
redirty:
	redirty_page_for_writepage(wbc, page);
out:
	unlock_page(page);
	return 0;
}

/**
 * gfs2_writeback_writepage - Write page for writeback mappings
 * @page: The page
 * @wbc: The writeback control
 *
 */

static int gfs2_writeback_writepage(struct page *page,
				    struct writeback_control *wbc)
{
	int ret;

	ret = gfs2_writepage_common(page, wbc);
	if (ret <= 0)
		return ret;

	ret = mpage_writepage(page, gfs2_get_block_noalloc, wbc);
	if (ret == -EAGAIN)
		ret = block_write_full_page(page, gfs2_get_block_noalloc, wbc);
	return ret;
}

/**
 * gfs2_ordered_writepage - Write page for ordered data files
 * @page: The page to write
 * @wbc: The writeback control
 *
 */

static int gfs2_ordered_writepage(struct page *page,
				  struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	int ret;

	ret = gfs2_writepage_common(page, wbc);
	if (ret <= 0)
		return ret;

	if (!page_has_buffers(page)) {
		create_empty_buffers(page, inode->i_sb->s_blocksize,
				     (1 << BH_Dirty)|(1 << BH_Uptodate));
	}
	gfs2_page_add_databufs(ip, page, 0, inode->i_sb->s_blocksize-1);
	return block_write_full_page(page, gfs2_get_block_noalloc, wbc);
}

/**
 * __gfs2_jdata_writepage - The core of jdata writepage
 * @page: The page to write
 * @wbc: The writeback control
 *
 * This is shared between writepage and writepages and implements the
 * core of the writepage operation. If a transaction is required then
 * PageChecked will have been set and the transaction will have
 * already been started before this is called.
 */

static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (PageChecked(page)) {
		ClearPageChecked(page);
		if (!page_has_buffers(page)) {
			create_empty_buffers(page, inode->i_sb->s_blocksize,
					     (1 << BH_Dirty)|(1 << BH_Uptodate));
		}
		gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize-1);
	}
	return block_write_full_page(page, gfs2_get_block_noalloc, wbc);
}

/**
 * gfs2_jdata_writepage - Write complete page
 * @page: Page to write
 * @wbc: The writeback control
 *
 * Returns: errno
 *
 */

static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	int error;
	int done_trans = 0;

	error = gfs2_writepage_common(page, wbc);
	if (error <= 0)
		return error;

	if (PageChecked(page)) {
		if (wbc->sync_mode != WB_SYNC_ALL)
			goto out_ignore;
		error = gfs2_trans_begin(sdp, RES_DINODE + 1, 0);
		if (error)
			goto out_ignore;
		done_trans = 1;
	}
	error = __gfs2_jdata_writepage(page, wbc);
	if (done_trans)
		gfs2_trans_end(sdp);
	return error;

out_ignore:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
}

/**
 * gfs2_writeback_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 *
 * For the data=writeback case we can already ignore buffer heads
 * and write whole extents at once. This is a big reduction in the
 * number of I/O requests we send and the bmap calls we make in this case.
 */
static int gfs2_writeback_writepages(struct address_space *mapping,
				     struct writeback_control *wbc)
{
	return mpage_writepages(mapping, wbc, gfs2_get_block_noalloc);
}

/**
 * gfs2_write_jdata_pagevec - Write back a pagevec's worth of pages
 * @mapping: The mapping
 * @wbc: The writeback control
 * @pvec: The vector of pages
 * @nr_pages: The number of pages to write
 * @end: The last page index to consider
 *
 * Returns: non-zero if loop should terminate, zero otherwise
 */

static int gfs2_write_jdata_pagevec(struct address_space *mapping,
				    struct writeback_control *wbc,
				    struct pagevec *pvec,
				    int nr_pages, pgoff_t end)
{
	struct inode *inode = mapping->host;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	unsigned offset = i_size & (PAGE_CACHE_SIZE-1);
	unsigned nrblocks = nr_pages * (PAGE_CACHE_SIZE/inode->i_sb->s_blocksize);
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	int i;
	int ret;

	ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
	if (ret < 0)
		return ret;

	for(i = 0; i < nr_pages; i++) {
		struct page *page = pvec->pages[i];

		lock_page(page);

		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			continue;
		}

		if (!wbc->range_cyclic && page->index > end) {
			ret = 1;
			unlock_page(page);
			continue;
		}

		if (wbc->sync_mode != WB_SYNC_NONE)
			wait_on_page_writeback(page);

		if (PageWriteback(page) ||
		    !clear_page_dirty_for_io(page)) {
			unlock_page(page);
			continue;
		}

		/* Is the page fully outside i_size? (truncate in progress) */
		if (page->index > end_index || (page->index == end_index && !offset)) {
			page->mapping->a_ops->invalidatepage(page, 0);
			unlock_page(page);
			continue;
		}

		ret = __gfs2_jdata_writepage(page, wbc);

		if (ret || (--(wbc->nr_to_write) <= 0))
			ret = 1;
		if (wbc->nonblocking && bdi_write_congested(bdi)) {
			wbc->encountered_congestion = 1;
			ret = 1;
		}

	}
	gfs2_trans_end(sdp);
	return ret;
}

/**
 * gfs2_write_cache_jdata - Like write_cache_pages but different
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * The reason that we use our own function here is that we need to
 * start transactions before we grab page locks. This allows us
 * to get the ordering right.
 */

static int gfs2_write_cache_jdata(struct address_space *mapping,
				  struct writeback_control *wbc)
{
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t index;
	pgoff_t end;
	int scanned = 0;
	int range_whole = 0;

	if (wbc->nonblocking && bdi_write_congested(bdi)) {
		wbc->encountered_congestion = 1;
		return 0;
	}

	pagevec_init(&pvec, 0);
	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		scanned = 1;
	}

retry:
	while (!done && (index <= end) &&
	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
					      PAGECACHE_TAG_DIRTY,
					      min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
		scanned = 1;
		ret = gfs2_write_jdata_pagevec(mapping, wbc, &pvec, nr_pages, end);
		if (ret)
			done = 1;
		if (ret > 0)
			ret = 0;

		pagevec_release(&pvec);
		cond_resched();
	}

	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = 1;
		index = 0;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;
	return ret;
}


/**
 * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 */

static int gfs2_jdata_writepages(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	int ret;

	ret = gfs2_write_cache_jdata(mapping, wbc);
	if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
		gfs2_log_flush(sdp, ip->i_gl);
		ret = gfs2_write_cache_jdata(mapping, wbc);
	}
	return ret;
}

/**
 * stuffed_readpage - Fill in a Linux page with stuffed file data
 * @ip: the inode
 * @page: the page
 *
 * Returns: errno
 */

static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
	struct buffer_head *dibh;
	void *kaddr;
	int error;

	/*
	 * Due to the order of unstuffing files and ->fault(), we can be
	 * asked for a zero page in the case of a stuffed file being extended,
	 * so we need to supply one here. It doesn't happen often.
	 */
	if (unlikely(page->index)) {
		zero_user(page, 0, PAGE_CACHE_SIZE);
		return 0;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	kaddr = kmap_atomic(page, KM_USER0);
	memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode),
	       ip->i_di.di_size);
	memset(kaddr + ip->i_di.di_size, 0, PAGE_CACHE_SIZE - ip->i_di.di_size);
	kunmap_atomic(kaddr, KM_USER0);
	flush_dcache_page(page);
	brelse(dibh);
	SetPageUptodate(page);

	return 0;
}


/**
 * __gfs2_readpage - readpage
 * @file: The file to read a page for
 * @page: The page to read
 *
 * This is the core of gfs2's readpage. It's used by the internal file
 * reading code as in that case we already hold the glock. Also it's
 * called by gfs2_readpage() once the required lock has been granted.
 *
 */

static int __gfs2_readpage(void *file, struct page *page)
{
	struct gfs2_inode *ip = GFS2_I(page->mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
	int error;

	if (gfs2_is_stuffed(ip)) {
		error = stuffed_readpage(ip, page);
		unlock_page(page);
	} else {
		error = mpage_readpage(page, gfs2_block_map);
	}

	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;

	return error;
}

/**
 * gfs2_readpage - read a page of a file
 * @file: The file to read
 * @page: The page of the file
 *
 * This deals with the locking required. We use a trylock in order to
 * avoid the page lock / glock ordering problems, returning
 * AOP_TRUNCATED_PAGE in the event that we are unable to get the lock.
 */

static int gfs2_readpage(struct file *file, struct page *page)
{
	struct gfs2_inode *ip = GFS2_I(page->mapping->host);
	struct gfs2_holder gh;
	int error;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME|LM_FLAG_TRY_1CB, &gh);
	error = gfs2_glock_nq_atime(&gh);
	if (unlikely(error)) {
		unlock_page(page);
		goto out;
	}
	error = __gfs2_readpage(file, page);
	gfs2_glock_dq(&gh);
out:
	gfs2_holder_uninit(&gh);
	if (error == GLR_TRYFAILED) {
		yield();
		return AOP_TRUNCATED_PAGE;
	}
	return error;
}

/**
 * gfs2_internal_read - read an internal file
 * @ip: The gfs2 inode
 * @ra_state: The readahead state (or NULL for no readahead)
 * @buf: The buffer to fill
 * @pos: The file position
 * @size: The amount to read
 *
 */

int gfs2_internal_read(struct gfs2_inode *ip, struct file_ra_state *ra_state,
		       char *buf, loff_t *pos, unsigned size)
{
	struct address_space *mapping = ip->i_inode.i_mapping;
	unsigned long index = *pos / PAGE_CACHE_SIZE;
	unsigned offset = *pos & (PAGE_CACHE_SIZE - 1);
	unsigned copied = 0;
	unsigned amt;
	struct page *page;
	void *p;

	do {
		amt = size - copied;
		if (offset + size > PAGE_CACHE_SIZE)
			amt = PAGE_CACHE_SIZE - offset;
		page = read_cache_page(mapping, index, __gfs2_readpage, NULL);
		if (IS_ERR(page))
			return PTR_ERR(page);
		p = kmap_atomic(page, KM_USER0);
		memcpy(buf + copied, p + offset, amt);
		kunmap_atomic(p, KM_USER0);
		mark_page_accessed(page);
		page_cache_release(page);
		copied += amt;
		index++;
		offset = 0;
	} while(copied < size);
	(*pos) += size;
	return size;
}

SW
571/**
572 * gfs2_readpages - Read a bunch of pages at once
573 *
574 * Some notes:
575 * 1. This is only for readahead, so we can simply ignore any things
576 * which are slightly inconvenient (such as locking conflicts between
577 * the page lock and the glock) and return having done no I/O. Its
578 * obviously not something we'd want to do on too regular a basis.
579 * Any I/O we ignore at this time will be done via readpage later.
e1d5b18a 580 * 2. We don't handle stuffed files here we let readpage do the honours.
fd88de56 581 * 3. mpage_readpages() does most of the heavy lifting in the common case.
e9e1ef2b 582 * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
fd88de56 583 */
3cc3f710 584
fd88de56
SW
585static int gfs2_readpages(struct file *file, struct address_space *mapping,
586 struct list_head *pages, unsigned nr_pages)
587{
588 struct inode *inode = mapping->host;
feaa7bba
SW
589 struct gfs2_inode *ip = GFS2_I(inode);
590 struct gfs2_sbd *sdp = GFS2_SB(inode);
fd88de56 591 struct gfs2_holder gh;
3cc3f710 592 int ret;
fd88de56 593
3cc3f710 594 gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &gh);
51ff87bd 595 ret = gfs2_glock_nq_atime(&gh);
51ff87bd 596 if (unlikely(ret))
3cc3f710 597 goto out_uninit;
e1d5b18a 598 if (!gfs2_is_stuffed(ip))
e9e1ef2b 599 ret = mpage_readpages(mapping, pages, nr_pages, gfs2_block_map);
3cc3f710
SW
600 gfs2_glock_dq(&gh);
601out_uninit:
602 gfs2_holder_uninit(&gh);
fd88de56
SW
603 if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
604 ret = -EIO;
605 return ret;
b3b94faa
DT
606}

/**
 * gfs2_write_begin - Begin to write to a file
 * @file: The file to write to
 * @mapping: The mapping in which to write
 * @pos: The file offset at which to start writing
 * @len: Length of the write
 * @flags: Various flags
 * @pagep: Pointer to return the page
 * @fsdata: Pointer to return fs data (unused by GFS2)
 *
 * Returns: errno
 */

static int gfs2_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	unsigned int data_blocks, ind_blocks, rblocks;
	int alloc_required;
	int error = 0;
	struct gfs2_alloc *al;
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	unsigned from = pos & (PAGE_CACHE_SIZE - 1);
	unsigned to = from + len;
	struct page *page;

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_ATIME, &ip->i_gh);
	error = gfs2_glock_nq_atime(&ip->i_gh);
	if (unlikely(error))
		goto out_uninit;

	gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks);
	error = gfs2_write_alloc_required(ip, pos, len, &alloc_required);
	if (error)
		goto out_unlock;

	if (alloc_required) {
		al = gfs2_alloc_get(ip);
		if (!al) {
			error = -ENOMEM;
			goto out_unlock;
		}

		error = gfs2_quota_lock_check(ip);
		if (error)
			goto out_alloc_put;

		al->al_requested = data_blocks + ind_blocks;
		error = gfs2_inplace_reserve(ip);
		if (error)
			goto out_qunlock;
	}

	rblocks = RES_DINODE + ind_blocks;
	if (gfs2_is_jdata(ip))
		rblocks += data_blocks ? data_blocks : 1;
	if (ind_blocks || data_blocks)
		rblocks += RES_STATFS + RES_QUOTA;

	error = gfs2_trans_begin(sdp, rblocks,
				 PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
	if (error)
		goto out_trans_fail;

	error = -ENOMEM;
	page = __grab_cache_page(mapping, index);
	*pagep = page;
	if (unlikely(!page))
		goto out_endtrans;

	if (gfs2_is_stuffed(ip)) {
		error = 0;
		if (pos + len > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) {
			error = gfs2_unstuff_dinode(ip, page);
			if (error == 0)
				goto prepare_write;
		} else if (!PageUptodate(page)) {
			error = stuffed_readpage(ip, page);
		}
		goto out;
	}

prepare_write:
	error = block_prepare_write(page, from, to, gfs2_block_map);
out:
	if (error == 0)
		return 0;

	page_cache_release(page);
	if (pos + len > ip->i_inode.i_size)
		vmtruncate(&ip->i_inode, ip->i_inode.i_size);
out_endtrans:
	gfs2_trans_end(sdp);
out_trans_fail:
	if (alloc_required) {
		gfs2_inplace_release(ip);
out_qunlock:
		gfs2_quota_unlock(ip);
out_alloc_put:
		gfs2_alloc_put(ip);
	}
out_unlock:
	gfs2_glock_dq(&ip->i_gh);
out_uninit:
	gfs2_holder_uninit(&ip->i_gh);
	return error;
}

/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */
static void adjust_fs_space(struct inode *inode)
{
	struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	u64 fs_total, new_free;

	/* Total up the file system space, according to the latest rindex. */
	fs_total = gfs2_ri_total(sdp);

	spin_lock(&sdp->sd_statfs_spin);
	if (fs_total > (m_sc->sc_total + l_sc->sc_total))
		new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
	else
		new_free = 0;
	spin_unlock(&sdp->sd_statfs_spin);
	fs_warn(sdp, "File system extended by %llu blocks.\n",
		(unsigned long long)new_free);
	gfs2_statfs_change(sdp, new_free, new_free, 0);
}

/**
 * gfs2_stuffed_write_end - Write end for stuffed files
 * @inode: The inode
 * @dibh: The buffer_head containing the on-disk inode
 * @pos: The file position
 * @len: The length of the write
 * @copied: How much was actually copied by the VFS
 * @page: The page
 *
 * This copies the data from the page into the inode block after
 * the inode data structure itself.
 *
 * Returns: errno
 */
static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
				  loff_t pos, unsigned len, unsigned copied,
				  struct page *page)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	u64 to = pos + copied;
	void *kaddr;
	unsigned char *buf = dibh->b_data + sizeof(struct gfs2_dinode);
	struct gfs2_dinode *di = (struct gfs2_dinode *)dibh->b_data;

	BUG_ON((pos + len) > (dibh->b_size - sizeof(struct gfs2_dinode)));
	kaddr = kmap_atomic(page, KM_USER0);
	memcpy(buf + pos, kaddr + pos, copied);
	memset(kaddr + pos + copied, 0, len - copied);
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_USER0);

	if (!PageUptodate(page))
		SetPageUptodate(page);
	unlock_page(page);
	page_cache_release(page);

	if (inode->i_size < to) {
		i_size_write(inode, to);
		ip->i_di.di_size = inode->i_size;
		di->di_size = cpu_to_be64(inode->i_size);
		mark_inode_dirty(inode);
	}

	if (inode == sdp->sd_rindex)
		adjust_fs_space(inode);

	brelse(dibh);
	gfs2_trans_end(sdp);
	gfs2_glock_dq(&ip->i_gh);
	gfs2_holder_uninit(&ip->i_gh);
	return copied;
}

/**
 * gfs2_write_end
 * @file: The file to write to
 * @mapping: The address space to write to
 * @pos: The file position
 * @len: The length of the data
 * @copied: How much was actually copied by the VFS
 * @page: The page that has been written
 * @fsdata: The fsdata (unused in GFS2)
 *
 * The main write_end function for GFS2. We have a separate one for
 * stuffed files as they are slightly different, otherwise we just
 * put our locking around the VFS provided functions.
 *
 * Returns: errno
 */

static int gfs2_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *dibh;
	struct gfs2_alloc *al = ip->i_alloc;
	struct gfs2_dinode *di;
	unsigned int from = pos & (PAGE_CACHE_SIZE - 1);
	unsigned int to = from + len;
	int ret;

	BUG_ON(gfs2_glock_is_locked_by_me(ip->i_gl) == NULL);

	ret = gfs2_meta_inode_buffer(ip, &dibh);
	if (unlikely(ret)) {
		unlock_page(page);
		page_cache_release(page);
		goto failed;
	}

	gfs2_trans_add_bh(ip->i_gl, dibh, 1);

	if (gfs2_is_stuffed(ip))
		return gfs2_stuffed_write_end(inode, dibh, pos, len, copied, page);

	if (!gfs2_is_writeback(ip))
		gfs2_page_add_databufs(ip, page, from, to);

	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);

	if (likely(ret >= 0) && (inode->i_size > ip->i_di.di_size)) {
		di = (struct gfs2_dinode *)dibh->b_data;
		ip->i_di.di_size = inode->i_size;
		di->di_size = cpu_to_be64(inode->i_size);
		mark_inode_dirty(inode);
	}

	if (inode == sdp->sd_rindex)
		adjust_fs_space(inode);

	brelse(dibh);
	gfs2_trans_end(sdp);
failed:
	if (al) {
		gfs2_inplace_release(ip);
		gfs2_quota_unlock(ip);
		gfs2_alloc_put(ip);
	}
	gfs2_glock_dq(&ip->i_gh);
	gfs2_holder_uninit(&ip->i_gh);
	return ret;
}

/**
 * gfs2_set_page_dirty - Page dirtying function
 * @page: The page to dirty
 *
 * Returns: 1 if it dirtied the page, or 0 otherwise
 */

static int gfs2_set_page_dirty(struct page *page)
{
	SetPageChecked(page);
	return __set_page_dirty_buffers(page);
}

/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */

static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder i_gh;
	sector_t dblock = 0;
	int error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return 0;

	if (!gfs2_is_stuffed(ip))
		dblock = generic_block_bmap(mapping, lblock, gfs2_block_map);

	gfs2_glock_dq_uninit(&i_gh);

	return dblock;
}

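/*
 * gfs2_discard - detach a buffer from the journal before it is invalidated
 *
 * Clears the buffer's dirty state and, if it has journal bookkeeping
 * attached, either drops it from its log list (when it is unpinned) or
 * removes it from the journal, then clears the mapped/req/new flags.
 */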
static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	lock_buffer(bh);
	gfs2_log_lock(sdp);
	clear_buffer_dirty(bh);
	bd = bh->b_private;
	if (bd) {
		if (!list_empty(&bd->bd_le.le_list) && !buffer_pinned(bh))
			list_del_init(&bd->bd_le.le_list);
		else
			gfs2_remove_from_journal(bh, current->journal_info, 0);
	}
	bh->b_bdev = NULL;
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	gfs2_log_unlock(sdp);
	unlock_buffer(bh);
}

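/*
 * gfs2_invalidatepage - invalidate (part of) a page
 *
 * Discards any buffers starting at or after @offset via gfs2_discard()
 * and, when the whole page is being invalidated, clears PageChecked and
 * attempts to release the page.
 */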
static void gfs2_invalidatepage(struct page *page, unsigned long offset)
{
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
	struct buffer_head *bh, *head;
	unsigned long pos = 0;

	BUG_ON(!PageLocked(page));
	if (offset == 0)
		ClearPageChecked(page);
	if (!page_has_buffers(page))
		goto out;

	bh = head = page_buffers(page);
	do {
		if (offset <= pos)
			gfs2_discard(sdp, bh);
		pos += bh->b_size;
		bh = bh->b_this_page;
	} while (bh != head);
out:
	if (offset == 0)
		try_to_release_page(page, 0);
}

/**
 * gfs2_ok_for_dio - check that dio is valid on this file
 * @ip: The inode
 * @rw: READ or WRITE
 * @offset: The offset at which we are reading or writing
 *
 * Returns: 0 (to ignore the i/o request and thus fall back to buffered i/o)
 *          1 (to accept the i/o request)
 */
static int gfs2_ok_for_dio(struct gfs2_inode *ip, int rw, loff_t offset)
{
	/*
	 * Should we return an error here? I can't see that O_DIRECT for
	 * a stuffed file makes any sense. For now we'll silently fall
	 * back to buffered I/O.
	 */
	if (gfs2_is_stuffed(ip))
		return 0;

	if (offset > i_size_read(&ip->i_inode))
		return 0;
	return 1;
}



static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
			      const struct iovec *iov, loff_t offset,
			      unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int rv;

	/*
	 * Deferred lock, even if it's a write, since we do no allocation
	 * on this path. All we need to change is atime, and this lock mode
	 * ensures that other nodes have flushed their buffered read caches
	 * (i.e. their page cache entries for this inode). We do not,
	 * unfortunately, have the option of only flushing a range like
	 * the VFS does.
	 */
	gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, GL_ATIME, &gh);
	rv = gfs2_glock_nq_atime(&gh);
	if (rv)
		return rv;
	rv = gfs2_ok_for_dio(ip, rw, offset);
	if (rv != 1)
		goto out; /* dio not valid, fall back to buffered i/o */

	rv = blockdev_direct_IO_no_locking(rw, iocb, inode, inode->i_sb->s_bdev,
					   iov, offset, nr_segs,
					   gfs2_get_block_direct, NULL);
out:
	gfs2_glock_dq_m(1, &gh);
	gfs2_holder_uninit(&gh);
	return rv;
}

/**
 * gfs2_releasepage - free the metadata associated with a page
 * @page: the page that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Call try_to_free_buffers() if the buffers in this page can be
 * released.
 *
 * Returns: 0 if the buffers cannot be released, otherwise the result
 *          of try_to_free_buffers()
 */

int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
{
	struct inode *aspace = page->mapping->host;
	struct gfs2_sbd *sdp = aspace->i_sb->s_fs_info;
	struct buffer_head *bh, *head;
	struct gfs2_bufdata *bd;

	if (!page_has_buffers(page))
		return 0;

	gfs2_log_lock(sdp);
	head = bh = page_buffers(page);
	do {
		if (atomic_read(&bh->b_count))
			goto cannot_release;
		bd = bh->b_private;
		if (bd && bd->bd_ail)
			goto cannot_release;
		gfs2_assert_warn(sdp, !buffer_pinned(bh));
		gfs2_assert_warn(sdp, !buffer_dirty(bh));
		bh = bh->b_this_page;
	} while(bh != head);
	gfs2_log_unlock(sdp);

	head = bh = page_buffers(page);
	do {
		gfs2_log_lock(sdp);
		bd = bh->b_private;
		if (bd) {
			gfs2_assert_warn(sdp, bd->bd_bh == bh);
			gfs2_assert_warn(sdp, list_empty(&bd->bd_list_tr));
			if (!list_empty(&bd->bd_le.le_list)) {
				if (!buffer_pinned(bh))
					list_del_init(&bd->bd_le.le_list);
				else
					bd = NULL;
			}
			if (bd)
				bd->bd_bh = NULL;
			bh->b_private = NULL;
		}
		gfs2_log_unlock(sdp);
		if (bd)
			kmem_cache_free(gfs2_bufdata_cachep, bd);

		bh = bh->b_this_page;
	} while (bh != head);

	return try_to_free_buffers(page);

cannot_release:
	gfs2_log_unlock(sdp);
	return 0;
}

static const struct address_space_operations gfs2_writeback_aops = {
	.writepage = gfs2_writeback_writepage,
	.writepages = gfs2_writeback_writepages,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.sync_page = block_sync_page,
	.write_begin = gfs2_write_begin,
	.write_end = gfs2_write_end,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.direct_IO = gfs2_direct_IO,
	.migratepage = buffer_migrate_page,
};

static const struct address_space_operations gfs2_ordered_aops = {
	.writepage = gfs2_ordered_writepage,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.sync_page = block_sync_page,
	.write_begin = gfs2_write_begin,
	.write_end = gfs2_write_end,
	.set_page_dirty = gfs2_set_page_dirty,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.direct_IO = gfs2_direct_IO,
	.migratepage = buffer_migrate_page,
};

static const struct address_space_operations gfs2_jdata_aops = {
	.writepage = gfs2_jdata_writepage,
	.writepages = gfs2_jdata_writepages,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.sync_page = block_sync_page,
	.write_begin = gfs2_write_begin,
	.write_end = gfs2_write_end,
	.set_page_dirty = gfs2_set_page_dirty,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
};

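/*
 * gfs2_set_aops - select the address_space operations for an inode
 *
 * The operations vector is chosen according to the inode's data
 * journaling mode: writeback, ordered or jdata.
 */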
void gfs2_set_aops(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);

	if (gfs2_is_writeback(ip))
		inode->i_mapping->a_ops = &gfs2_writeback_aops;
	else if (gfs2_is_ordered(ip))
		inode->i_mapping->a_ops = &gfs2_ordered_aops;
	else if (gfs2_is_jdata(ip))
		inode->i_mapping->a_ops = &gfs2_jdata_aops;
	else
		BUG();
}