/* fs/gfs2/ops_address.c */
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/gfs2_ondisk.h>
#include <linux/lm_interface.h>
#include <linux/backing-dev.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "ops_address.h"
#include "quota.h"
#include "trans.h"
#include "rgrp.h"
#include "super.h"
#include "util.h"
#include "glops.h"

static void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
                                   unsigned int from, unsigned int to)
{
        struct buffer_head *head = page_buffers(page);
        unsigned int bsize = head->b_size;
        struct buffer_head *bh;
        unsigned int start, end;

        for (bh = head, start = 0; bh != head || !start;
             bh = bh->b_this_page, start = end) {
                end = start + bsize;
                if (end <= from || start >= to)
                        continue;
                if (gfs2_is_jdata(ip))
                        set_buffer_uptodate(bh);
                gfs2_trans_add_bh(ip->i_gl, bh, 0);
        }
}

/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file
 *
 * Returns: errno
 */

static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
                                  struct buffer_head *bh_result, int create)
{
        int error;

        error = gfs2_block_map(inode, lblock, bh_result, 0);
        if (error)
                return error;
        if (!buffer_mapped(bh_result))
                return -EIO;
        return 0;
}

static int gfs2_get_block_direct(struct inode *inode, sector_t lblock,
                                 struct buffer_head *bh_result, int create)
{
        return gfs2_block_map(inode, lblock, bh_result, 0);
}

/**
 * gfs2_writepage_common - Common bits of writepage
 * @page: The page to be written
 * @wbc: The writeback control
 *
 * Returns: 1 if writepage is ok, otherwise an error code or zero if no error.
 */

static int gfs2_writepage_common(struct page *page,
                                 struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        loff_t i_size = i_size_read(inode);
        pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
        unsigned offset;

        if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
                goto out;
        if (current->journal_info)
                goto redirty;
        /* Is the page fully outside i_size? (truncate in progress) */
        offset = i_size & (PAGE_CACHE_SIZE-1);
        if (page->index > end_index || (page->index == end_index && !offset)) {
                page->mapping->a_ops->invalidatepage(page, 0);
                goto out;
        }
        return 1;
redirty:
        redirty_page_for_writepage(wbc, page);
out:
        unlock_page(page);
        return 0;
}
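
/*
 * Worked example of the truncate check above, assuming PAGE_CACHE_SIZE
 * is 4096: for i_size = 10000, end_index = 10000 >> 12 = 2 and
 * offset = 10000 & 4095 = 1808.  A page with index 3 lies entirely
 * beyond i_size and is invalidated; index 2 still covers bytes
 * 8192..9999 and is written out.  Were i_size exactly 8192, offset
 * would be zero and index 2 (which then starts at i_size) would be
 * invalidated as well.
 */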

/**
 * gfs2_writeback_writepage - Write page for writeback mappings
 * @page: The page
 * @wbc: The writeback control
 *
 */

static int gfs2_writeback_writepage(struct page *page,
                                    struct writeback_control *wbc)
{
        int ret;

        ret = gfs2_writepage_common(page, wbc);
        if (ret <= 0)
                return ret;

        ret = mpage_writepage(page, gfs2_get_block_noalloc, wbc);
        if (ret == -EAGAIN)
                ret = block_write_full_page(page, gfs2_get_block_noalloc, wbc);
        return ret;
}

/**
 * gfs2_ordered_writepage - Write page for ordered data files
 * @page: The page to write
 * @wbc: The writeback control
 *
 */

static int gfs2_ordered_writepage(struct page *page,
                                  struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        int ret;

        ret = gfs2_writepage_common(page, wbc);
        if (ret <= 0)
                return ret;

        if (!page_has_buffers(page)) {
                create_empty_buffers(page, inode->i_sb->s_blocksize,
                                     (1 << BH_Dirty)|(1 << BH_Uptodate));
        }
        gfs2_page_add_databufs(ip, page, 0, inode->i_sb->s_blocksize-1);
        return block_write_full_page(page, gfs2_get_block_noalloc, wbc);
}

/**
 * __gfs2_jdata_writepage - The core of jdata writepage
 * @page: The page to write
 * @wbc: The writeback control
 *
 * This is shared between writepage and writepages and implements the
 * core of the writepage operation. If a transaction is required then
 * PageChecked will have been set and the transaction will have
 * already been started before this is called.
 */

static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);

        if (PageChecked(page)) {
                ClearPageChecked(page);
                if (!page_has_buffers(page)) {
                        create_empty_buffers(page, inode->i_sb->s_blocksize,
                                             (1 << BH_Dirty)|(1 << BH_Uptodate));
                }
                gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize-1);
        }
        return block_write_full_page(page, gfs2_get_block_noalloc, wbc);
}

/**
 * gfs2_jdata_writepage - Write complete page
 * @page: Page to write
 * @wbc: The writeback control
 *
 * Returns: errno
 *
 */

static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        int error;
        int done_trans = 0;

        error = gfs2_writepage_common(page, wbc);
        if (error <= 0)
                return error;

        if (PageChecked(page)) {
                if (wbc->sync_mode != WB_SYNC_ALL)
                        goto out_ignore;
                error = gfs2_trans_begin(sdp, RES_DINODE + 1, 0);
                if (error)
                        goto out_ignore;
                done_trans = 1;
        }
        error = __gfs2_jdata_writepage(page, wbc);
        if (done_trans)
                gfs2_trans_end(sdp);
        return error;

out_ignore:
        redirty_page_for_writepage(wbc, page);
        unlock_page(page);
        return 0;
}

/**
 * gfs2_writeback_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 *
 * For the data=writeback case we can already ignore buffer heads
 * and write whole extents at once. This is a big reduction in the
 * number of I/O requests we send and the bmap calls we make in this case.
 */
static int gfs2_writeback_writepages(struct address_space *mapping,
                                     struct writeback_control *wbc)
{
        return mpage_writepages(mapping, wbc, gfs2_get_block_noalloc);
}

/**
 * gfs2_write_jdata_pagevec - Write back a pagevec's worth of pages
 * @mapping: The mapping
 * @wbc: The writeback control
 * @pvec: The vector of pages
 * @nr_pages: The number of pages to write
 * @end: The last page index to consider when not in range_cyclic mode
 *
 * Returns: non-zero if loop should terminate, zero otherwise
 */

static int gfs2_write_jdata_pagevec(struct address_space *mapping,
                                    struct writeback_control *wbc,
                                    struct pagevec *pvec,
                                    int nr_pages, pgoff_t end)
{
        struct inode *inode = mapping->host;
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        loff_t i_size = i_size_read(inode);
        pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
        unsigned offset = i_size & (PAGE_CACHE_SIZE-1);
        unsigned nrblocks = nr_pages * (PAGE_CACHE_SIZE/inode->i_sb->s_blocksize);
        struct backing_dev_info *bdi = mapping->backing_dev_info;
        int i;
        int ret;

        ret = gfs2_trans_begin(sdp, nrblocks, 0);
        if (ret < 0)
                return ret;

        for (i = 0; i < nr_pages; i++) {
                struct page *page = pvec->pages[i];

                lock_page(page);

                if (unlikely(page->mapping != mapping)) {
                        unlock_page(page);
                        continue;
                }

                if (!wbc->range_cyclic && page->index > end) {
                        ret = 1;
                        unlock_page(page);
                        continue;
                }

                if (wbc->sync_mode != WB_SYNC_NONE)
                        wait_on_page_writeback(page);

                if (PageWriteback(page) ||
                    !clear_page_dirty_for_io(page)) {
                        unlock_page(page);
                        continue;
                }

                /* Is the page fully outside i_size? (truncate in progress) */
                if (page->index > end_index || (page->index == end_index && !offset)) {
                        page->mapping->a_ops->invalidatepage(page, 0);
                        unlock_page(page);
                        continue;
                }

                ret = __gfs2_jdata_writepage(page, wbc);

                if (ret || (--(wbc->nr_to_write) <= 0))
                        ret = 1;
                if (wbc->nonblocking && bdi_write_congested(bdi)) {
                        wbc->encountered_congestion = 1;
                        ret = 1;
                }
        }
        gfs2_trans_end(sdp);
        return ret;
}

/**
 * gfs2_write_cache_jdata - Like write_cache_pages but different
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * The reason that we use our own function here is that we need to
 * start transactions before we grab page locks. This allows us
 * to get the ordering right.
 */

static int gfs2_write_cache_jdata(struct address_space *mapping,
                                  struct writeback_control *wbc)
{
        struct backing_dev_info *bdi = mapping->backing_dev_info;
        int ret = 0;
        int done = 0;
        struct pagevec pvec;
        int nr_pages;
        pgoff_t index;
        pgoff_t end;
        int scanned = 0;
        int range_whole = 0;

        if (wbc->nonblocking && bdi_write_congested(bdi)) {
                wbc->encountered_congestion = 1;
                return 0;
        }

        pagevec_init(&pvec, 0);
        if (wbc->range_cyclic) {
                index = mapping->writeback_index; /* Start from prev offset */
                end = -1;
        } else {
                index = wbc->range_start >> PAGE_CACHE_SHIFT;
                end = wbc->range_end >> PAGE_CACHE_SHIFT;
                if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
                        range_whole = 1;
                scanned = 1;
        }

retry:
        while (!done && (index <= end) &&
               (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
                                              PAGECACHE_TAG_DIRTY,
                                              min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
                scanned = 1;
                ret = gfs2_write_jdata_pagevec(mapping, wbc, &pvec, nr_pages, end);
                if (ret)
                        done = 1;
                if (ret > 0)
                        ret = 0;

                pagevec_release(&pvec);
                cond_resched();
        }

        if (!scanned && !done) {
                /*
                 * We hit the last page and there is more work to be done: wrap
                 * back to the start of the file
                 */
                scanned = 1;
                index = 0;
                goto retry;
        }

        if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
                mapping->writeback_index = index;
        return ret;
}
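
/*
 * How the ordering above differs from generic write_cache_pages(), in
 * outline (illustrative pseudo-code only, using the names from this
 * file):
 *
 *	generic writeback:		jdata writeback:
 *	lock_page(page);		gfs2_trans_begin(sdp, nrblocks, 0);
 *	->writepage(page, wbc);		lock_page(page);
 *					__gfs2_jdata_writepage(page, wbc);
 *					...
 *					gfs2_trans_end(sdp);
 *
 * Starting the transaction before taking any page lock avoids deadlock
 * against contexts which hold a page lock while opening a transaction.
 */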

/**
 * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 */

static int gfs2_jdata_writepages(struct address_space *mapping,
                                 struct writeback_control *wbc)
{
        struct gfs2_inode *ip = GFS2_I(mapping->host);
        struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
        int ret;

        ret = gfs2_write_cache_jdata(mapping, wbc);
        if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
                gfs2_log_flush(sdp, ip->i_gl);
                ret = gfs2_write_cache_jdata(mapping, wbc);
        }
        return ret;
}

/**
 * stuffed_readpage - Fill in a Linux page with stuffed file data
 * @ip: the inode
 * @page: the page
 *
 * Returns: errno
 */

static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
        struct buffer_head *dibh;
        void *kaddr;
        int error;

        /*
         * Due to the order of unstuffing files and ->nopage(), we can be
         * asked for a zero page in the case of a stuffed file being extended,
         * so we need to supply one here. It doesn't happen often.
         */
        if (unlikely(page->index)) {
                zero_user(page, 0, PAGE_CACHE_SIZE);
                return 0;
        }

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (error)
                return error;

        kaddr = kmap_atomic(page, KM_USER0);
        memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode),
               ip->i_di.di_size);
        memset(kaddr + ip->i_di.di_size, 0, PAGE_CACHE_SIZE - ip->i_di.di_size);
        kunmap_atomic(kaddr, KM_USER0);
        flush_dcache_page(page);
        brelse(dibh);
        SetPageUptodate(page);

        return 0;
}
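
/*
 * Layout of a stuffed file's single block, as relied upon above
 * (sb_bsize is the filesystem block size):
 *
 *	+---------------------+------------------------------------+
 *	| struct gfs2_dinode  | file data (di_size bytes)          |
 *	+---------------------+------------------------------------+
 *	0                     sizeof(struct gfs2_dinode)    sb_bsize
 *
 * Page 0 is therefore filled with di_size bytes copied from just past
 * the dinode header and the rest of the page is zeroed, which is why
 * only a single page ever needs real data.
 */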

/**
 * __gfs2_readpage - readpage
 * @file: The file to read a page for
 * @page: The page to read
 *
 * This is the core of gfs2's readpage. It is used by the internal file
 * reading code as in that case we already hold the glock. It is also
 * called by gfs2_readpage() once the required lock has been granted.
 */

static int __gfs2_readpage(void *file, struct page *page)
{
        struct gfs2_inode *ip = GFS2_I(page->mapping->host);
        struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
        int error;

        if (gfs2_is_stuffed(ip)) {
                error = stuffed_readpage(ip, page);
                unlock_page(page);
        } else {
                error = mpage_readpage(page, gfs2_block_map);
        }

        if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                return -EIO;

        return error;
}

/**
 * gfs2_readpage - read a page of a file
 * @file: The file to read
 * @page: The page of the file
 *
 * This deals with the locking required. We use a trylock in order to
 * avoid the page lock / glock ordering problems, returning
 * AOP_TRUNCATED_PAGE in the event that we are unable to get the lock.
 */

static int gfs2_readpage(struct file *file, struct page *page)
{
        struct gfs2_inode *ip = GFS2_I(page->mapping->host);
        struct gfs2_holder gh;
        int error;

        gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME|LM_FLAG_TRY_1CB, &gh);
        error = gfs2_glock_nq_atime(&gh);
        if (unlikely(error)) {
                unlock_page(page);
                goto out;
        }
        error = __gfs2_readpage(file, page);
        gfs2_glock_dq(&gh);
out:
        gfs2_holder_uninit(&gh);
        if (error == GLR_TRYFAILED) {
                yield();
                return AOP_TRUNCATED_PAGE;
        }
        return error;
}

/**
 * gfs2_internal_read - read an internal file
 * @ip: The gfs2 inode
 * @ra_state: The readahead state (or NULL for no readahead)
 * @buf: The buffer to fill
 * @pos: The file position
 * @size: The amount to read
 *
 */

int gfs2_internal_read(struct gfs2_inode *ip, struct file_ra_state *ra_state,
                       char *buf, loff_t *pos, unsigned size)
{
        struct address_space *mapping = ip->i_inode.i_mapping;
        unsigned long index = *pos / PAGE_CACHE_SIZE;
        unsigned offset = *pos & (PAGE_CACHE_SIZE - 1);
        unsigned copied = 0;
        unsigned amt;
        struct page *page;
        void *p;

        do {
                amt = size - copied;
                if (offset + size > PAGE_CACHE_SIZE)
                        amt = PAGE_CACHE_SIZE - offset;
                page = read_cache_page(mapping, index, __gfs2_readpage, NULL);
                if (IS_ERR(page))
                        return PTR_ERR(page);
                p = kmap_atomic(page, KM_USER0);
                memcpy(buf + copied, p + offset, amt);
                kunmap_atomic(p, KM_USER0);
                mark_page_accessed(page);
                page_cache_release(page);
                copied += amt;
                index++;
                offset = 0;
        } while (copied < size);
        (*pos) += size;
        return size;
}
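
/*
 * Minimal usage sketch for gfs2_internal_read() (illustrative only).
 * A caller reading a fixed-size record from an internal file might do:
 *
 *	char buf[sizeof(struct gfs2_rindex)];
 *	loff_t pos = 0;
 *	int ret;
 *
 *	ret = gfs2_internal_read(ip, NULL, buf, &pos, sizeof(buf));
 *	if (ret != sizeof(buf))
 *		return (ret < 0) ? ret : -EIO;
 *
 * On success *pos has been advanced by the full size, so repeated
 * calls walk the file sequentially.
 */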

/**
 * gfs2_readpages - Read a bunch of pages at once
 * @file: The file being read
 * @mapping: The mapping of the file
 * @pages: The list of pages to read
 * @nr_pages: The number of pages
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore any things
 *    which are slightly inconvenient (such as locking conflicts between
 *    the page lock and the glock) and return having done no I/O. It's
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via readpage later.
 * 2. We don't handle stuffed files here; we let readpage do the honours.
 * 3. mpage_readpages() does most of the heavy lifting in the common case.
 * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
 */

static int gfs2_readpages(struct file *file, struct address_space *mapping,
                          struct list_head *pages, unsigned nr_pages)
{
        struct inode *inode = mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct gfs2_holder gh;
        int ret;

        gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &gh);
        ret = gfs2_glock_nq_atime(&gh);
        if (unlikely(ret))
                goto out_uninit;
        if (!gfs2_is_stuffed(ip))
                ret = mpage_readpages(mapping, pages, nr_pages, gfs2_block_map);
        gfs2_glock_dq(&gh);
out_uninit:
        gfs2_holder_uninit(&gh);
        if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                ret = -EIO;
        return ret;
}

/**
 * gfs2_write_begin - Begin to write to a file
 * @file: The file to write to
 * @mapping: The mapping in which to write
 * @pos: The file offset at which to start writing
 * @len: Length of the write
 * @flags: Various flags
 * @pagep: Pointer to return the page
 * @fsdata: Pointer to return fs data (unused by GFS2)
 *
 * Returns: errno
 */

static int gfs2_write_begin(struct file *file, struct address_space *mapping,
                            loff_t pos, unsigned len, unsigned flags,
                            struct page **pagep, void **fsdata)
{
        struct gfs2_inode *ip = GFS2_I(mapping->host);
        struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
        unsigned int data_blocks, ind_blocks, rblocks;
        int alloc_required;
        int error = 0;
        struct gfs2_alloc *al;
        pgoff_t index = pos >> PAGE_CACHE_SHIFT;
        unsigned from = pos & (PAGE_CACHE_SIZE - 1);
        unsigned to = from + len;
        struct page *page;

        gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_ATIME, &ip->i_gh);
        error = gfs2_glock_nq_atime(&ip->i_gh);
        if (unlikely(error))
                goto out_uninit;

        gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks);
        error = gfs2_write_alloc_required(ip, pos, len, &alloc_required);
        if (error)
                goto out_unlock;

        if (alloc_required) {
                al = gfs2_alloc_get(ip);

                error = gfs2_quota_lock(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
                if (error)
                        goto out_alloc_put;

                error = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid);
                if (error)
                        goto out_qunlock;

                al->al_requested = data_blocks + ind_blocks;
                error = gfs2_inplace_reserve(ip);
                if (error)
                        goto out_qunlock;
        }

        rblocks = RES_DINODE + ind_blocks;
        if (gfs2_is_jdata(ip))
                rblocks += data_blocks ? data_blocks : 1;
        if (ind_blocks || data_blocks)
                rblocks += RES_STATFS + RES_QUOTA;

        error = gfs2_trans_begin(sdp, rblocks,
                                 PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
        if (error)
                goto out_trans_fail;

        error = -ENOMEM;
        page = __grab_cache_page(mapping, index);
        *pagep = page;
        if (unlikely(!page))
                goto out_endtrans;

        if (gfs2_is_stuffed(ip)) {
                error = 0;
                if (pos + len > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) {
                        error = gfs2_unstuff_dinode(ip, page);
                        if (error == 0)
                                goto prepare_write;
                } else if (!PageUptodate(page)) {
                        error = stuffed_readpage(ip, page);
                }
                goto out;
        }

prepare_write:
        error = block_prepare_write(page, from, to, gfs2_block_map);
out:
        if (error == 0)
                return 0;

        page_cache_release(page);
        if (pos + len > ip->i_inode.i_size)
                vmtruncate(&ip->i_inode, ip->i_inode.i_size);
out_endtrans:
        gfs2_trans_end(sdp);
out_trans_fail:
        if (alloc_required) {
                gfs2_inplace_release(ip);
out_qunlock:
                gfs2_quota_unlock(ip);
out_alloc_put:
                gfs2_alloc_put(ip);
        }
out_unlock:
        gfs2_glock_dq(&ip->i_gh);
out_uninit:
        gfs2_holder_uninit(&ip->i_gh);
        return error;
}
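
/*
 * Worked example of the rblocks reservation above, assuming a 4KB
 * block size and a one-block write that needs allocation (say
 * data_blocks = 1 and ind_blocks = 2 as computed by
 * gfs2_write_calc_reserv()): rblocks = RES_DINODE + 2, plus one more
 * block if the inode is jdata (the data block then goes through the
 * journal too), plus RES_STATFS + RES_QUOTA because blocks are being
 * allocated.
 */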

/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */
static void adjust_fs_space(struct inode *inode)
{
        struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
        struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
        struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
        u64 fs_total, new_free;

        /* Total up the file system space, according to the latest rindex. */
        fs_total = gfs2_ri_total(sdp);

        spin_lock(&sdp->sd_statfs_spin);
        if (fs_total > (m_sc->sc_total + l_sc->sc_total))
                new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
        else
                new_free = 0;
        spin_unlock(&sdp->sd_statfs_spin);
        fs_warn(sdp, "File system extended by %llu blocks.\n",
                (unsigned long long)new_free);
        gfs2_statfs_change(sdp, new_free, new_free, 0);
}

/**
 * gfs2_stuffed_write_end - Write end for stuffed files
 * @inode: The inode
 * @dibh: The buffer_head containing the on-disk inode
 * @pos: The file position
 * @len: The length of the write
 * @copied: How much was actually copied by the VFS
 * @page: The page
 *
 * This copies the data from the page into the inode block after
 * the inode data structure itself.
 *
 * Returns: errno
 */
static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
                                  loff_t pos, unsigned len, unsigned copied,
                                  struct page *page)
{
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        u64 to = pos + copied;
        void *kaddr;
        unsigned char *buf = dibh->b_data + sizeof(struct gfs2_dinode);
        struct gfs2_dinode *di = (struct gfs2_dinode *)dibh->b_data;

        BUG_ON((pos + len) > (dibh->b_size - sizeof(struct gfs2_dinode)));
        kaddr = kmap_atomic(page, KM_USER0);
        memcpy(buf + pos, kaddr + pos, copied);
        memset(kaddr + pos + copied, 0, len - copied);
        flush_dcache_page(page);
        kunmap_atomic(kaddr, KM_USER0);

        if (!PageUptodate(page))
                SetPageUptodate(page);
        unlock_page(page);
        page_cache_release(page);

        if (inode->i_size < to) {
                i_size_write(inode, to);
                ip->i_di.di_size = inode->i_size;
                di->di_size = cpu_to_be64(inode->i_size);
                mark_inode_dirty(inode);
        }

        if (inode == sdp->sd_rindex)
                adjust_fs_space(inode);

        brelse(dibh);
        gfs2_trans_end(sdp);
        gfs2_glock_dq(&ip->i_gh);
        gfs2_holder_uninit(&ip->i_gh);
        return copied;
}

/**
 * gfs2_write_end
 * @file: The file to write to
 * @mapping: The address space to write to
 * @pos: The file position
 * @len: The length of the data
 * @copied: How much was actually copied by the VFS
 * @page: The page that has been written
 * @fsdata: The fsdata (unused in GFS2)
 *
 * The main write_end function for GFS2. We have a separate one for
 * stuffed files as they are slightly different, otherwise we just
 * put our locking around the VFS provided functions.
 *
 * Returns: errno
 */

static int gfs2_write_end(struct file *file, struct address_space *mapping,
                          loff_t pos, unsigned len, unsigned copied,
                          struct page *page, void *fsdata)
{
        struct inode *inode = page->mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct buffer_head *dibh;
        struct gfs2_alloc *al = ip->i_alloc;
        struct gfs2_dinode *di;
        unsigned int from = pos & (PAGE_CACHE_SIZE - 1);
        unsigned int to = from + len;
        int ret;

        BUG_ON(gfs2_glock_is_locked_by_me(ip->i_gl) == 0);

        ret = gfs2_meta_inode_buffer(ip, &dibh);
        if (unlikely(ret)) {
                unlock_page(page);
                page_cache_release(page);
                goto failed;
        }

        gfs2_trans_add_bh(ip->i_gl, dibh, 1);

        if (gfs2_is_stuffed(ip))
                return gfs2_stuffed_write_end(inode, dibh, pos, len, copied, page);

        if (!gfs2_is_writeback(ip))
                gfs2_page_add_databufs(ip, page, from, to);

        ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);

        if (likely(ret >= 0) && (inode->i_size > ip->i_di.di_size)) {
                di = (struct gfs2_dinode *)dibh->b_data;
                ip->i_di.di_size = inode->i_size;
                di->di_size = cpu_to_be64(inode->i_size);
                mark_inode_dirty(inode);
        }

        if (inode == sdp->sd_rindex)
                adjust_fs_space(inode);

        brelse(dibh);
        gfs2_trans_end(sdp);
failed:
        if (al) {
                gfs2_inplace_release(ip);
                gfs2_quota_unlock(ip);
                gfs2_alloc_put(ip);
        }
        gfs2_glock_dq(&ip->i_gh);
        gfs2_holder_uninit(&ip->i_gh);
        return ret;
}

/**
 * gfs2_set_page_dirty - Page dirtying function
 * @page: The page to dirty
 *
 * Returns: 1 if it dirtied the page, or 0 otherwise
 */

static int gfs2_set_page_dirty(struct page *page)
{
        SetPageChecked(page);
        return __set_page_dirty_buffers(page);
}

/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */

static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
        struct gfs2_inode *ip = GFS2_I(mapping->host);
        struct gfs2_holder i_gh;
        sector_t dblock = 0;
        int error;

        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
        if (error)
                return 0;

        if (!gfs2_is_stuffed(ip))
                dblock = generic_block_bmap(mapping, lblock, gfs2_block_map);

        gfs2_glock_dq_uninit(&i_gh);

        return dblock;
}
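
/*
 * gfs2_bmap() is what backs the FIBMAP ioctl. From user space
 * (illustrative sketch; FIBMAP requires CAP_SYS_RAWIO):
 *
 *	#include <linux/fs.h>
 *	#include <sys/ioctl.h>
 *
 *	int block = 0;		// logical block in, disk address out
 *	if (ioctl(fd, FIBMAP, &block) == 0)
 *		printf("disk address: %d\n", block);
 */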

static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
        struct gfs2_bufdata *bd;

        lock_buffer(bh);
        gfs2_log_lock(sdp);
        clear_buffer_dirty(bh);
        bd = bh->b_private;
        if (bd) {
                if (!list_empty(&bd->bd_le.le_list) && !buffer_pinned(bh))
                        list_del_init(&bd->bd_le.le_list);
                else
                        gfs2_remove_from_journal(bh, current->journal_info, 0);
        }
        bh->b_bdev = NULL;
        clear_buffer_mapped(bh);
        clear_buffer_req(bh);
        clear_buffer_new(bh);
        gfs2_log_unlock(sdp);
        unlock_buffer(bh);
}

static void gfs2_invalidatepage(struct page *page, unsigned long offset)
{
        struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
        struct buffer_head *bh, *head;
        unsigned long pos = 0;

        BUG_ON(!PageLocked(page));
        if (offset == 0)
                ClearPageChecked(page);
        if (!page_has_buffers(page))
                goto out;

        bh = head = page_buffers(page);
        do {
                if (offset <= pos)
                        gfs2_discard(sdp, bh);
                pos += bh->b_size;
                bh = bh->b_this_page;
        } while (bh != head);
out:
        if (offset == 0)
                try_to_release_page(page, 0);
}

/**
 * gfs2_ok_for_dio - check that dio is valid on this file
 * @ip: The inode
 * @rw: READ or WRITE
 * @offset: The offset at which we are reading or writing
 *
 * Returns: 0 (to ignore the i/o request and thus fall back to buffered i/o)
 *          1 (to accept the i/o request)
 */
static int gfs2_ok_for_dio(struct gfs2_inode *ip, int rw, loff_t offset)
{
        /*
         * Should we return an error here? I can't see that O_DIRECT for
         * a stuffed file makes any sense. For now we'll silently fall
         * back to buffered I/O.
         */
        if (gfs2_is_stuffed(ip))
                return 0;

        if (offset > i_size_read(&ip->i_inode))
                return 0;
        return 1;
}

static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
                              const struct iovec *iov, loff_t offset,
                              unsigned long nr_segs)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_holder gh;
        int rv;

        /*
         * Deferred lock, even if it's a write, since we do no allocation
         * on this path. All we need to change is atime, and this lock mode
         * ensures that other nodes have flushed their buffered read caches
         * (i.e. their page cache entries for this inode). We do not,
         * unfortunately, have the option of only flushing a range like
         * the VFS does.
         */
        gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, GL_ATIME, &gh);
        rv = gfs2_glock_nq_atime(&gh);
        if (rv)
                return rv;
        rv = gfs2_ok_for_dio(ip, rw, offset);
        if (rv != 1)
                goto out; /* dio not valid, fall back to buffered i/o */

        rv = blockdev_direct_IO_no_locking(rw, iocb, inode, inode->i_sb->s_bdev,
                                           iov, offset, nr_segs,
                                           gfs2_get_block_direct, NULL);
out:
        gfs2_glock_dq_m(1, &gh);
        gfs2_holder_uninit(&gh);
        return rv;
}
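
/*
 * Glock mode compatibility relevant to the deferred lock used above
 * (SH = LM_ST_SHARED, DF = LM_ST_DEFERRED, EX = LM_ST_EXCLUSIVE):
 *
 *		SH	DF	EX
 *	SH	yes	no	no
 *	DF	no	yes	no
 *	EX	no	no	no
 *
 * Holding LM_ST_DEFERRED therefore forces every node doing buffered
 * I/O (which uses SH or EX) to drop its cached pages, while still
 * allowing direct I/O from multiple nodes at once.
 */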

/**
 * gfs2_releasepage - free the metadata associated with a page
 * @page: the page that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Call try_to_free_buffers() if the buffers in this page can be
 * released.
 *
 * Returns: 0 if the buffers cannot be released, non-zero otherwise
 */

int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
{
        struct inode *aspace = page->mapping->host;
        struct gfs2_sbd *sdp = aspace->i_sb->s_fs_info;
        struct buffer_head *bh, *head;
        struct gfs2_bufdata *bd;

        if (!page_has_buffers(page))
                return 0;

        /*
         * First pass: check, under the log lock, that every buffer in
         * the page can actually be released.
         */
        gfs2_log_lock(sdp);
        head = bh = page_buffers(page);
        do {
                if (atomic_read(&bh->b_count))
                        goto cannot_release;
                bd = bh->b_private;
                if (bd && bd->bd_ail)
                        goto cannot_release;
                gfs2_assert_warn(sdp, !buffer_pinned(bh));
                gfs2_assert_warn(sdp, !buffer_dirty(bh));
                bh = bh->b_this_page;
        } while (bh != head);
        gfs2_log_unlock(sdp);

        /*
         * Second pass: detach the gfs2_bufdata from each buffer,
         * knowing from the first pass that this cannot fail part-way
         * through the page.
         */
        head = bh = page_buffers(page);
        do {
                gfs2_log_lock(sdp);
                bd = bh->b_private;
                if (bd) {
                        gfs2_assert_warn(sdp, bd->bd_bh == bh);
                        gfs2_assert_warn(sdp, list_empty(&bd->bd_list_tr));
                        if (!list_empty(&bd->bd_le.le_list)) {
                                if (!buffer_pinned(bh))
                                        list_del_init(&bd->bd_le.le_list);
                                else
                                        bd = NULL;
                        }
                        if (bd)
                                bd->bd_bh = NULL;
                        bh->b_private = NULL;
                }
                gfs2_log_unlock(sdp);
                if (bd)
                        kmem_cache_free(gfs2_bufdata_cachep, bd);

                bh = bh->b_this_page;
        } while (bh != head);

        return try_to_free_buffers(page);
cannot_release:
        gfs2_log_unlock(sdp);
        return 0;
}

static const struct address_space_operations gfs2_writeback_aops = {
        .writepage = gfs2_writeback_writepage,
        .writepages = gfs2_writeback_writepages,
        .readpage = gfs2_readpage,
        .readpages = gfs2_readpages,
        .sync_page = block_sync_page,
        .write_begin = gfs2_write_begin,
        .write_end = gfs2_write_end,
        .bmap = gfs2_bmap,
        .invalidatepage = gfs2_invalidatepage,
        .releasepage = gfs2_releasepage,
        .direct_IO = gfs2_direct_IO,
        .migratepage = buffer_migrate_page,
};

static const struct address_space_operations gfs2_ordered_aops = {
        .writepage = gfs2_ordered_writepage,
        .readpage = gfs2_readpage,
        .readpages = gfs2_readpages,
        .sync_page = block_sync_page,
        .write_begin = gfs2_write_begin,
        .write_end = gfs2_write_end,
        .set_page_dirty = gfs2_set_page_dirty,
        .bmap = gfs2_bmap,
        .invalidatepage = gfs2_invalidatepage,
        .releasepage = gfs2_releasepage,
        .direct_IO = gfs2_direct_IO,
        .migratepage = buffer_migrate_page,
};

static const struct address_space_operations gfs2_jdata_aops = {
        .writepage = gfs2_jdata_writepage,
        .writepages = gfs2_jdata_writepages,
        .readpage = gfs2_readpage,
        .readpages = gfs2_readpages,
        .sync_page = block_sync_page,
        .write_begin = gfs2_write_begin,
        .write_end = gfs2_write_end,
        .set_page_dirty = gfs2_set_page_dirty,
        .bmap = gfs2_bmap,
        .invalidatepage = gfs2_invalidatepage,
        .releasepage = gfs2_releasepage,
};

void gfs2_set_aops(struct inode *inode)
{
        struct gfs2_inode *ip = GFS2_I(inode);

        if (gfs2_is_writeback(ip))
                inode->i_mapping->a_ops = &gfs2_writeback_aops;
        else if (gfs2_is_ordered(ip))
                inode->i_mapping->a_ops = &gfs2_ordered_aops;
        else if (gfs2_is_jdata(ip))
                inode->i_mapping->a_ops = &gfs2_jdata_aops;
        else
                BUG();
}