/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "log.h"
#include "super.h"
#include "trans.h"
#include "dir.h"
#include "util.h"
#include "trace_gfs2.h"
/* This doesn't need to be that large as max 64 bit pointers in a 4k
 * block is 512, so __u16 is fine for that. It saves stack space to
 * keep it small.
 */
struct metapath {
	struct buffer_head *mp_bh[GFS2_MAX_META_HEIGHT];
	__u16 mp_list[GFS2_MAX_META_HEIGHT];
};

struct strip_mine {
	int sm_first;
	unsigned int sm_height;
};
/**
 * gfs2_unstuffer_page - unstuff a stuffed inode into a block cached by a page
 * @ip: the inode
 * @dibh: the dinode buffer
 * @block: the block number that was allocated
 * @page: The (optional) page. This is looked up if @page is NULL
 *
 * Returns: errno
 */

static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh,
			       u64 block, struct page *page)
{
	struct inode *inode = &ip->i_inode;
	struct buffer_head *bh;
	int release = 0;

	if (!page || page->index) {
		page = find_or_create_page(inode->i_mapping, 0, GFP_NOFS);
		if (!page)
			return -ENOMEM;
		release = 1;
	}

	if (!PageUptodate(page)) {
		void *kaddr = kmap(page);
		u64 dsize = i_size_read(inode);

		if (dsize > (dibh->b_size - sizeof(struct gfs2_dinode)))
			dsize = dibh->b_size - sizeof(struct gfs2_dinode);

		memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
		memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
		kunmap(page);

		SetPageUptodate(page);
	}

	if (!page_has_buffers(page))
		create_empty_buffers(page, BIT(inode->i_blkbits),
				     BIT(BH_Uptodate));

	bh = page_buffers(page);

	if (!buffer_mapped(bh))
		map_bh(bh, inode->i_sb, block);

	set_buffer_uptodate(bh);
	if (!gfs2_is_jdata(ip))
		mark_buffer_dirty(bh);
	if (!gfs2_is_writeback(ip))
		gfs2_trans_add_data(ip->i_gl, bh);

	if (release) {
		unlock_page(page);
		put_page(page);
	}

	return 0;
}
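/*
 * For scale (a sketch, assuming a 4k block size): a stuffed inode can
 * hold at most 4096 - sizeof(struct gfs2_dinode) bytes of data inline,
 * so the copy above moves up to that much into offset 0 of page index 0
 * and zeroes the remainder of the page.
 */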
/**
 * gfs2_unstuff_dinode - Unstuff a dinode when the data has grown too big
 * @ip: The GFS2 inode to unstuff
 * @page: The (optional) page. This is looked up if the @page is NULL
 *
 * This routine unstuffs a dinode and returns it to a "normal" state such
 * that the height can be grown in the traditional way.
 *
 * Returns: errno
 */

int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page)
{
	struct buffer_head *bh, *dibh;
	struct gfs2_dinode *di;
	u64 block = 0;
	int isdir = gfs2_is_dir(ip);
	int error;

	down_write(&ip->i_rw_mutex);

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto out;

	if (i_size_read(&ip->i_inode)) {
		/* Get a free block, fill it with the stuffed data,
		   and write it out to disk */

		unsigned int n = 1;
		error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL);
		if (error)
			goto out_brelse;
		if (isdir) {
			gfs2_trans_add_unrevoke(GFS2_SB(&ip->i_inode), block, 1);
			error = gfs2_dir_get_new_buffer(ip, block, &bh);
			if (error)
				goto out_brelse;
			gfs2_buffer_copy_tail(bh, sizeof(struct gfs2_meta_header),
					      dibh, sizeof(struct gfs2_dinode));
			brelse(bh);
		} else {
			error = gfs2_unstuffer_page(ip, dibh, block, page);
			if (error)
				goto out_brelse;
		}
	}

	/*  Set up the pointer to the new block  */

	gfs2_trans_add_meta(ip->i_gl, dibh);
	di = (struct gfs2_dinode *)dibh->b_data;
	gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));

	if (i_size_read(&ip->i_inode)) {
		*(__be64 *)(di + 1) = cpu_to_be64(block);
		gfs2_add_inode_blocks(&ip->i_inode, 1);
		di->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode));
	}

	ip->i_height = 1;
	di->di_height = cpu_to_be16(1);

out_brelse:
	brelse(dibh);
out:
	up_write(&ip->i_rw_mutex);
	return error;
}
/**
 * find_metapath - Find path through the metadata tree
 * @sdp: The superblock
 * @block: The disk block to look up
 * @mp: The metapath to return the result in
 * @height: The pre-calculated height of the metadata tree
 *
 * This routine returns a struct metapath structure that defines a path
 * through the metadata of inode "ip" to get to block "block".
 *
 * Example:
 * Given: "ip" is a height 3 file, "offset" is 101342453, and this is a
 * filesystem with a blocksize of 4096.
 *
 * find_metapath() would return a struct metapath structure set to:
 * mp_list[0] = 0, mp_list[1] = 48, and mp_list[2] = 165.
 *
 * That means that in order to get to the block containing the byte at
 * offset 101342453, we would load the indirect block pointed to by pointer
 * 0 in the dinode. We would then load the indirect block pointed to by
 * pointer 48 in that indirect block. We would then load the data block
 * pointed to by pointer 165 in that indirect block.
 *
 *             Dinode
 *               |  pointer 0
 *               V
 *             Indirect block (height 1)
 *               |  pointer 48
 *               V
 *             Indirect block (height 2)
 *               |  pointer 165
 *               V
 *             Data block containing offset 101342453
 *
 */
static void find_metapath(const struct gfs2_sbd *sdp, u64 block,
			  struct metapath *mp, unsigned int height)
{
	unsigned int i;

	for (i = height; i--;)
		mp->mp_list[i] = do_div(block, sdp->sd_inptrs);
}
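/*
 * Illustrative sketch of the decomposition above (assuming a 4k block,
 * so sd_inptrs == (4096 - sizeof(struct gfs2_meta_header)) / 8 == 509):
 * for height == 2 and block == 1000 the loop runs i = 1, then i = 0:
 *
 *	i = 1: mp_list[1] = 1000 % 509 = 491, block becomes 1
 *	i = 0: mp_list[0] = 1 % 509 = 1,      block becomes 0
 *
 * i.e. follow pointer 1 in the dinode, then pointer 491 in that
 * indirect block (1 * 509 + 491 == 1000).
 */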
static inline unsigned int metapath_branch_start(const struct metapath *mp)
{
	if (mp->mp_list[0] == 0)
		return 2;
	return 1;
}
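/*
 * Why 2 vs 1 (a sketch of the reasoning): when gfs2_bmap_alloc() grows
 * the height of the tree, the dinode's old pointers are copied into a
 * new indirect block which ends up behind pointer 0 of the dinode. If
 * the target block's path itself runs through pointer 0
 * (mp_list[0] == 0), the new branch can only diverge from that
 * relocated subtree at height 2; otherwise it diverges directly below
 * the dinode, at height 1.
 */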
/**
 * metapointer - Return pointer to start of metadata in a buffer
 * @height: The metadata height (0 = dinode)
 * @mp: The metapath
 *
 * Return a pointer to the block number of the next height of the metadata
 * tree given a buffer containing the pointer to the current height of the
 * metadata tree.
 */

static inline __be64 *metapointer(unsigned int height, const struct metapath *mp)
{
	struct buffer_head *bh = mp->mp_bh[height];
	unsigned int head_size = (height > 0) ?
		sizeof(struct gfs2_meta_header) : sizeof(struct gfs2_dinode);
	return ((__be64 *)(bh->b_data + head_size)) + mp->mp_list[height];
}
static void gfs2_metapath_ra(struct gfs2_glock *gl,
			     const struct buffer_head *bh, const __be64 *pos)
{
	struct buffer_head *rabh;
	const __be64 *endp = (const __be64 *)(bh->b_data + bh->b_size);
	const __be64 *t;

	for (t = pos; t < endp; t++) {
		if (!*t)
			continue;

		rabh = gfs2_getbuf(gl, be64_to_cpu(*t), CREATE);
		if (trylock_buffer(rabh)) {
			if (!buffer_uptodate(rabh)) {
				rabh->b_end_io = end_buffer_read_sync;
				submit_bh(REQ_OP_READ, REQ_RAHEAD | REQ_META,
					  rabh);
				continue;
			}
			unlock_buffer(rabh);
		}
		brelse(rabh);
	}
}
/**
 * lookup_metapath - Walk the metadata tree to a specific point
 * @ip: The inode
 * @mp: The metapath
 *
 * Assumes that the inode's buffer has already been looked up and
 * hooked onto mp->mp_bh[0] and that the metapath has been initialised
 * by find_metapath().
 *
 * If this function encounters part of the tree which has not been
 * allocated, it returns the current height of the tree at the point
 * at which it found the unallocated block. Blocks which are found are
 * added to the mp->mp_bh[] list.
 *
 * Returns: error or height of metadata tree
 */

static int lookup_metapath(struct gfs2_inode *ip, struct metapath *mp)
{
	unsigned int end_of_metadata = ip->i_height - 1;
	unsigned int x;
	__be64 *ptr;
	u64 dblock;
	int ret;

	for (x = 0; x < end_of_metadata; x++) {
		ptr = metapointer(x, mp);
		dblock = be64_to_cpu(*ptr);
		if (!dblock)
			return x + 1;

		ret = gfs2_meta_indirect_buffer(ip, x+1, dblock, &mp->mp_bh[x+1]);
		if (ret)
			return ret;
	}

	return ip->i_height;
}
static inline void release_metapath(struct metapath *mp)
{
	int i;

	for (i = 0; i < GFS2_MAX_META_HEIGHT; i++) {
		if (mp->mp_bh[i] == NULL)
			break;
		brelse(mp->mp_bh[i]);
	}
}
/**
 * gfs2_extent_length - Returns length of an extent of blocks
 * @start: Start of the buffer
 * @len: Length of the buffer in bytes
 * @ptr: Current position in the buffer
 * @limit: Max extent length to return (0 = unlimited)
 * @eob: Set to 1 if we hit "end of block"
 *
 * If the first block is zero (unallocated) it will return the number of
 * unallocated blocks in the extent, otherwise it will return the number
 * of contiguous blocks in the extent.
 *
 * Returns: The length of the extent (minimum of one block)
 */

static inline unsigned int gfs2_extent_length(void *start, unsigned int len, __be64 *ptr, size_t limit, int *eob)
{
	const __be64 *end = (start + len);
	const __be64 *first = ptr;
	u64 d = be64_to_cpu(*ptr);

	*eob = 0;
	do {
		ptr++;
		if (ptr >= end)
			break;
		if (limit && --limit == 0)
			break;
		if (d)
			d++;
	} while(be64_to_cpu(*ptr) == d);
	if (ptr >= end)
		*eob = 1;
	return (ptr - first);
}
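/*
 * Example use (a sketch, mirroring the callers below): given the buffer
 * of the bottom indirect block, callers typically do
 *
 *	ptr = metapointer(end_of_metadata, mp);
 *	len = gfs2_extent_length(bh->b_data, bh->b_size, ptr, maxlen, &eob);
 *
 * and get back the number of physically contiguous (or contiguously
 * unallocated) blocks starting at *ptr, capped at maxlen, with eob set
 * if the run hit the end of the pointer block.
 */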
static inline void bmap_lock(struct gfs2_inode *ip, int create)
{
	if (create)
		down_write(&ip->i_rw_mutex);
	else
		down_read(&ip->i_rw_mutex);
}

static inline void bmap_unlock(struct gfs2_inode *ip, int create)
{
	if (create)
		up_write(&ip->i_rw_mutex);
	else
		up_read(&ip->i_rw_mutex);
}
static inline __be64 *gfs2_indirect_init(struct metapath *mp,
					 struct gfs2_glock *gl, unsigned int i,
					 unsigned offset, u64 bn)
{
	__be64 *ptr = (__be64 *)(mp->mp_bh[i - 1]->b_data +
		       ((i > 1) ? sizeof(struct gfs2_meta_header) :
				 sizeof(struct gfs2_dinode)));
	BUG_ON(i < 1);
	BUG_ON(mp->mp_bh[i] != NULL);
	mp->mp_bh[i] = gfs2_meta_new(gl, bn);
	gfs2_trans_add_meta(gl, mp->mp_bh[i]);
	gfs2_metatype_set(mp->mp_bh[i], GFS2_METATYPE_IN, GFS2_FORMAT_IN);
	gfs2_buffer_clear_tail(mp->mp_bh[i], sizeof(struct gfs2_meta_header));
	ptr += offset;
	*ptr = cpu_to_be64(bn);
	return ptr;
}
enum alloc_state {
	ALLOC_DATA = 0,
	ALLOC_GROW_DEPTH = 1,
	ALLOC_GROW_HEIGHT = 2,
	/* ALLOC_UNSTUFF = 3,   TBD and rather complicated */
};
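/*
 * The allocator below runs these states in decreasing order and relies
 * on the switch cases falling through: ALLOC_GROW_HEIGHT (add new top
 * levels to the tree) -> ALLOC_GROW_DEPTH (fill in the branch down to
 * the bottom indirect block) -> ALLOC_DATA (write the data block
 * pointers). A single gfs2_alloc_blocks() extent may therefore be
 * consumed by more than one state in one pass round the loop.
 */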
/**
 * gfs2_bmap_alloc - Build a metadata tree of the requested height
 * @inode: The GFS2 inode
 * @lblock: The logical starting block of the extent
 * @bh_map: This is used to return the mapping details
 * @mp: The metapath
 * @sheight: The starting height (i.e. what's already mapped)
 * @height: The height to build to
 * @maxlen: The max number of data blocks to alloc
 *
 * In this routine we may have to alloc:
 *   i) Indirect blocks to grow the metadata tree height
 *  ii) Indirect blocks to fill in lower part of the metadata tree
 * iii) Data blocks
 *
 * The function is in two parts. The first part works out the total
 * number of blocks which we need. The second part does the actual
 * allocation asking for an extent at a time (if enough contiguous free
 * blocks are available, there will only be one request per bmap call)
 * and uses the state machine to initialise the blocks in order.
 *
 * Returns: errno on error
 */
static int gfs2_bmap_alloc(struct inode *inode, const sector_t lblock,
			   struct buffer_head *bh_map, struct metapath *mp,
			   const unsigned int sheight,
			   const unsigned int height,
			   const size_t maxlen)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct super_block *sb = sdp->sd_vfs;
	struct buffer_head *dibh = mp->mp_bh[0];
	u64 bn, dblock = 0;
	unsigned n, i, blks, alloced = 0, iblks = 0, branch_start = 0;
	unsigned dblks = 0;
	unsigned ptrs_per_blk;
	const unsigned end_of_metadata = height - 1;
	int ret;
	int eob = 0;
	enum alloc_state state;
	__be64 *ptr;
	__be64 zero_bn = 0;

	BUG_ON(sheight < 1);
	BUG_ON(dibh == NULL);

	gfs2_trans_add_meta(ip->i_gl, dibh);

	if (height == sheight) {
		struct buffer_head *bh;
		/* Bottom indirect block exists, find unalloced extent size */
		ptr = metapointer(end_of_metadata, mp);
		bh = mp->mp_bh[end_of_metadata];
		dblks = gfs2_extent_length(bh->b_data, bh->b_size, ptr, maxlen,
					   &eob);
		BUG_ON(dblks < 1);
		state = ALLOC_DATA;
	} else {
		/* Need to allocate indirect blocks */
		ptrs_per_blk = height > 1 ? sdp->sd_inptrs : sdp->sd_diptrs;
		dblks = min(maxlen, (size_t)(ptrs_per_blk -
					     mp->mp_list[end_of_metadata]));
		if (height == ip->i_height) {
			/* Writing into existing tree, extend tree down */
			iblks = height - sheight;
			state = ALLOC_GROW_DEPTH;
		} else {
			/* Building up tree height */
			state = ALLOC_GROW_HEIGHT;
			iblks = height - ip->i_height;
			branch_start = metapath_branch_start(mp);
			iblks += (height - branch_start);
		}
	}

	/* start of the second part of the function (state machine) */

	blks = dblks + iblks;
	i = sheight;
	do {
		int error;
		n = blks - alloced;
		error = gfs2_alloc_blocks(ip, &bn, &n, 0, NULL);
		if (error)
			return error;
		alloced += n;
		if (state != ALLOC_DATA || gfs2_is_jdata(ip))
			gfs2_trans_add_unrevoke(sdp, bn, n);
		switch (state) {
		/* Growing height of tree */
		case ALLOC_GROW_HEIGHT:
			if (i == 1) {
				ptr = (__be64 *)(dibh->b_data +
						 sizeof(struct gfs2_dinode));
				zero_bn = *ptr;
			}
			for (; i - 1 < height - ip->i_height && n > 0; i++, n--)
				gfs2_indirect_init(mp, ip->i_gl, i, 0, bn++);
			if (i - 1 == height - ip->i_height) {
				i--;
				gfs2_buffer_copy_tail(mp->mp_bh[i],
						sizeof(struct gfs2_meta_header),
						dibh, sizeof(struct gfs2_dinode));
				gfs2_buffer_clear_tail(dibh,
						sizeof(struct gfs2_dinode) +
						sizeof(__be64));
				ptr = (__be64 *)(mp->mp_bh[i]->b_data +
					sizeof(struct gfs2_meta_header));
				*ptr = zero_bn;
				state = ALLOC_GROW_DEPTH;
				for (i = branch_start; i < height; i++) {
					if (mp->mp_bh[i] == NULL)
						break;
					brelse(mp->mp_bh[i]);
					mp->mp_bh[i] = NULL;
				}
				i = branch_start;
			}
			if (n == 0)
				break;
			/* fall through - keep initialising the branch */
		/* Branching from existing tree */
		case ALLOC_GROW_DEPTH:
			if (i > 1 && i < height)
				gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[i-1]);
			for (; i < height && n > 0; i++, n--)
				gfs2_indirect_init(mp, ip->i_gl, i,
						   mp->mp_list[i-1], bn++);
			if (i == height)
				state = ALLOC_DATA;
			if (n == 0)
				break;
			/* fall through - remaining blocks become data blocks */
		/* Tree complete, adding data blocks */
		case ALLOC_DATA:
			BUG_ON(n > dblks);
			BUG_ON(mp->mp_bh[end_of_metadata] == NULL);
			gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[end_of_metadata]);
			dblks = n;
			ptr = metapointer(end_of_metadata, mp);
			dblock = bn;
			while (n-- > 0)
				*ptr++ = cpu_to_be64(bn++);
			if (buffer_zeronew(bh_map)) {
				ret = sb_issue_zeroout(sb, dblock, dblks,
						       GFP_NOFS);
				if (ret) {
					fs_err(sdp,
					       "Failed to zero data buffers\n");
					clear_buffer_zeronew(bh_map);
				}
			}
			break;
		}
	} while ((state != ALLOC_DATA) || !dblock);

	ip->i_height = height;
	gfs2_add_inode_blocks(&ip->i_inode, alloced);
	gfs2_dinode_out(ip, mp->mp_bh[0]->b_data);
	map_bh(bh_map, inode->i_sb, dblock);
	bh_map->b_size = dblks << inode->i_blkbits;
	set_buffer_new(bh_map);
	return 0;
}
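/*
 * A sketch of the block accounting above (hypothetical numbers): growing
 * a height-1 file to height 3 to map one new data block, with
 * mp_list[0] != 0, needs iblks = (3 - 1) = 2 indirect blocks for the new
 * top levels plus (3 - branch_start) = 2 for the new branch, so
 * blks = 4 indirect + 1 data = 5. If the resource group can supply all
 * five contiguously, a single gfs2_alloc_blocks() call feeds all three
 * states in one pass round the loop.
 */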
/**
 * gfs2_block_map - Map a block from an inode to a disk block
 * @inode: The inode
 * @lblock: The logical block number
 * @bh_map: The bh to be mapped
 * @create: True if it's ok to alloc blocks to satisfy the request
 *
 * Sets buffer_mapped() if successful, sets buffer_boundary() if a
 * read of metadata will be required before the next block can be
 * mapped. Sets buffer_new() if new blocks were allocated.
 *
 * Returns: errno
 */
int gfs2_block_map(struct inode *inode, sector_t lblock,
		   struct buffer_head *bh_map, int create)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	unsigned int bsize = sdp->sd_sb.sb_bsize;
	const size_t maxlen = bh_map->b_size >> inode->i_blkbits;
	const u64 *arr = sdp->sd_heightsize;
	__be64 *ptr;
	u64 size;
	struct metapath mp;
	int ret;
	int eob;
	unsigned int len;
	struct buffer_head *bh;
	u8 height;

	BUG_ON(maxlen == 0);

	memset(mp.mp_bh, 0, sizeof(mp.mp_bh));
	bmap_lock(ip, create);
	clear_buffer_mapped(bh_map);
	clear_buffer_new(bh_map);
	clear_buffer_boundary(bh_map);
	trace_gfs2_bmap(ip, bh_map, lblock, create, 1);
	if (gfs2_is_dir(ip)) {
		bsize = sdp->sd_jbsize;
		arr = sdp->sd_jheightsize;
	}

	ret = gfs2_meta_inode_buffer(ip, &mp.mp_bh[0]);
	if (ret)
		goto out;

	height = ip->i_height;
	size = (lblock + 1) * bsize;
	while (size > arr[height])
		height++;
	find_metapath(sdp, lblock, &mp, height);
	ret = 1;
	if (height > ip->i_height || gfs2_is_stuffed(ip))
		goto do_alloc;
	ret = lookup_metapath(ip, &mp);
	if (ret < 0)
		goto out;
	if (ret != ip->i_height)
		goto do_alloc;
	ptr = metapointer(ip->i_height - 1, &mp);
	if (*ptr == 0)
		goto do_alloc;
	map_bh(bh_map, inode->i_sb, be64_to_cpu(*ptr));
	bh = mp.mp_bh[ip->i_height - 1];
	len = gfs2_extent_length(bh->b_data, bh->b_size, ptr, maxlen, &eob);
	bh_map->b_size = (len << inode->i_blkbits);
	if (eob)
		set_buffer_boundary(bh_map);
	ret = 0;
out:
	release_metapath(&mp);
	trace_gfs2_bmap(ip, bh_map, lblock, create, ret);
	bmap_unlock(ip, create);
	return ret;

do_alloc:
	/* All allocations are done here, firstly check create flag */
	if (!create) {
		BUG_ON(gfs2_is_stuffed(ip));
		ret = 0;
		goto out;
	}

	/* At this point ret is the tree depth of already allocated blocks */
	ret = gfs2_bmap_alloc(inode, lblock, bh_map, &mp, ret, height, maxlen);
	goto out;
}
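/*
 * Typical call pattern (a sketch): a caller that wants a mapping for up
 * to N blocks starting at lblock sets bh.b_size before the call:
 *
 *	struct buffer_head bh = { .b_state = 0, .b_blocknr = 0 };
 *
 *	bh.b_size = N << inode->i_blkbits;
 *	ret = gfs2_block_map(inode, lblock, &bh, 0);
 *	extlen = bh.b_size >> inode->i_blkbits;
 *
 * On return, buffer_mapped() says whether anything is allocated there,
 * and b_size has been trimmed to the length of the mapped extent; this
 * is essentially what gfs2_extent_map() below does.
 */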
/*
 * Deprecated: do not use in new code
 */
int gfs2_extent_map(struct inode *inode, u64 lblock, int *new, u64 *dblock, unsigned *extlen)
{
	struct buffer_head bh = { .b_state = 0, .b_blocknr = 0 };
	int ret;
	int create = *new;

	BUG_ON(!extlen);
	BUG_ON(!dblock);
	BUG_ON(!new);

	bh.b_size = BIT(inode->i_blkbits + (create ? 0 : 5));
	ret = gfs2_block_map(inode, lblock, &bh, create);
	*extlen = bh.b_size >> inode->i_blkbits;
	*dblock = bh.b_blocknr;
	if (buffer_new(&bh))
		*new = 1;
	else
		*new = 0;
	return ret;
}
/**
 * do_strip - Look for a particular layer of the file and strip it off
 * @ip: the inode
 * @dibh: the dinode buffer
 * @bh: A buffer of pointers
 * @top: The first pointer in the buffer
 * @bottom: One more than the last pointer
 * @height: the height this buffer is at
 * @sm: a pointer to a struct strip_mine
 *
 * Returns: errno
 */

static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh,
		    struct buffer_head *bh, __be64 *top, __be64 *bottom,
		    unsigned int height, struct strip_mine *sm)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrp_list rlist;
	struct gfs2_trans *tr;
	u64 bn, bstart;
	u32 blen, btotal;
	__be64 *p;
	unsigned int rg_blocks = 0;
	int metadata;
	unsigned int revokes = 0;
	int x;
	int error;
	int jblocks_rqsted;

	error = gfs2_rindex_update(sdp);
	if (error)
		return error;

	if (!*top)
		sm->sm_first = 0;

	if (height != sm->sm_height)
		return 0;

	if (sm->sm_first) {
		top++;
		sm->sm_first = 0;
	}

	metadata = (height != ip->i_height - 1);
	if (metadata)
		revokes = (height) ? sdp->sd_inptrs : sdp->sd_diptrs;
	else if (ip->i_depth)
		revokes = sdp->sd_inptrs;

	memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));
	bstart = 0;
	blen = 0;

	for (p = top; p < bottom; p++) {
		if (!*p)
			continue;

		bn = be64_to_cpu(*p);

		if (bstart + blen == bn)
			blen++;
		else {
			if (bstart)
				gfs2_rlist_add(ip, &rlist, bstart);

			bstart = bn;
			blen = 1;
		}
	}

	if (bstart)
		gfs2_rlist_add(ip, &rlist, bstart);
	else
		goto out; /* Nothing to do */

	gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE);

	for (x = 0; x < rlist.rl_rgrps; x++) {
		struct gfs2_rgrpd *rgd;
		rgd = rlist.rl_ghs[x].gh_gl->gl_object;
		rg_blocks += rgd->rd_length;
	}

	error = gfs2_glock_nq_m(rlist.rl_rgrps, rlist.rl_ghs);
	if (error)
		goto out_rlist;

	if (gfs2_rs_active(&ip->i_res)) /* needs to be done with the rgrp glock held */
		gfs2_rs_deltree(&ip->i_res);

restart:
	jblocks_rqsted = rg_blocks + RES_DINODE +
		RES_INDIRECT + RES_STATFS + RES_QUOTA +
		gfs2_struct2blk(sdp, revokes, sizeof(u64));
	if (jblocks_rqsted > atomic_read(&sdp->sd_log_thresh2))
		jblocks_rqsted = atomic_read(&sdp->sd_log_thresh2);
	error = gfs2_trans_begin(sdp, jblocks_rqsted, revokes);
	if (error)
		goto out_rg_gunlock;

	tr = current->journal_info;
	down_write(&ip->i_rw_mutex);

	gfs2_trans_add_meta(ip->i_gl, dibh);
	gfs2_trans_add_meta(ip->i_gl, bh);

	bstart = 0;
	blen = 0;
	btotal = 0;

	for (p = top; p < bottom; p++) {
		if (!*p)
			continue;

		/* check for max reasonable journal transaction blocks */
		if (tr->tr_num_buf_new + RES_STATFS +
		    RES_QUOTA >= atomic_read(&sdp->sd_log_thresh2)) {
			if (rg_blocks >= tr->tr_num_buf_new)
				rg_blocks -= tr->tr_num_buf_new;
			else
				rg_blocks = 0;
			break;
		}

		bn = be64_to_cpu(*p);

		if (bstart + blen == bn)
			blen++;
		else {
			if (bstart) {
				__gfs2_free_blocks(ip, bstart, blen, metadata);
				btotal += blen;
			}

			bstart = bn;
			blen = 1;
		}

		*p = 0;
		gfs2_add_inode_blocks(&ip->i_inode, -1);
	}
	if (p == bottom)
		rg_blocks = 0;

	if (bstart) {
		__gfs2_free_blocks(ip, bstart, blen, metadata);
		btotal += blen;
	}

	gfs2_statfs_change(sdp, 0, +btotal, 0);
	gfs2_quota_change(ip, -(s64)btotal, ip->i_inode.i_uid,
			  ip->i_inode.i_gid);

	ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);

	gfs2_dinode_out(ip, dibh->b_data);

	up_write(&ip->i_rw_mutex);

	gfs2_trans_end(sdp);

	if (rg_blocks)
		goto restart;

out_rg_gunlock:
	gfs2_glock_dq_m(rlist.rl_rgrps, rlist.rl_ghs);
out_rlist:
	gfs2_rlist_free(&rlist);
out:
	return error;
}
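/*
 * Note on the restart loop above (a sketch of the design): blocks are
 * freed in runs of physically contiguous extents, and when the running
 * transaction gets close to sd_log_thresh2 buffers the inner loop
 * breaks out, the transaction is committed, and the remaining pointers
 * are handled by a fresh transaction via the "restart" label. This
 * keeps any single transaction within a bounded journal footprint.
 */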
/**
 * recursive_scan - recursively scan through the end of a file
 * @ip: the inode
 * @dibh: the dinode buffer
 * @mp: the path through the metadata to the point to start
 * @height: the height the recursion is at
 * @block: the indirect block to look at
 * @first: 1 if this is the first block
 * @sm: the strip_mine to pass along to do_strip()
 *
 * When this is first called @height and @block should be zero and
 * @first should be 1.
 *
 * Returns: errno
 */
static int recursive_scan(struct gfs2_inode *ip, struct buffer_head *dibh,
			  struct metapath *mp, unsigned int height,
			  u64 block, int first, struct strip_mine *sm)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head *bh = NULL;
	__be64 *top, *bottom;
	u64 bn;
	int error;
	int mh_size = sizeof(struct gfs2_meta_header);

	if (!height) {
		error = gfs2_meta_inode_buffer(ip, &bh);
		if (error)
			return error;
		dibh = bh;

		top = (__be64 *)(bh->b_data + sizeof(struct gfs2_dinode)) + mp->mp_list[0];
		bottom = (__be64 *)(bh->b_data + sizeof(struct gfs2_dinode)) + sdp->sd_diptrs;
	} else {
		error = gfs2_meta_indirect_buffer(ip, height, block, &bh);
		if (error)
			return error;

		top = (__be64 *)(bh->b_data + mh_size) +
				  (first ? mp->mp_list[height] : 0);

		bottom = (__be64 *)(bh->b_data + mh_size) + sdp->sd_inptrs;
	}

	error = do_strip(ip, dibh, bh, top, bottom, height, sm);
	if (error)
		goto out;

	if (height < ip->i_height - 1) {

		gfs2_metapath_ra(ip->i_gl, bh, top);

		for (; top < bottom; top++, first = 0) {
			if (!*top)
				continue;

			bn = be64_to_cpu(*top);

			error = recursive_scan(ip, dibh, mp, height + 1, bn,
					       first, sm);
			if (error)
				break;
		}
	}
out:
	brelse(bh);
	return error;
}
/**
 * gfs2_block_truncate_page - Deal with zeroing out data for truncate
 * @mapping: The address space of the inode being truncated
 * @from: The file offset to truncate from
 *
 * This is partly borrowed from ext3.
 */
static int gfs2_block_truncate_page(struct address_space *mapping, loff_t from)
{
	struct inode *inode = mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	unsigned long index = from >> PAGE_SHIFT;
	unsigned offset = from & (PAGE_SIZE-1);
	unsigned blocksize, iblock, length, pos;
	struct buffer_head *bh;
	struct page *page;
	int err;

	page = find_or_create_page(mapping, index, GFP_NOFS);
	if (!page)
		return 0;

	blocksize = inode->i_sb->s_blocksize;
	length = blocksize - (offset & (blocksize - 1));
	iblock = index << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits);

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	/* Find the buffer that contains "offset" */
	bh = page_buffers(page);
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}

	err = 0;

	if (!buffer_mapped(bh)) {
		gfs2_block_map(inode, iblock, bh, 0);
		/* unmapped? It's a hole - nothing to do */
		if (!buffer_mapped(bh))
			goto unlock;
	}

	/* Ok, it's mapped. Make sure it's up-to-date */
	if (PageUptodate(page))
		set_buffer_uptodate(bh);

	if (!buffer_uptodate(bh)) {
		err = -EIO;
		ll_rw_block(REQ_OP_READ, 0, 1, &bh);
		wait_on_buffer(bh);
		/* Uhhuh. Read error. Complain and punt. */
		if (!buffer_uptodate(bh))
			goto unlock;
		err = 0;
	}

	if (!gfs2_is_writeback(ip))
		gfs2_trans_add_data(ip->i_gl, bh);

	zero_user(page, offset, length);
	mark_buffer_dirty(bh);
unlock:
	unlock_page(page);
	put_page(page);
	return err;
}
#define GFS2_JTRUNC_REVOKES 8192
/**
 * gfs2_journaled_truncate - Wrapper for truncate_pagecache for jdata files
 * @inode: The inode being truncated
 * @oldsize: The original (larger) size
 * @newsize: The new smaller size
 *
 * With jdata files, we have to journal a revoke for each block which is
 * truncated. As a result, we need to split this into separate transactions
 * if the number of pages being truncated gets too large.
 */

static int gfs2_journaled_truncate(struct inode *inode, u64 oldsize, u64 newsize)
{
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	u64 max_chunk = GFS2_JTRUNC_REVOKES * sdp->sd_vfs->s_blocksize;
	u64 chunk;
	int error;

	while (oldsize != newsize) {
		chunk = oldsize - newsize;
		if (chunk > max_chunk)
			chunk = max_chunk;
		truncate_pagecache(inode, oldsize - chunk);
		oldsize -= chunk;

		gfs2_trans_end(sdp);
		error = gfs2_trans_begin(sdp, RES_DINODE, GFS2_JTRUNC_REVOKES);
		if (error)
			return error;
	}

	return 0;
}
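/*
 * Example of the chunking (assuming a 4k block size): max_chunk is
 * 8192 * 4096 bytes = 32MB, so truncating a 100MB jdata file down to
 * zero proceeds as 32MB + 32MB + 32MB + 4MB, with the transaction
 * ended and restarted between chunks so that no single transaction
 * carries more than GFS2_JTRUNC_REVOKES revokes.
 */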
static int trunc_start(struct inode *inode, u64 oldsize, u64 newsize)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	struct buffer_head *dibh;
	int journaled = gfs2_is_jdata(ip);
	int error;

	if (journaled)
		error = gfs2_trans_begin(sdp, RES_DINODE + RES_JDATA, GFS2_JTRUNC_REVOKES);
	else
		error = gfs2_trans_begin(sdp, RES_DINODE, 0);
	if (error)
		return error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto out;

	gfs2_trans_add_meta(ip->i_gl, dibh);

	if (gfs2_is_stuffed(ip)) {
		gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode) + newsize);
	} else {
		if (newsize & (u64)(sdp->sd_sb.sb_bsize - 1)) {
			error = gfs2_block_truncate_page(mapping, newsize);
			if (error)
				goto out_brelse;
		}
		ip->i_diskflags |= GFS2_DIF_TRUNC_IN_PROG;
	}

	i_size_write(inode, newsize);
	ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
	gfs2_dinode_out(ip, dibh->b_data);

	if (journaled)
		error = gfs2_journaled_truncate(inode, oldsize, newsize);
	else
		truncate_pagecache(inode, newsize);

	if (error) {
		brelse(dibh);
		return error;
	}

out_brelse:
	brelse(dibh);
out:
	gfs2_trans_end(sdp);
	return error;
}
static int trunc_dealloc(struct gfs2_inode *ip, u64 size)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	unsigned int height = ip->i_height;
	u64 lblock;
	struct metapath mp;
	int error;

	if (!size)
		lblock = 0;
	else
		lblock = (size - 1) >> sdp->sd_sb.sb_bsize_shift;

	find_metapath(sdp, lblock, &mp, ip->i_height);
	error = gfs2_rindex_update(sdp);
	if (error)
		return error;

	error = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
	if (error)
		return error;

	while (height--) {
		struct strip_mine sm;
		sm.sm_first = !!size;
		sm.sm_height = height;

		error = recursive_scan(ip, NULL, &mp, 0, 0, 1, &sm);
		if (error)
			break;
	}

	gfs2_quota_unhold(ip);

	return error;
}
static int trunc_end(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head *dibh;
	int error;

	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
	if (error)
		return error;

	down_write(&ip->i_rw_mutex);

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto out;

	if (!i_size_read(&ip->i_inode)) {
		ip->i_height = 0;
		ip->i_goal = ip->i_no_addr;
		gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
		gfs2_ordered_del_inode(ip);
	}
	ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
	ip->i_diskflags &= ~GFS2_DIF_TRUNC_IN_PROG;

	gfs2_trans_add_meta(ip->i_gl, dibh);
	gfs2_dinode_out(ip, dibh->b_data);
	brelse(dibh);

out:
	up_write(&ip->i_rw_mutex);
	gfs2_trans_end(sdp);
	return error;
}
/**
 * do_shrink - make a file smaller
 * @inode: the inode
 * @oldsize: the current inode size
 * @newsize: the size to make the file
 *
 * Called with an exclusive lock on @inode. @newsize must be equal to
 * or smaller than the current inode size.
 *
 * Returns: errno
 */

static int do_shrink(struct inode *inode, u64 oldsize, u64 newsize)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	int error;

	error = trunc_start(inode, oldsize, newsize);
	if (error < 0)
		return error;
	if (gfs2_is_stuffed(ip))
		return 0;

	error = trunc_dealloc(ip, newsize);
	if (error == 0)
		error = trunc_end(ip);

	return error;
}
void gfs2_trim_blocks(struct inode *inode)
{
	u64 size = inode->i_size;
	int ret;

	ret = do_shrink(inode, size, size);
	WARN_ON(ret != 0);
}
/**
 * do_grow - Touch and update inode size
 * @inode: The inode
 * @size: The new size
 *
 * This function updates the timestamps on the inode and
 * may also increase the size of the inode. This function
 * must not be called with @size any smaller than the current
 * inode size.
 *
 * Although it is not strictly required to unstuff files here,
 * earlier versions of GFS2 have a bug in the stuffed file reading
 * code which will result in a buffer overrun if the size is larger
 * than the max stuffed file size. In order to prevent this from
 * occurring, such files are unstuffed, but in other cases we can
 * just update the inode size directly.
 *
 * Returns: 0 on success, or -ve on error
 */
static int do_grow(struct inode *inode, u64 size)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_alloc_parms ap = { .target = 1, };
	struct buffer_head *dibh;
	int error;
	int unstuff = 0;

	if (gfs2_is_stuffed(ip) &&
	    (size > (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)))) {
		error = gfs2_quota_lock_check(ip, &ap);
		if (error)
			return error;

		error = gfs2_inplace_reserve(ip, &ap);
		if (error)
			goto do_grow_qunlock;
		unstuff = 1;
	}

	error = gfs2_trans_begin(sdp, RES_DINODE + RES_STATFS + RES_RG_BIT +
				 (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF ?
				  0 : RES_QUOTA), 0);
	if (error)
		goto do_grow_release;

	if (unstuff) {
		error = gfs2_unstuff_dinode(ip, NULL);
		if (error)
			goto do_end_trans;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto do_end_trans;

	i_size_write(inode, size);
	ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
	gfs2_trans_add_meta(ip->i_gl, dibh);
	gfs2_dinode_out(ip, dibh->b_data);
	brelse(dibh);

do_end_trans:
	gfs2_trans_end(sdp);
do_grow_release:
	if (unstuff) {
		gfs2_inplace_release(ip);
do_grow_qunlock:
		gfs2_quota_unlock(ip);
	}
	return error;
}
/**
 * gfs2_setattr_size - make a file a given size
 * @inode: the inode
 * @newsize: the size to make the file
 *
 * The file size can grow, shrink, or stay the same size. This
 * is called holding i_mutex and an exclusive glock on the inode
 * in question.
 *
 * Returns: errno
 */

int gfs2_setattr_size(struct inode *inode, u64 newsize)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	int ret;
	u64 oldsize;

	BUG_ON(!S_ISREG(inode->i_mode));

	ret = inode_newsize_ok(inode, newsize);
	if (ret)
		return ret;

	inode_dio_wait(inode);

	ret = gfs2_rsqa_alloc(ip);
	if (ret)
		goto out;

	oldsize = inode->i_size;
	if (newsize >= oldsize) {
		ret = do_grow(inode, newsize);
		goto out;
	}

	ret = do_shrink(inode, oldsize, newsize);
out:
	gfs2_rsqa_delete(ip, NULL);
	return ret;
}
int gfs2_truncatei_resume(struct gfs2_inode *ip)
{
	int error;

	error = trunc_dealloc(ip, i_size_read(&ip->i_inode));
	if (!error)
		error = trunc_end(ip);
	return error;
}

int gfs2_file_dealloc(struct gfs2_inode *ip)
{
	return trunc_dealloc(ip, 0);
}
/**
 * gfs2_free_journal_extents - Free cached journal bmap info
 * @jd: The journal
 *
 */

void gfs2_free_journal_extents(struct gfs2_jdesc *jd)
{
	struct gfs2_journal_extent *jext;

	while(!list_empty(&jd->extent_list)) {
		jext = list_entry(jd->extent_list.next, struct gfs2_journal_extent, list);
		list_del(&jext->list);
		kfree(jext);
	}
}
/**
 * gfs2_add_jextent - Add or merge a new extent to extent cache
 * @jd: The journal descriptor
 * @lblock: The logical block at start of new extent
 * @dblock: The physical block at start of new extent
 * @blocks: Size of extent in fs blocks
 *
 * Returns: 0 on success or -ENOMEM
 */

static int gfs2_add_jextent(struct gfs2_jdesc *jd, u64 lblock, u64 dblock, u64 blocks)
{
	struct gfs2_journal_extent *jext;

	if (!list_empty(&jd->extent_list)) {
		jext = list_entry(jd->extent_list.prev, struct gfs2_journal_extent, list);
		if ((jext->dblock + jext->blocks) == dblock) {
			jext->blocks += blocks;
			return 0;
		}
	}

	jext = kzalloc(sizeof(struct gfs2_journal_extent), GFP_NOFS);
	if (jext == NULL)
		return -ENOMEM;
	jext->dblock = dblock;
	jext->lblock = lblock;
	jext->blocks = blocks;
	list_add_tail(&jext->list, &jd->extent_list);
	jd->nr_extents++;
	return 0;
}
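/*
 * Example (hypothetical block numbers): adding (lblock=0, dblock=100,
 * blocks=8) and then (lblock=8, dblock=108, blocks=8) leaves a single
 * cached extent of 16 blocks, because the second extent starts exactly
 * where the previous one ends (jext->dblock + jext->blocks == dblock).
 */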
/**
 * gfs2_map_journal_extents - Cache journal bmap info
 * @sdp: The super block
 * @jd: The journal to map
 *
 * Create a reusable "extent" mapping from all logical
 * blocks to all physical blocks for the given journal. This will save
 * us time when writing journal blocks. Most journals will have only one
 * extent that maps all their logical blocks. That's because mkfs.gfs2
 * arranges the journal blocks sequentially to maximize performance.
 * So the extent would map the first block for the entire file length.
 * However, gfs2_jadd can happen while file activity is happening, so
 * those journals may not be sequential. Less likely is the case where
 * the users created their own journals by mounting the metafs and
 * laying it out. But it's still possible. These journals might have
 * several extents.
 *
 * Returns: 0 on success, or error on failure
 */
int gfs2_map_journal_extents(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd)
{
	u64 lblock = 0;
	u64 lblock_stop;
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct buffer_head bh;
	unsigned int shift = sdp->sd_sb.sb_bsize_shift;
	u64 size;
	int rc;

	lblock_stop = i_size_read(jd->jd_inode) >> shift;
	size = (lblock_stop - lblock) << shift;
	jd->nr_extents = 0;
	WARN_ON(!list_empty(&jd->extent_list));

	do {
		bh.b_state = 0;
		bh.b_blocknr = 0;
		bh.b_size = size;
		rc = gfs2_block_map(jd->jd_inode, lblock, &bh, 0);
		if (rc || !buffer_mapped(&bh))
			goto fail;
		rc = gfs2_add_jextent(jd, lblock, bh.b_blocknr, bh.b_size >> shift);
		if (rc)
			goto fail;
		size -= bh.b_size;
		lblock += (bh.b_size >> ip->i_inode.i_blkbits);
	} while(size > 0);

	fs_info(sdp, "journal %d mapped with %u extents\n", jd->jd_jid,
		jd->nr_extents);
	return 0;

fail:
	fs_warn(sdp, "error %d mapping journal %u at offset %llu (extent %u)\n",
		rc, jd->jd_jid,
		(unsigned long long)(i_size_read(jd->jd_inode) - size),
		jd->nr_extents);
	fs_warn(sdp, "bmap=%d lblock=%llu block=%llu, state=0x%08lx, size=%llu\n",
		rc, (unsigned long long)lblock, (unsigned long long)bh.b_blocknr,
		bh.b_state, (unsigned long long)bh.b_size);
	gfs2_free_journal_extents(jd);
	return rc;
}
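/*
 * Example (hypothetical numbers): a freshly made 128MB journal on a 4k
 * filesystem is usually laid out contiguously, so the loop above maps
 * it in one go: a single extent with lblock = 0 and blocks = 32768. A
 * journal added later with gfs2_jadd on a busy filesystem might instead
 * come back as a handful of extents.
 */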
/**
 * gfs2_write_alloc_required - figure out if a write will require an allocation
 * @ip: the file being written to
 * @offset: the offset to write to
 * @len: the number of bytes being written
 *
 * Returns: 1 if an alloc is required, 0 otherwise
 */

int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset,
			      unsigned int len)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head bh;
	unsigned int shift;
	u64 lblock, lblock_stop, size;
	u64 end_of_file;

	if (!len)
		return 0;

	if (gfs2_is_stuffed(ip)) {
		if (offset + len >
		    sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode))
			return 1;
		return 0;
	}

	shift = sdp->sd_sb.sb_bsize_shift;
	BUG_ON(gfs2_is_dir(ip));
	end_of_file = (i_size_read(&ip->i_inode) + sdp->sd_sb.sb_bsize - 1) >> shift;
	lblock = offset >> shift;
	lblock_stop = (offset + len + sdp->sd_sb.sb_bsize - 1) >> shift;
	if (lblock_stop > end_of_file)
		return 1;

	size = (lblock_stop - lblock) << shift;
	do {
		bh.b_state = 0;
		bh.b_size = size;
		gfs2_block_map(&ip->i_inode, lblock, &bh, 0);
		if (!buffer_mapped(&bh))
			return 1;
		size -= bh.b_size;
		lblock += (bh.b_size >> ip->i_inode.i_blkbits);