// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 */

#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/iomap.h>
#include <linux/ktime.h>

#include "trace_gfs2.h"
/* This doesn't need to be that large as max 64 bit pointers in a 4k
 * block is 512, so __u16 is fine for that. It saves stack space to
 * keep it small.
 */
struct metapath {
	struct buffer_head *mp_bh[GFS2_MAX_META_HEIGHT];
	__u16 mp_list[GFS2_MAX_META_HEIGHT];
	int mp_fheight; /* find_metapath height */
	int mp_aheight; /* actual height (lookup height) */
};

static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length);
/**
 * gfs2_unstuffer_page - unstuff a stuffed inode into a block cached by a page
 * @ip: the inode
 * @dibh: the dinode buffer
 * @block: the block number that was allocated
 * @page: The (optional) page. This is looked up if @page is NULL
 *
 * Returns: errno
 */
static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh,
			       u64 block, struct page *page)
{
	struct inode *inode = &ip->i_inode;
	struct buffer_head *bh;
	int release = 0;

	if (!page || page->index) {
		page = find_or_create_page(inode->i_mapping, 0, GFP_NOFS);
		if (!page)
			return -ENOMEM;
		release = 1;
	}

	if (!PageUptodate(page)) {
		void *kaddr = kmap(page);
		u64 dsize = i_size_read(inode);

		if (dsize > gfs2_max_stuffed_size(ip))
			dsize = gfs2_max_stuffed_size(ip);

		memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
		memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
		kunmap(page);

		SetPageUptodate(page);
	}

	if (!page_has_buffers(page))
		create_empty_buffers(page, BIT(inode->i_blkbits),
				     BIT(BH_Uptodate));

	bh = page_buffers(page);

	if (!buffer_mapped(bh))
		map_bh(bh, inode->i_sb, block);

	set_buffer_uptodate(bh);
	if (gfs2_is_jdata(ip))
		gfs2_trans_add_data(ip->i_gl, bh);
	else {
		mark_buffer_dirty(bh);
		gfs2_ordered_add_inode(ip);
	}

	if (release) {
		unlock_page(page);
		put_page(page);
	}

	return 0;
}
/**
 * gfs2_unstuff_dinode - Unstuff a dinode when the data has grown too big
 * @ip: The GFS2 inode to unstuff
 * @page: The (optional) page. This is looked up if @page is NULL
 *
 * This routine unstuffs a dinode and returns it to a "normal" state such
 * that the height can be grown in the traditional way.
 *
 * Returns: errno
 */
int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page)
{
	struct buffer_head *bh, *dibh;
	struct gfs2_dinode *di;
	u64 block = 0;
	int isdir = gfs2_is_dir(ip);
	int error;

	down_write(&ip->i_rw_mutex);

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto out;

	if (i_size_read(&ip->i_inode)) {
		/* Get a free block, fill it with the stuffed data,
		   and write it out to disk */

		unsigned int n = 1;
		error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL);
		if (error)
			goto out_brelse;
		if (isdir) {
			gfs2_trans_remove_revoke(GFS2_SB(&ip->i_inode), block, 1);
			error = gfs2_dir_get_new_buffer(ip, block, &bh);
			if (error)
				goto out_brelse;
			gfs2_buffer_copy_tail(bh, sizeof(struct gfs2_meta_header),
					      dibh, sizeof(struct gfs2_dinode));
			brelse(bh);
		} else {
			error = gfs2_unstuffer_page(ip, dibh, block, page);
			if (error)
				goto out_brelse;
		}
	}

	/* Set up the pointer to the new block */

	gfs2_trans_add_meta(ip->i_gl, dibh);
	di = (struct gfs2_dinode *)dibh->b_data;
	gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));

	if (i_size_read(&ip->i_inode)) {
		*(__be64 *)(di + 1) = cpu_to_be64(block);
		gfs2_add_inode_blocks(&ip->i_inode, 1);
		di->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode));
	}

	ip->i_height = 1;
	di->di_height = cpu_to_be16(1);

out_brelse:
	brelse(dibh);
out:
	up_write(&ip->i_rw_mutex);
	return error;
}
/**
 * find_metapath - Find path through the metadata tree
 * @sdp: The superblock
 * @block: The disk block to look up
 * @mp: The metapath to return the result in
 * @height: The pre-calculated height of the metadata tree
 *
 * This routine returns a struct metapath structure that defines a path
 * through the metadata of inode "ip" to get to block "block".
 *
 * Example:
 * Given: "ip" is a height 3 file, "offset" is 101342453, and this is a
 * filesystem with a blocksize of 4096.
 *
 * find_metapath() would return a struct metapath structure set to:
 * mp_fheight = 3, mp_list[0] = 0, mp_list[1] = 48, and mp_list[2] = 165.
 *
 * That means that in order to get to the block containing the byte at
 * offset 101342453, we would load the indirect block pointed to by pointer
 * 0 in the dinode. We would then load the indirect block pointed to by
 * pointer 48 in that indirect block. We would then load the data block
 * pointed to by pointer 165 in that indirect block.
 *
 * (A diagram here draws the dinode, the two indirect blocks, and the data
 * block containing offset 101342453 for this example, with the pointer
 * slots 0, 48 and 165 marked at each level.)
 */
static void find_metapath(const struct gfs2_sbd *sdp, u64 block,
			  struct metapath *mp, unsigned int height)
{
	unsigned int i;

	mp->mp_fheight = height;
	for (i = height; i--;)
		mp->mp_list[i] = do_div(block, sdp->sd_inptrs);
}
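
/*
 * Note: do_div() divides 'block' in place and returns the remainder, so
 * the loop above peels off one index per level, from the leaves
 * (mp_list[height - 1]) up to the dinode (mp_list[0]). With a 4k block
 * size each indirect block holds 509 pointers, so each level divides the
 * remaining block number by 509.
 */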
static inline unsigned int metapath_branch_start(const struct metapath *mp)
{
	if (mp->mp_list[0] == 0)
		return 2;
	return 1;
}
/**
 * metaptr1 - Return the first possible metadata pointer in a metapath buffer
 * @height: The metadata height (0 = dinode)
 * @mp: The metapath
 */
static inline __be64 *metaptr1(unsigned int height, const struct metapath *mp)
{
	struct buffer_head *bh = mp->mp_bh[height];
	if (height == 0)
		return ((__be64 *)(bh->b_data + sizeof(struct gfs2_dinode)));
	return ((__be64 *)(bh->b_data + sizeof(struct gfs2_meta_header)));
}
/**
 * metapointer - Return pointer to start of metadata in a buffer
 * @height: The metadata height (0 = dinode)
 * @mp: The metapath
 *
 * Return a pointer to the block number of the next height of the metadata
 * tree given a buffer containing the pointer to the current height of the
 * metadata tree.
 */
static inline __be64 *metapointer(unsigned int height, const struct metapath *mp)
{
	__be64 *p = metaptr1(height, mp);
	return p + mp->mp_list[height];
}
static inline const __be64 *metaend(unsigned int height, const struct metapath *mp)
{
	const struct buffer_head *bh = mp->mp_bh[height];
	return (const __be64 *)(bh->b_data + bh->b_size);
}
static void clone_metapath(struct metapath *clone, struct metapath *mp)
{
	unsigned int hgt;

	*clone = *mp;
	for (hgt = 0; hgt < mp->mp_aheight; hgt++)
		get_bh(clone->mp_bh[hgt]);
}
static void gfs2_metapath_ra(struct gfs2_glock *gl, __be64 *start, __be64 *end)
{
	const __be64 *t;

	for (t = start; t < end; t++) {
		struct buffer_head *rabh;

		if (!*t)
			continue;

		rabh = gfs2_getbuf(gl, be64_to_cpu(*t), CREATE);
		if (trylock_buffer(rabh)) {
			if (!buffer_uptodate(rabh)) {
				rabh->b_end_io = end_buffer_read_sync;
				submit_bh(REQ_OP_READ,
					  REQ_RAHEAD | REQ_META | REQ_PRIO,
					  rabh);
				continue;
			}
			unlock_buffer(rabh);
		}
		brelse(rabh);
	}
}
static int __fillup_metapath(struct gfs2_inode *ip, struct metapath *mp,
			     unsigned int x, unsigned int h)
{
	for (; x < h; x++) {
		__be64 *ptr = metapointer(x, mp);
		u64 dblock = be64_to_cpu(*ptr);
		int ret;

		if (!dblock)
			break;
		ret = gfs2_meta_indirect_buffer(ip, x + 1, dblock, &mp->mp_bh[x + 1]);
		if (ret)
			return ret;
	}
	mp->mp_aheight = x + 1;
	return 0;
}
/**
 * lookup_metapath - Walk the metadata tree to a specific point
 * @ip: The inode
 * @mp: The metapath
 *
 * Assumes that the inode's buffer has already been looked up and
 * hooked onto mp->mp_bh[0] and that the metapath has been initialised
 * by find_metapath().
 *
 * If this function encounters part of the tree which has not been
 * allocated, it returns the current height of the tree at the point
 * at which it found the unallocated block. Blocks which are found are
 * added to the mp->mp_bh[] list.
 *
 * Returns: error
 */
static int lookup_metapath(struct gfs2_inode *ip, struct metapath *mp)
{
	return __fillup_metapath(ip, mp, 0, ip->i_height - 1);
}
/**
 * fillup_metapath - fill up buffers for the metadata path to a specific height
 * @ip: The inode
 * @mp: The metapath
 * @h: The height to which it should be mapped
 *
 * Similar to lookup_metapath, but does lookups for a range of heights
 *
 * Returns: error or the number of buffers filled
 */
static int fillup_metapath(struct gfs2_inode *ip, struct metapath *mp, int h)
{
	unsigned int x = 0;
	int ret;

	if (h) {
		/* find the first buffer we need to look up. */
		for (x = h - 1; x > 0; x--) {
			if (mp->mp_bh[x])
				break;
		}
	}
	ret = __fillup_metapath(ip, mp, x, h);
	if (ret)
		return ret;
	return mp->mp_aheight - x - 1;
}
static void release_metapath(struct metapath *mp)
{
	int i;

	for (i = 0; i < GFS2_MAX_META_HEIGHT; i++) {
		if (mp->mp_bh[i] == NULL)
			break;
		brelse(mp->mp_bh[i]);
		mp->mp_bh[i] = NULL;
	}
}
/**
 * gfs2_extent_length - Returns length of an extent of blocks
 * @bh: The metadata block
 * @ptr: Current position in @bh
 * @limit: Max extent length to return
 * @eob: Set to 1 if we hit "end of block"
 *
 * Returns: The length of the extent (minimum of one block)
 */
static inline unsigned int gfs2_extent_length(struct buffer_head *bh, __be64 *ptr, size_t limit, int *eob)
{
	const __be64 *end = (__be64 *)(bh->b_data + bh->b_size);
	const __be64 *first = ptr;
	u64 d = be64_to_cpu(*ptr);

	*eob = 0;
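	/*
	 * Scan forward while each pointer names the next consecutive disk
	 * block, so the extent measured below is physically contiguous.
	 */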
	do {
		ptr++;
		if (ptr >= end)
			break;
		d++;
	} while(be64_to_cpu(*ptr) == d);
	if (ptr >= end)
		*eob = 1;
	return ptr - first;
}
typedef const __be64 *(*gfs2_metadata_walker)(
		struct metapath *mp,
		const __be64 *start, const __be64 *end,
		u64 factor, void *data);

#define WALK_STOP ((__be64 *)0)
#define WALK_NEXT ((__be64 *)1)
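
/*
 * Walker protocol: a gfs2_metadata_walker returns WALK_STOP to end the
 * walk, WALK_NEXT to continue with the next range of pointers, or a
 * pointer within [start, end) to descend (fill up the metapath) at that
 * position.
 */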
static int gfs2_walk_metadata(struct inode *inode, sector_t lblock,
		u64 len, struct metapath *mp, gfs2_metadata_walker walker,
		void *data)
{
	struct metapath clone;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	const __be64 *start, *end, *ptr;
	u64 factor = 1;
	unsigned int hgt;
	int ret = 0;

	for (hgt = ip->i_height - 1; hgt >= mp->mp_aheight; hgt--)
		factor *= sdp->sd_inptrs;
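	/*
	 * Here 'factor' is the number of file blocks spanned by one pointer
	 * at the lowest height for which we hold a buffer: 1 at the leaves,
	 * multiplied by sd_inptrs once per level above them.
	 */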
	for (;;) {
		u64 step;

		/* Walk indirect block. */
		start = metapointer(hgt, mp);
		end = metaend(hgt, mp);

		step = (end - start) * factor;
		if (step > len)
			end = start + DIV_ROUND_UP_ULL(len, factor);

		ptr = walker(mp, start, end, factor, data);
		if (ptr == WALK_STOP)
			break;
		if (step >= len)
			break;
		len -= step;
		if (ptr != WALK_NEXT) {
			BUG_ON(!*ptr);
			mp->mp_list[hgt] += ptr - start;
			goto fill_up_metapath;
		}

lower_metapath:
		/* Decrease height of metapath. */
		if (mp != &clone) {
			clone_metapath(&clone, mp);
			mp = &clone;
		}
		brelse(mp->mp_bh[hgt]);
		mp->mp_bh[hgt] = NULL;
		if (!hgt)
			break;
		hgt--;
		factor *= sdp->sd_inptrs;

		/* Advance in metadata tree. */
		(mp->mp_list[hgt])++;
		start = metapointer(hgt, mp);
		end = metaend(hgt, mp);
		if (start >= end) {
			mp->mp_list[hgt] = 0;
			if (!hgt)
				break;
			goto lower_metapath;
		}

fill_up_metapath:
		/* Increase height of metapath. */
		if (mp != &clone) {
			clone_metapath(&clone, mp);
			mp = &clone;
		}
		ret = fillup_metapath(ip, mp, ip->i_height - 1);
		if (ret < 0)
			break;
		hgt += ret;
		for (; ret; ret--)
			do_div(factor, sdp->sd_inptrs);
		mp->mp_aheight = hgt + 1;
	}
	if (mp == &clone)
		release_metapath(mp);
	return ret;
}
struct gfs2_hole_walker_args {
	u64 blocks;
};

static const __be64 *gfs2_hole_walker(struct metapath *mp,
		const __be64 *start, const __be64 *end,
		u64 factor, void *data)
{
	struct gfs2_hole_walker_args *args = data;
	const __be64 *ptr;

	for (ptr = start; ptr < end; ptr++) {
		if (*ptr) {
			args->blocks += (ptr - start) * factor;
			if (mp->mp_aheight == mp->mp_fheight)
				return WALK_STOP;
			return ptr; /* increase height */
		}
	}
	args->blocks += (end - start) * factor;
	return WALK_NEXT;
}
/**
 * gfs2_hole_size - figure out the size of a hole
 * @inode: The inode
 * @lblock: The logical starting block number
 * @len: How far to look (in blocks)
 * @mp: The metapath at lblock
 * @iomap: The iomap to store the hole size in
 *
 * This function modifies @mp.
 *
 * Returns: errno on error
 */
static int gfs2_hole_size(struct inode *inode, sector_t lblock, u64 len,
			  struct metapath *mp, struct iomap *iomap)
{
	struct gfs2_hole_walker_args args = { };
	int ret;

	ret = gfs2_walk_metadata(inode, lblock, len, mp, gfs2_hole_walker, &args);
	if (!ret)
		iomap->length = args.blocks << inode->i_blkbits;
	return ret;
}
static inline __be64 *gfs2_indirect_init(struct metapath *mp,
					 struct gfs2_glock *gl, unsigned int i,
					 unsigned offset, u64 bn)
{
	__be64 *ptr = (__be64 *)(mp->mp_bh[i - 1]->b_data +
		       ((i > 1) ? sizeof(struct gfs2_meta_header) :
				  sizeof(struct gfs2_dinode)));
	BUG_ON(i < 1);
	BUG_ON(mp->mp_bh[i] != NULL);
	mp->mp_bh[i] = gfs2_meta_new(gl, bn);
	gfs2_trans_add_meta(gl, mp->mp_bh[i]);
	gfs2_metatype_set(mp->mp_bh[i], GFS2_METATYPE_IN, GFS2_FORMAT_IN);
	gfs2_buffer_clear_tail(mp->mp_bh[i], sizeof(struct gfs2_meta_header));
	ptr += offset;
	*ptr = cpu_to_be64(bn);
	return ptr;
}
enum alloc_state {
	ALLOC_DATA = 0,
	ALLOC_GROW_DEPTH = 1,
	ALLOC_GROW_HEIGHT = 2,
	/* ALLOC_UNSTUFF = 3,   TBD and rather complicated */
};
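
/*
 * The allocation state machine in gfs2_iomap_alloc() below runs these
 * states in order: ALLOC_GROW_HEIGHT adds new levels above the current
 * tree, ALLOC_GROW_DEPTH fills in missing indirect blocks down an
 * existing branch, and ALLOC_DATA finally plugs the data block numbers
 * into the lowest indirect block.
 */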
/**
 * gfs2_iomap_alloc - Build a metadata tree of the requested height
 * @inode: The GFS2 inode
 * @iomap: The iomap structure
 * @flags: iomap flags
 * @mp: The metapath, with proper height information calculated
 *
 * In this routine we may have to alloc:
 *   i) Indirect blocks to grow the metadata tree height
 *  ii) Indirect blocks to fill in lower part of the metadata tree
 * iii) Data blocks
 *
 * This function is called after gfs2_iomap_get, which works out the
 * total number of blocks which we need via gfs2_alloc_size.
 *
 * We then do the actual allocation asking for an extent at a time (if
 * enough contiguous free blocks are available, there will only be one
 * allocation request per call) and use the state machine to initialise
 * the blocks in order.
 *
 * Right now, this function will allocate at most one indirect block
 * worth of data -- with a default block size of 4K, that's slightly
 * less than 2M. If this limitation is ever removed to allow huge
 * allocations, we would probably still want to limit the iomap size we
 * return to avoid stalling other tasks during huge writes; the next
 * iomap iteration would then find the blocks already allocated.
 *
 * Returns: errno on error
 */
static int gfs2_iomap_alloc(struct inode *inode, struct iomap *iomap,
			    unsigned flags, struct metapath *mp)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *dibh = mp->mp_bh[0];
	u64 bn;
	unsigned n, i, blks, alloced = 0, iblks = 0, branch_start = 0;
	size_t dblks = iomap->length >> inode->i_blkbits;
	const unsigned end_of_metadata = mp->mp_fheight - 1;
	int ret;
	enum alloc_state state;
	__be64 *ptr;
	__be64 zero_bn = 0;

	BUG_ON(mp->mp_aheight < 1);
	BUG_ON(dibh == NULL);
	BUG_ON(dblks < 1);

	gfs2_trans_add_meta(ip->i_gl, dibh);

	down_write(&ip->i_rw_mutex);

	if (mp->mp_fheight == mp->mp_aheight) {
		/* Bottom indirect block exists */
		state = ALLOC_DATA;
	} else {
		/* Need to allocate indirect blocks */
		if (mp->mp_fheight == ip->i_height) {
			/* Writing into existing tree, extend tree down */
			iblks = mp->mp_fheight - mp->mp_aheight;
			state = ALLOC_GROW_DEPTH;
		} else {
			/* Building up tree height */
			state = ALLOC_GROW_HEIGHT;
			iblks = mp->mp_fheight - ip->i_height;
			branch_start = metapath_branch_start(mp);
			iblks += (mp->mp_fheight - branch_start);
		}
	}

	/* start of the second part of the function (state machine) */

	blks = dblks + iblks;
	i = mp->mp_aheight;
	do {
		n = blks - alloced;
		ret = gfs2_alloc_blocks(ip, &bn, &n, 0, NULL);
		if (ret)
			goto out;
		alloced += n;
		if (state != ALLOC_DATA || gfs2_is_jdata(ip))
			gfs2_trans_remove_revoke(sdp, bn, n);
		switch (state) {
		/* Growing height of tree */
		case ALLOC_GROW_HEIGHT:
			if (i == 1) {
				ptr = (__be64 *)(dibh->b_data +
						 sizeof(struct gfs2_dinode));
				zero_bn = *ptr;
			}
			for (; i - 1 < mp->mp_fheight - ip->i_height && n > 0;
			     i++, n--)
				gfs2_indirect_init(mp, ip->i_gl, i, 0, bn++);
			if (i - 1 == mp->mp_fheight - ip->i_height) {
				i--;
				gfs2_buffer_copy_tail(mp->mp_bh[i],
						sizeof(struct gfs2_meta_header),
						dibh, sizeof(struct gfs2_dinode));
				gfs2_buffer_clear_tail(dibh,
						sizeof(struct gfs2_dinode) +
						sizeof(__be64));
				ptr = (__be64 *)(mp->mp_bh[i]->b_data +
						 sizeof(struct gfs2_meta_header));
				*ptr = zero_bn;
				state = ALLOC_GROW_DEPTH;
				for (i = branch_start; i < mp->mp_fheight; i++) {
					if (mp->mp_bh[i] == NULL)
						break;
					brelse(mp->mp_bh[i]);
					mp->mp_bh[i] = NULL;
				}
				i = branch_start;
			}
			if (n == 0)
				break;
		/* fall through - To branching from existing tree */
		case ALLOC_GROW_DEPTH:
			if (i > 1 && i < mp->mp_fheight)
				gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[i-1]);
			for (; i < mp->mp_fheight && n > 0; i++, n--)
				gfs2_indirect_init(mp, ip->i_gl, i,
						   mp->mp_list[i-1], bn++);
			if (i == mp->mp_fheight)
				state = ALLOC_DATA;
			if (n == 0)
				break;
		/* fall through - To tree complete, adding data blocks */
		case ALLOC_DATA:
			BUG_ON(n > dblks);
			BUG_ON(mp->mp_bh[end_of_metadata] == NULL);
			gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[end_of_metadata]);
			dblks = n;
			ptr = metapointer(end_of_metadata, mp);
			iomap->addr = bn << inode->i_blkbits;
			iomap->flags |= IOMAP_F_MERGED | IOMAP_F_NEW;
			while (n-- > 0)
				*ptr++ = cpu_to_be64(bn++);
			break;
		}
	} while (iomap->addr == IOMAP_NULL_ADDR);

	iomap->type = IOMAP_MAPPED;
	iomap->length = (u64)dblks << inode->i_blkbits;
	ip->i_height = mp->mp_fheight;
	gfs2_add_inode_blocks(&ip->i_inode, alloced);
	gfs2_dinode_out(ip, dibh->b_data);
out:
	up_write(&ip->i_rw_mutex);
	return ret;
}
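
/*
 * iomap's private flag is repurposed to mark a mapping that ends at a
 * metadata boundary; gfs2_block_map() translates it into
 * set_buffer_boundary(), meaning a metadata read will be needed before
 * the next block can be mapped.
 */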
#define IOMAP_F_GFS2_BOUNDARY IOMAP_F_PRIVATE
/**
 * gfs2_alloc_size - Compute the maximum allocation size
 * @inode: The inode
 * @mp: The metapath
 * @size: Requested size in blocks
 *
 * Compute the maximum size of the next allocation at @mp.
 *
 * Returns: size in blocks
 */
static u64 gfs2_alloc_size(struct inode *inode, struct metapath *mp, u64 size)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	const __be64 *first, *ptr, *end;

	/*
	 * For writes to stuffed files, this function is called twice via
	 * gfs2_iomap_get, before and after unstuffing. The size we return the
	 * first time needs to be large enough to get the reservation and
	 * allocation sizes right. The size we return the second time must
	 * be exact or else gfs2_iomap_alloc won't do the right thing.
	 */

	if (gfs2_is_stuffed(ip) || mp->mp_fheight != mp->mp_aheight) {
		unsigned int maxsize = mp->mp_fheight > 1 ?
			sdp->sd_inptrs : sdp->sd_diptrs;
		maxsize -= mp->mp_list[mp->mp_fheight - 1];
		if (size > maxsize)
			size = maxsize;
		return size;
	}

	first = metapointer(ip->i_height - 1, mp);
	end = metaend(ip->i_height - 1, mp);
	if (end - first > size)
		end = first + size;
	for (ptr = first; ptr < end; ptr++) {
		if (*ptr)
			break;
	}
	return ptr - first;
}
/**
 * gfs2_iomap_get - Map blocks from an inode to disk blocks
 * @inode: The inode
 * @pos: Starting position in bytes
 * @length: Length to map, in bytes
 * @flags: iomap flags
 * @iomap: The iomap structure
 * @mp: The metapath
 *
 * Returns: errno
 */
static int gfs2_iomap_get(struct inode *inode, loff_t pos, loff_t length,
			  unsigned flags, struct iomap *iomap,
			  struct metapath *mp)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	loff_t size = i_size_read(inode);
	__be64 *ptr;
	sector_t lblock;
	sector_t lblock_stop;
	int ret;
	int eob;
	u64 len;
	struct buffer_head *dibh = NULL, *bh;
	u8 height;

	if (!length)
		return -EINVAL;

	down_read(&ip->i_rw_mutex);

	ret = gfs2_meta_inode_buffer(ip, &dibh);
	if (ret)
		goto unlock;
	mp->mp_bh[0] = dibh;

	if (gfs2_is_stuffed(ip)) {
		if (flags & IOMAP_WRITE) {
			loff_t max_size = gfs2_max_stuffed_size(ip);

			if (pos + length > max_size)
				goto unstuff;
			iomap->length = max_size;
		} else {
			if (pos >= size) {
				if (flags & IOMAP_REPORT) {
					ret = -ENOENT;
					goto unlock;
				} else {
					/* report a hole */
					iomap->offset = pos;
					iomap->length = length;
					goto do_alloc;
				}
			}
			iomap->length = size;
		}
		iomap->addr = (ip->i_no_addr << inode->i_blkbits) +
			      sizeof(struct gfs2_dinode);
		iomap->type = IOMAP_INLINE;
		iomap->inline_data = dibh->b_data + sizeof(struct gfs2_dinode);
		goto out;
	}

unstuff:
	lblock = pos >> inode->i_blkbits;
	iomap->offset = lblock << inode->i_blkbits;
	lblock_stop = (pos + length - 1) >> inode->i_blkbits;
	len = lblock_stop - lblock + 1;
	iomap->length = len << inode->i_blkbits;

	height = ip->i_height;
	while ((lblock + 1) * sdp->sd_sb.sb_bsize > sdp->sd_heightsize[height])
		height++;
	find_metapath(sdp, lblock, mp, height);
	if (height > ip->i_height || gfs2_is_stuffed(ip))
		goto do_alloc;

	ret = lookup_metapath(ip, mp);
	if (ret)
		goto unlock;

	if (mp->mp_aheight != ip->i_height)
		goto do_alloc;

	ptr = metapointer(ip->i_height - 1, mp);
	if (*ptr == 0)
		goto do_alloc;

	bh = mp->mp_bh[ip->i_height - 1];
	len = gfs2_extent_length(bh, ptr, len, &eob);

	iomap->addr = be64_to_cpu(*ptr) << inode->i_blkbits;
	iomap->length = len << inode->i_blkbits;
	iomap->type = IOMAP_MAPPED;
	iomap->flags |= IOMAP_F_MERGED;
	if (eob)
		iomap->flags |= IOMAP_F_GFS2_BOUNDARY;

out:
	iomap->bdev = inode->i_sb->s_bdev;
unlock:
	up_read(&ip->i_rw_mutex);
	return ret;

do_alloc:
	iomap->addr = IOMAP_NULL_ADDR;
	iomap->type = IOMAP_HOLE;
	if (flags & IOMAP_REPORT) {
		if (pos >= size)
			ret = -ENOENT;
		else if (height == ip->i_height)
			ret = gfs2_hole_size(inode, lblock, len, mp, iomap);
		else
			iomap->length = size - pos;
	} else if (flags & IOMAP_WRITE) {
		u64 alloc_size;

		if (flags & IOMAP_DIRECT)
			goto out;	/* (see gfs2_file_direct_write) */

		len = gfs2_alloc_size(inode, mp, len);
		alloc_size = len << inode->i_blkbits;
		if (alloc_size < iomap->length)
			iomap->length = alloc_size;
	} else {
		if (pos < size && height == ip->i_height)
			ret = gfs2_hole_size(inode, lblock, len, mp, iomap);
	}
	goto out;
}
/**
 * gfs2_lblk_to_dblk - convert logical block to disk block
 * @inode: the inode of the file we're mapping
 * @lblock: the block relative to the start of the file
 * @dblock: the returned dblock, if no error
 *
 * This function maps a single block from a file logical block (relative to
 * the start of the file) to a file system absolute block using iomap.
 *
 * Returns: 0 on success, or an error code
 */
int gfs2_lblk_to_dblk(struct inode *inode, u32 lblock, u64 *dblock)
{
	struct iomap iomap = { };
	struct metapath mp = { .mp_aheight = 1, };
	loff_t pos = (loff_t)lblock << inode->i_blkbits;
	int ret;

	ret = gfs2_iomap_get(inode, pos, i_blocksize(inode), 0, &iomap, &mp);
	release_metapath(&mp);
	if (ret == 0)
		*dblock = iomap.addr >> inode->i_blkbits;

	return ret;
}
static int gfs2_write_lock(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	int error;

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
	error = gfs2_glock_nq(&ip->i_gh);
	if (error)
		goto out_uninit;
	if (&ip->i_inode == sdp->sd_rindex) {
		struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);

		error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, &m_ip->i_gh);
		if (error)
			goto out_unlock;
	}
	return 0;

out_unlock:
	gfs2_glock_dq(&ip->i_gh);
out_uninit:
	gfs2_holder_uninit(&ip->i_gh);
	return error;
}
static void gfs2_write_unlock(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (&ip->i_inode == sdp->sd_rindex) {
		struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);

		gfs2_glock_dq_uninit(&m_ip->i_gh);
	}
	gfs2_glock_dq_uninit(&ip->i_gh);
}
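
/*
 * Each page of a buffered write gets its own transaction: page_prepare
 * opens it with room for the dinode plus the blocks covered by the
 * page, and page_done closes it after adding any journaled data
 * buffers.
 */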
static int gfs2_iomap_page_prepare(struct inode *inode, loff_t pos,
				   unsigned len, struct iomap *iomap)
{
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	return gfs2_trans_begin(sdp, RES_DINODE + (len >> inode->i_blkbits), 0);
}
static void gfs2_iomap_page_done(struct inode *inode, loff_t pos,
				 unsigned copied, struct page *page,
				 struct iomap *iomap)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (page && !gfs2_is_stuffed(ip))
		gfs2_page_add_databufs(ip, page, offset_in_page(pos), copied);
	gfs2_trans_end(sdp);
}

static const struct iomap_page_ops gfs2_iomap_page_ops = {
	.page_prepare = gfs2_iomap_page_prepare,
	.page_done = gfs2_iomap_page_done,
};
static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos,
				  loff_t length, unsigned flags,
				  struct iomap *iomap,
				  struct metapath *mp)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
	bool unstuff, alloc_required;
	int ret;

	ret = gfs2_write_lock(inode);
	if (ret)
		return ret;

	unstuff = gfs2_is_stuffed(ip) &&
		  pos + length > gfs2_max_stuffed_size(ip);

	ret = gfs2_iomap_get(inode, pos, length, flags, iomap, mp);
	if (ret)
		goto out_unlock;

	alloc_required = unstuff || iomap->type == IOMAP_HOLE;

	if (alloc_required || gfs2_is_jdata(ip))
		gfs2_write_calc_reserv(ip, iomap->length, &data_blocks,
				       &ind_blocks);

	if (alloc_required) {
		struct gfs2_alloc_parms ap = {
			.target = data_blocks + ind_blocks
		};

		ret = gfs2_quota_lock_check(ip, &ap);
		if (ret)
			goto out_unlock;

		ret = gfs2_inplace_reserve(ip, &ap);
		if (ret)
			goto out_qunlock;
	}
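	/*
	 * Journal reservation: the dinode, any new indirect blocks, the
	 * data blocks themselves when journaled, statfs/quota updates when
	 * allocating, and the rgrp bitmap blocks for the allocation.
	 */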
	rblocks = RES_DINODE + ind_blocks;
	if (gfs2_is_jdata(ip))
		rblocks += data_blocks;
	if (ind_blocks || data_blocks)
		rblocks += RES_STATFS + RES_QUOTA;
	if (inode == sdp->sd_rindex)
		rblocks += 2 * RES_STATFS;
	if (alloc_required)
		rblocks += gfs2_rg_blocks(ip, data_blocks + ind_blocks);

	if (unstuff || iomap->type == IOMAP_HOLE) {
		struct gfs2_trans *tr;

		ret = gfs2_trans_begin(sdp, rblocks,
				       iomap->length >> inode->i_blkbits);
		if (ret)
			goto out_trans_fail;

		if (unstuff) {
			ret = gfs2_unstuff_dinode(ip, NULL);
			if (ret)
				goto out_trans_end;
			release_metapath(mp);
			ret = gfs2_iomap_get(inode, iomap->offset,
					     iomap->length, flags, iomap, mp);
			if (ret)
				goto out_trans_end;
		}

		if (iomap->type == IOMAP_HOLE) {
			ret = gfs2_iomap_alloc(inode, iomap, flags, mp);
			if (ret) {
				gfs2_trans_end(sdp);
				gfs2_inplace_release(ip);
				punch_hole(ip, iomap->offset, iomap->length);
				goto out_qunlock;
			}
		}

		tr = current->journal_info;
		if (tr->tr_num_buf_new)
			__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
		else
			gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[0]);

		gfs2_trans_end(sdp);
	}

	if (gfs2_is_stuffed(ip) || gfs2_is_jdata(ip))
		iomap->page_ops = &gfs2_iomap_page_ops;
	return 0;

out_trans_end:
	gfs2_trans_end(sdp);
out_trans_fail:
	if (alloc_required)
		gfs2_inplace_release(ip);
out_qunlock:
	if (alloc_required)
		gfs2_quota_unlock(ip);
out_unlock:
	gfs2_write_unlock(inode);
	return ret;
}
static int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
			    unsigned flags, struct iomap *iomap)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct metapath mp = { .mp_aheight = 1, };
	int ret;

	iomap->flags |= IOMAP_F_BUFFER_HEAD;

	trace_gfs2_iomap_start(ip, pos, length, flags);
	if ((flags & IOMAP_WRITE) && !(flags & IOMAP_DIRECT)) {
		ret = gfs2_iomap_begin_write(inode, pos, length, flags, iomap, &mp);
	} else {
		ret = gfs2_iomap_get(inode, pos, length, flags, iomap, &mp);

		/*
		 * Silently fall back to buffered I/O for stuffed files or if
		 * we've hit a hole (see gfs2_file_direct_write).
		 */
		if ((flags & IOMAP_WRITE) && (flags & IOMAP_DIRECT) &&
		    iomap->type != IOMAP_MAPPED)
			ret = -ENOTBLK;
	}
	release_metapath(&mp);
	trace_gfs2_iomap_end(ip, iomap, ret);

	return ret;
}
static int gfs2_iomap_end(struct inode *inode, loff_t pos, loff_t length,
			  ssize_t written, unsigned flags, struct iomap *iomap)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if ((flags & (IOMAP_WRITE | IOMAP_DIRECT)) != IOMAP_WRITE)
		goto out;

	if (!gfs2_is_stuffed(ip))
		gfs2_ordered_add_inode(ip);

	if (inode == sdp->sd_rindex)
		adjust_fs_space(inode);

	gfs2_inplace_release(ip);

	if (length != written && (iomap->flags & IOMAP_F_NEW)) {
		/* Deallocate blocks that were just allocated. */
		loff_t blockmask = i_blocksize(inode) - 1;
		loff_t end = (pos + length) & ~blockmask;

		pos = (pos + written + blockmask) & ~blockmask;
		if (pos < end) {
			truncate_pagecache_range(inode, pos, end - 1);
			punch_hole(ip, pos, end - pos);
		}
	}

	if (ip->i_qadata && ip->i_qadata->qa_qd_num)
		gfs2_quota_unlock(ip);
	gfs2_write_unlock(inode);

out:
	return 0;
}
const struct iomap_ops gfs2_iomap_ops = {
	.iomap_begin = gfs2_iomap_begin,
	.iomap_end = gfs2_iomap_end,
};
/**
 * gfs2_block_map - Map one or more blocks of an inode to a disk block
 * @inode: The inode
 * @lblock: The logical block number
 * @bh_map: The bh to be mapped
 * @create: True if it's ok to allocate blocks to satisfy the request
 *
 * The size of the requested mapping is defined in bh_map->b_size.
 *
 * Clears buffer_mapped(bh_map) and leaves bh_map->b_size unchanged
 * when @lblock is not mapped. Sets buffer_mapped(bh_map) and
 * bh_map->b_size to indicate the size of the mapping when @lblock and
 * successive blocks are mapped, up to the requested size.
 *
 * Sets buffer_boundary() if a read of metadata will be required
 * before the next block can be mapped. Sets buffer_new() if new
 * blocks were allocated.
 *
 * Returns: errno
 */
int gfs2_block_map(struct inode *inode, sector_t lblock,
		   struct buffer_head *bh_map, int create)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	loff_t pos = (loff_t)lblock << inode->i_blkbits;
	loff_t length = bh_map->b_size;
	struct metapath mp = { .mp_aheight = 1, };
	struct iomap iomap = { };
	int ret;

	clear_buffer_mapped(bh_map);
	clear_buffer_new(bh_map);
	clear_buffer_boundary(bh_map);
	trace_gfs2_bmap(ip, bh_map, lblock, create, 1);

	if (create) {
		ret = gfs2_iomap_get(inode, pos, length, IOMAP_WRITE, &iomap, &mp);
		if (!ret && iomap.type == IOMAP_HOLE)
			ret = gfs2_iomap_alloc(inode, &iomap, IOMAP_WRITE, &mp);
		release_metapath(&mp);
	} else {
		ret = gfs2_iomap_get(inode, pos, length, 0, &iomap, &mp);
		release_metapath(&mp);
	}
	if (ret)
		goto out;

	if (iomap.length > bh_map->b_size) {
		iomap.length = bh_map->b_size;
		iomap.flags &= ~IOMAP_F_GFS2_BOUNDARY;
	}
	if (iomap.addr != IOMAP_NULL_ADDR)
		map_bh(bh_map, inode->i_sb, iomap.addr >> inode->i_blkbits);
	bh_map->b_size = iomap.length;
	if (iomap.flags & IOMAP_F_GFS2_BOUNDARY)
		set_buffer_boundary(bh_map);
	if (iomap.flags & IOMAP_F_NEW)
		set_buffer_new(bh_map);

out:
	trace_gfs2_bmap(ip, bh_map, lblock, create, ret);
	return ret;
}
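
/*
 * Typical use of gfs2_block_map(), sketched for illustration (the
 * variable names here are hypothetical): set b_size to the span you
 * want mapped, then check buffer_mapped():
 *
 *	struct buffer_head bh = { .b_state = 0, .b_blocknr = 0 };
 *	bh.b_size = i_blocksize(inode);
 *	if (!gfs2_block_map(inode, lblock, &bh, 0) && buffer_mapped(&bh))
 *		dblock = bh.b_blocknr;
 */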
/*
 * Deprecated: do not use in new code
 */
int gfs2_extent_map(struct inode *inode, u64 lblock, int *new, u64 *dblock, unsigned *extlen)
{
	struct buffer_head bh = { .b_state = 0, .b_blocknr = 0 };
	int ret;
	int create = *new;

	BUG_ON(!extlen);
	BUG_ON(!dblock);
	BUG_ON(!new);

	bh.b_size = BIT(inode->i_blkbits + (create ? 0 : 5));
	ret = gfs2_block_map(inode, lblock, &bh, create);
	*extlen = bh.b_size >> inode->i_blkbits;
	*dblock = bh.b_blocknr;
	if (buffer_new(&bh))
		*new = 1;
	else
		*new = 0;
	return ret;
}
/**
 * gfs2_block_zero_range - Deal with zeroing out data
 *
 * This is partly borrowed from ext3.
 */
static int gfs2_block_zero_range(struct inode *inode, loff_t from,
				 unsigned int length)
{
	struct address_space *mapping = inode->i_mapping;
	struct gfs2_inode *ip = GFS2_I(inode);
	unsigned long index = from >> PAGE_SHIFT;
	unsigned offset = from & (PAGE_SIZE-1);
	unsigned blocksize, iblock, pos;
	struct buffer_head *bh;
	struct page *page;
	int err;

	page = find_or_create_page(mapping, index, GFP_NOFS);
	if (!page)
		return 0;

	blocksize = inode->i_sb->s_blocksize;
	iblock = index << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits);

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	/* Find the buffer that contains "offset" */
	bh = page_buffers(page);
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}

	err = 0;

	if (!buffer_mapped(bh)) {
		gfs2_block_map(inode, iblock, bh, 0);
		/* unmapped? It's a hole - nothing to do */
		if (!buffer_mapped(bh))
			goto unlock;
	}

	/* Ok, it's mapped. Make sure it's up-to-date */
	if (PageUptodate(page))
		set_buffer_uptodate(bh);

	if (!buffer_uptodate(bh)) {
		err = -EIO;
		ll_rw_block(REQ_OP_READ, 0, 1, &bh);
		wait_on_buffer(bh);
		/* Uhhuh. Read error. Complain and punt. */
		if (!buffer_uptodate(bh))
			goto unlock;
		err = 0;
	}

	if (gfs2_is_jdata(ip))
		gfs2_trans_add_data(ip->i_gl, bh);
	else
		gfs2_ordered_add_inode(ip);

	zero_user(page, offset, length);
	mark_buffer_dirty(bh);
unlock:
	unlock_page(page);
	put_page(page);
	return err;
}
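
/*
 * With jdata, every truncated block must be revoked in the journal, so
 * a large truncate is split into transactions of at most
 * GFS2_JTRUNC_REVOKES revokes each (see gfs2_journaled_truncate()).
 */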
#define GFS2_JTRUNC_REVOKES 8192
/**
 * gfs2_journaled_truncate - Wrapper for truncate_pagecache for jdata files
 * @inode: The inode being truncated
 * @oldsize: The original (larger) size
 * @newsize: The new smaller size
 *
 * With jdata files, we have to journal a revoke for each block which is
 * truncated. As a result, we need to split this into separate transactions
 * if the number of pages being truncated gets too large.
 */
static int gfs2_journaled_truncate(struct inode *inode, u64 oldsize, u64 newsize)
{
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	u64 max_chunk = GFS2_JTRUNC_REVOKES * sdp->sd_vfs->s_blocksize;
	u64 chunk;
	int error;
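
	/*
	 * Each pass truncates at most max_chunk bytes of page cache, and
	 * the chunk is trimmed so that every intermediate size is page
	 * aligned; only the final chunk may end at a sub-page offset.
	 */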
	while (oldsize != newsize) {
		struct gfs2_trans *tr;
		unsigned int offs;

		chunk = oldsize - newsize;
		if (chunk > max_chunk)
			chunk = max_chunk;

		offs = oldsize & ~PAGE_MASK;
		if (offs && chunk > PAGE_SIZE)
			chunk = offs + ((chunk - offs) & PAGE_MASK);

		truncate_pagecache(inode, oldsize - chunk);
		oldsize -= chunk;

		tr = current->journal_info;
		if (!test_bit(TR_TOUCHED, &tr->tr_flags))
			continue;

		gfs2_trans_end(sdp);
		error = gfs2_trans_begin(sdp, RES_DINODE, GFS2_JTRUNC_REVOKES);
		if (error)
			return error;
	}

	return 0;
}
static int trunc_start(struct inode *inode, u64 newsize)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *dibh = NULL;
	int journaled = gfs2_is_jdata(ip);
	u64 oldsize = inode->i_size;
	int error;

	if (journaled)
		error = gfs2_trans_begin(sdp, RES_DINODE + RES_JDATA, GFS2_JTRUNC_REVOKES);
	else
		error = gfs2_trans_begin(sdp, RES_DINODE, 0);
	if (error)
		return error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto out;

	gfs2_trans_add_meta(ip->i_gl, dibh);

	if (gfs2_is_stuffed(ip)) {
		gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode) + newsize);
	} else {
		unsigned int blocksize = i_blocksize(inode);
		unsigned int offs = newsize & (blocksize - 1);

		if (offs) {
			error = gfs2_block_zero_range(inode, newsize,
						      blocksize - offs);
			if (error)
				goto out;
		}
		ip->i_diskflags |= GFS2_DIF_TRUNC_IN_PROG;
	}

	i_size_write(inode, newsize);
	ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
	gfs2_dinode_out(ip, dibh->b_data);

	if (journaled)
		error = gfs2_journaled_truncate(inode, oldsize, newsize);
	else
		truncate_pagecache(inode, newsize);

out:
	brelse(dibh);
	if (current->journal_info)
		gfs2_trans_end(sdp);
	return error;
}
int gfs2_iomap_get_alloc(struct inode *inode, loff_t pos, loff_t length,
			 struct iomap *iomap)
{
	struct metapath mp = { .mp_aheight = 1, };
	int ret;

	ret = gfs2_iomap_get(inode, pos, length, IOMAP_WRITE, iomap, &mp);
	if (!ret && iomap->type == IOMAP_HOLE)
		ret = gfs2_iomap_alloc(inode, iomap, IOMAP_WRITE, &mp);
	release_metapath(&mp);
	return ret;
}
/**
 * sweep_bh_for_rgrps - find an rgrp in a meta buffer and free blocks therein
 * @ip: The inode
 * @rd_gh: holder of resource group glock
 * @bh: buffer head to sweep
 * @start: starting point in bh
 * @end: end point in bh
 * @meta: true if bh points to metadata (rather than data)
 * @btotal: place to keep count of total blocks freed
 *
 * We sweep a metadata buffer (provided by the metapath) for blocks we need to
 * free, and free them all. However, we do it one rgrp at a time. If this
 * block has references to multiple rgrps, we break it into individual
 * transactions. This allows other processes to use the rgrps while we're
 * focused on a single one, for better concurrency / performance.
 * At every transaction boundary, we rewrite the inode into the journal.
 * That way the bitmaps are kept consistent with the inode and we can recover
 * if we're interrupted by power-outages.
 *
 * Returns: 0, or return code if an error occurred.
 *          *btotal has the total number of blocks freed
 */
static int sweep_bh_for_rgrps(struct gfs2_inode *ip, struct gfs2_holder *rd_gh,
			      struct buffer_head *bh, __be64 *start, __be64 *end,
			      bool meta, u32 *btotal)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd;
	struct gfs2_trans *tr;
	__be64 *p;
	int blks_outside_rgrp;
	u64 bn, bstart, isize_blks;
	s64 blen; /* needs to be s64 or gfs2_add_inode_blocks breaks */
	int ret = 0;
	bool buf_in_tr = false; /* buffer was added to transaction */

more_rgrps:
	rgd = NULL;
	if (gfs2_holder_initialized(rd_gh)) {
		rgd = gfs2_glock2rgrp(rd_gh->gh_gl);
		gfs2_assert_withdraw(sdp,
			     gfs2_glock_is_locked_by_me(rd_gh->gh_gl));
	}
	blks_outside_rgrp = 0;
	bstart = 0;
	blen = 0;

	for (p = start; p < end; p++) {
		if (!*p)
			continue;
		bn = be64_to_cpu(*p);

		if (rgd) {
			if (!rgrp_contains_block(rgd, bn)) {
				blks_outside_rgrp++;
				continue;
			}
		} else {
			rgd = gfs2_blk2rgrpd(sdp, bn, true);
			if (unlikely(!rgd)) {
				ret = -EIO;
				goto out;
			}
			ret = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
						 0, rd_gh);
			if (ret)
				goto out;

			/* Must be done with the rgrp glock held: */
			if (gfs2_rs_active(&ip->i_res) &&
			    rgd == ip->i_res.rs_rbm.rgd)
				gfs2_rs_deltree(&ip->i_res);
		}

		/* The size of our transactions will be unknown until we
		   actually process all the metadata blocks that relate to
		   the rgrp. So we estimate. We know it can't be more than
		   the dinode's i_blocks and we don't want to exceed the
		   journal flush threshold, sd_log_thresh2. */
		if (current->journal_info == NULL) {
			unsigned int jblocks_rqsted, revokes;

			jblocks_rqsted = rgd->rd_length + RES_DINODE +
				RES_INDIRECT;
			isize_blks = gfs2_get_inode_blocks(&ip->i_inode);
			if (isize_blks > atomic_read(&sdp->sd_log_thresh2))
				jblocks_rqsted +=
					atomic_read(&sdp->sd_log_thresh2);
			else
				jblocks_rqsted += isize_blks;
			revokes = jblocks_rqsted;
			if (meta)
				revokes += end - start;
			else if (ip->i_depth)
				revokes += sdp->sd_inptrs;
			ret = gfs2_trans_begin(sdp, jblocks_rqsted, revokes);
			if (ret)
				goto out_unlock;
			down_write(&ip->i_rw_mutex);
		}
		/* check if we will exceed the transaction blocks requested */
		tr = current->journal_info;
		if (tr->tr_num_buf_new + RES_STATFS +
		    RES_QUOTA >= atomic_read(&sdp->sd_log_thresh2)) {
			/* We set blks_outside_rgrp to ensure the loop will
			   be repeated for the same rgrp, but with a new
			   transaction. */
			blks_outside_rgrp++;
			/* This next part is tricky. If the buffer was added
			   to the transaction, we've already set some block
			   pointers to 0, so we better follow through and free
			   them, or we will introduce corruption (so break).
			   This may be impossible, or at least rare, but I
			   decided to cover the case regardless.

			   If the buffer was not added to the transaction
			   (this call), doing so would exceed our transaction
			   size, so we need to end the transaction and start a
			   new one (so goto). */
			if (buf_in_tr)
				break;
			goto out_unlock;
		}

		gfs2_trans_add_meta(ip->i_gl, bh);
		buf_in_tr = true;
		*p = 0;
		if (bstart + blen == bn) {
			blen++;
			continue;
		}
		if (bstart) {
			__gfs2_free_blocks(ip, rgd, bstart, (u32)blen, meta);
			(*btotal) += blen;
			gfs2_add_inode_blocks(&ip->i_inode, -blen);
		}
		bstart = bn;
		blen = 1;
	}
	if (bstart) {
		__gfs2_free_blocks(ip, rgd, bstart, (u32)blen, meta);
		(*btotal) += blen;
		gfs2_add_inode_blocks(&ip->i_inode, -blen);
	}
out_unlock:
	if (!ret && blks_outside_rgrp) { /* If buffer still has non-zero blocks
					    outside the rgrp we just processed,
					    do it all over again. */
		if (current->journal_info) {
			struct buffer_head *dibh;

			ret = gfs2_meta_inode_buffer(ip, &dibh);
			if (ret)
				goto out;

			/* Every transaction boundary, we rewrite the dinode
			   to keep its di_blocks current in case of failure. */
			ip->i_inode.i_mtime = ip->i_inode.i_ctime =
				current_time(&ip->i_inode);
			gfs2_trans_add_meta(ip->i_gl, dibh);
			gfs2_dinode_out(ip, dibh->b_data);
			brelse(dibh);
			up_write(&ip->i_rw_mutex);
			gfs2_trans_end(sdp);
			buf_in_tr = false;
		}
		gfs2_glock_dq_uninit(rd_gh);
		cond_resched();
		goto more_rgrps;
	}
out:
	return ret;
}
static bool mp_eq_to_hgt(struct metapath *mp, __u16 *list, unsigned int h)
{
	if (memcmp(mp->mp_list, list, h * sizeof(mp->mp_list[0])))
		return false;
	return true;
}

/**
 * find_nonnull_ptr - find a non-null pointer given a metapath and height
 * @mp: starting metapath
 * @h: desired height to search
 *
 * Assumes the metapath is valid (with buffers) out to height h.
 * Returns: true if a non-null pointer was found in the metapath buffer
 *          false if all remaining pointers are NULL in the buffer
 */
static bool find_nonnull_ptr(struct gfs2_sbd *sdp, struct metapath *mp,
			     unsigned int h,
			     __u16 *end_list, unsigned int end_aligned)
{
	struct buffer_head *bh = mp->mp_bh[h];
	__be64 *first, *ptr, *end;

	first = metaptr1(h, mp);
	ptr = first + mp->mp_list[h];
	end = (__be64 *)(bh->b_data + bh->b_size);
	if (end_list && mp_eq_to_hgt(mp, end_list, h)) {
		bool keep_end = h < end_aligned;
		end = first + end_list[h] + keep_end;
	}

	while (ptr < end) {
		if (*ptr) { /* if we have a non-null pointer */
			mp->mp_list[h] = ptr - first;
			h++;
			if (h < GFS2_MAX_META_HEIGHT)
				mp->mp_list[h] = 0;
			return true;
		}
		ptr++;
	}
	return false;
}
enum dealloc_states {
	DEALLOC_MP_FULL = 0,    /* Strip a metapath with all buffers read in */
	DEALLOC_MP_LOWER = 1,   /* lower the metapath strip height */
	DEALLOC_FILL_MP = 2,    /* Fill in the metapath to the given height. */
	DEALLOC_DONE = 3,       /* process complete */
};
static inline void
metapointer_range(struct metapath *mp, int height,
		  __u16 *start_list, unsigned int start_aligned,
		  __u16 *end_list, unsigned int end_aligned,
		  __be64 **start, __be64 **end)
{
	struct buffer_head *bh = mp->mp_bh[height];
	__be64 *first;

	first = metaptr1(height, mp);
	*start = first;
	if (mp_eq_to_hgt(mp, start_list, height)) {
		bool keep_start = height < start_aligned;
		*start = first + start_list[height] + keep_start;
	}
	*end = (__be64 *)(bh->b_data + bh->b_size);
	if (end_list && mp_eq_to_hgt(mp, end_list, height)) {
		bool keep_end = height < end_aligned;
		*end = first + end_list[height] + keep_end;
	}
}
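
/*
 * walk_done() reports whether mp_list[height] has advanced past the
 * last pointer that may be visited: past end_list for the path at the
 * end boundary, or past the pointer count of the block otherwise.
 */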
static inline bool walk_done(struct gfs2_sbd *sdp,
			     struct metapath *mp, int height,
			     __u16 *end_list, unsigned int end_aligned)
{
	__u16 end;

	if (end_list) {
		bool keep_end = height < end_aligned;
		if (!mp_eq_to_hgt(mp, end_list, height))
			return false;
		end = end_list[height] + keep_end;
	} else
		end = (height > 0) ? sdp->sd_inptrs : sdp->sd_diptrs;
	return mp->mp_list[height] >= end;
}
/**
 * punch_hole - deallocate blocks in a file
 * @ip: inode to truncate
 * @offset: the start of the hole
 * @length: the size of the hole (or 0 for truncate)
 *
 * Punch a hole into a file or truncate a file at a given position. This
 * function operates in whole blocks (@offset and @length are rounded
 * accordingly); partially filled blocks must be cleared otherwise.
 *
 * This function works from the bottom up, and from the right to the left. In
 * other words, it strips off the highest layer (data) before stripping any of
 * the metadata. Doing it this way is best in case the operation is interrupted
 * by power failure, etc. The dinode is rewritten in every transaction to
 * guarantee integrity.
 */
static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	u64 maxsize = sdp->sd_heightsize[ip->i_height];
	struct metapath mp = {};
	struct buffer_head *dibh, *bh;
	struct gfs2_holder rd_gh;
	unsigned int bsize_shift = sdp->sd_sb.sb_bsize_shift;
	u64 lblock = (offset + (1 << bsize_shift) - 1) >> bsize_shift;
	__u16 start_list[GFS2_MAX_META_HEIGHT];
	__u16 __end_list[GFS2_MAX_META_HEIGHT], *end_list = NULL;
	unsigned int start_aligned, uninitialized_var(end_aligned);
	unsigned int strip_h = ip->i_height - 1;
	u32 btotal = 0;
	int ret, state;
	int mp_h; /* metapath buffers are read in to this height */
	u64 prev_bnr = 0;
	__be64 *start, *end;

	if (offset >= maxsize) {
		/*
		 * The starting point lies beyond the allocated metadata;
		 * there are no blocks to deallocate.
		 */
		return 0;
	}

	/*
	 * The start position of the hole is defined by lblock, start_list, and
	 * start_aligned. The end position of the hole is defined by lend,
	 * end_list, and end_aligned.
	 *
	 * start_aligned and end_aligned define down to which height the start
	 * and end positions are aligned to the metadata tree (i.e., the
	 * position is a multiple of the metadata granularity at the height
	 * above). This determines at which heights additional meta pointers
	 * need to be preserved for the remaining data.
	 */

	if (length) {
		u64 end_offset = offset + length;
		u64 lend;

		/*
		 * Clip the end at the maximum file size for the given height:
		 * that's how far the metadata goes; files bigger than that
		 * will have additional layers of indirection.
		 */
		if (end_offset > maxsize)
			end_offset = maxsize;
		lend = end_offset >> bsize_shift;

		if (lblock >= lend)
			return 0;

		find_metapath(sdp, lend, &mp, ip->i_height);
		end_list = __end_list;
		memcpy(end_list, mp.mp_list, sizeof(mp.mp_list));

		for (mp_h = ip->i_height - 1; mp_h > 0; mp_h--) {
			if (end_list[mp_h])
				break;
		}
		end_aligned = mp_h;
	}

	find_metapath(sdp, lblock, &mp, ip->i_height);
	memcpy(start_list, mp.mp_list, sizeof(start_list));

	for (mp_h = ip->i_height - 1; mp_h > 0; mp_h--) {
		if (start_list[mp_h])
			break;
	}
	start_aligned = mp_h;

	ret = gfs2_meta_inode_buffer(ip, &dibh);
	if (ret)
		return ret;

	mp.mp_bh[0] = dibh;
	ret = lookup_metapath(ip, &mp);
	if (ret)
		goto out_metapath;

	/* issue read-ahead on metadata */
	for (mp_h = 0; mp_h < mp.mp_aheight - 1; mp_h++) {
		metapointer_range(&mp, mp_h, start_list, start_aligned,
				  end_list, end_aligned, &start, &end);
		gfs2_metapath_ra(ip->i_gl, start, end);
	}

	if (mp.mp_aheight == ip->i_height)
		state = DEALLOC_MP_FULL; /* We have a complete metapath */
	else
		state = DEALLOC_FILL_MP; /* deal with partial metapath */

	ret = gfs2_rindex_update(sdp);
	if (ret)
		goto out_metapath;

	ret = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
	if (ret)
		goto out_metapath;
	gfs2_holder_mark_uninitialized(&rd_gh);
	while (state != DEALLOC_DONE) {
		switch (state) {
		/* Truncate a full metapath at the given strip height.
		 * Note that strip_h == mp_h in order to be in this state. */
		case DEALLOC_MP_FULL:
			bh = mp.mp_bh[mp_h];
			gfs2_assert_withdraw(sdp, bh);
			if (gfs2_assert_withdraw(sdp,
						 prev_bnr != bh->b_blocknr)) {
				printk(KERN_EMERG "GFS2: fsid=%s:inode %llu, "
				       "block:%llu, i_h:%u, s_h:%u, mp_h:%u\n",
				       sdp->sd_fsname,
				       (unsigned long long)ip->i_no_addr,
				       prev_bnr, ip->i_height, strip_h, mp_h);
			}
			prev_bnr = bh->b_blocknr;

			if (gfs2_metatype_check(sdp, bh,
						(mp_h ? GFS2_METATYPE_IN :
							GFS2_METATYPE_DI))) {
				ret = -EIO;
				goto out;
			}

			/*
			 * Below, passing end_aligned as 0 gives us the
			 * metapointer range excluding the end point: the end
			 * point is the first metapath we must not deallocate!
			 */

			metapointer_range(&mp, mp_h, start_list, start_aligned,
					  end_list, 0 /* end_aligned */,
					  &start, &end);
			ret = sweep_bh_for_rgrps(ip, &rd_gh, mp.mp_bh[mp_h],
						 start, end,
						 mp_h != ip->i_height - 1,
						 &btotal);

			/* If we hit an error or just swept dinode buffer,
			   just exit. */
			if (ret || !mp_h) {
				state = DEALLOC_DONE;
				break;
			}
			state = DEALLOC_MP_LOWER;
			break;

		/* lower the metapath strip height */
		case DEALLOC_MP_LOWER:
			/* We're done with the current buffer, so release it,
			   unless it's the dinode buffer. Then back up to the
			   previous pointer. */
			if (mp_h) {
				brelse(mp.mp_bh[mp_h]);
				mp.mp_bh[mp_h] = NULL;
			}
			/* If we can't get any lower in height, we've stripped
			   off all we can. Next step is to back up and start
			   stripping the previous level of metadata. */
			if (mp_h == 0) {
				strip_h--;
				memcpy(mp.mp_list, start_list, sizeof(start_list));
				mp_h = strip_h;
				state = DEALLOC_FILL_MP;
				break;
			}
			mp.mp_list[mp_h] = 0;
			mp_h--; /* search one metadata height down */
			mp.mp_list[mp_h]++;
			if (walk_done(sdp, &mp, mp_h, end_list, end_aligned))
				break;
			/* Here we've found a part of the metapath that is not
			 * allocated. We need to search at that height for the
			 * next non-null pointer. */
			if (find_nonnull_ptr(sdp, &mp, mp_h, end_list, end_aligned)) {
				state = DEALLOC_FILL_MP;
				mp_h++;
			}
			/* No more non-null pointers at this height. Back up
			   to the previous height and try again. */
			break; /* loop around in the same state */

		/* Fill the metapath with buffers to the given height. */
		case DEALLOC_FILL_MP:
			/* Fill the buffers out to the current height. */
			ret = fillup_metapath(ip, &mp, mp_h);
			if (ret < 0)
				goto out;

			/* On the first pass, issue read-ahead on metadata. */
			if (mp.mp_aheight > 1 && strip_h == ip->i_height - 1) {
				unsigned int height = mp.mp_aheight - 1;

				/* No read-ahead for data blocks. */
				if (mp.mp_aheight - 1 == strip_h)
					height--;

				for (; height >= mp.mp_aheight - ret; height--) {
					metapointer_range(&mp, height,
							  start_list, start_aligned,
							  end_list, end_aligned,
							  &start, &end);
					gfs2_metapath_ra(ip->i_gl, start, end);
				}
			}

			/* If buffers found for the entire strip height */
			if (mp.mp_aheight - 1 == strip_h) {
				state = DEALLOC_MP_FULL;
				break;
			}
			if (mp.mp_aheight < ip->i_height) /* We have a partial height */
				mp_h = mp.mp_aheight - 1;

			/* If we find a non-null block pointer, crawl a bit
			   higher up in the metapath and try again, otherwise
			   we need to look lower for a new starting point. */
			if (find_nonnull_ptr(sdp, &mp, mp_h, end_list, end_aligned))
				mp_h++;
			else
				state = DEALLOC_MP_LOWER;
			break;
		}
	}
	if (btotal) {
		if (current->journal_info == NULL) {
			ret = gfs2_trans_begin(sdp, RES_DINODE + RES_STATFS +
					       RES_QUOTA, 0);
			if (ret)
				goto out;
			down_write(&ip->i_rw_mutex);
		}
		gfs2_statfs_change(sdp, 0, +btotal, 0);
		gfs2_quota_change(ip, -(s64)btotal, ip->i_inode.i_uid,
				  ip->i_inode.i_gid);
		ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
		gfs2_trans_add_meta(ip->i_gl, dibh);
		gfs2_dinode_out(ip, dibh->b_data);
		up_write(&ip->i_rw_mutex);
		gfs2_trans_end(sdp);
	}

out:
	if (gfs2_holder_initialized(&rd_gh))
		gfs2_glock_dq_uninit(&rd_gh);
	if (current->journal_info) {
		up_write(&ip->i_rw_mutex);
		gfs2_trans_end(sdp);
		cond_resched();
	}
	gfs2_quota_unhold(ip);
out_metapath:
	release_metapath(&mp);
	return ret;
}
static int trunc_end(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head *dibh;
	int error;

	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
	if (error)
		return error;

	down_write(&ip->i_rw_mutex);

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto out;

	if (!i_size_read(&ip->i_inode)) {
		ip->i_height = 0;
		ip->i_goal = ip->i_no_addr;
		gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
		gfs2_ordered_del_inode(ip);
	}
	ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
	ip->i_diskflags &= ~GFS2_DIF_TRUNC_IN_PROG;

	gfs2_trans_add_meta(ip->i_gl, dibh);
	gfs2_dinode_out(ip, dibh->b_data);
	brelse(dibh);

out:
	up_write(&ip->i_rw_mutex);
	gfs2_trans_end(sdp);
	return error;
}
/**
 * do_shrink - make a file smaller
 * @inode: the inode
 * @newsize: the size to make the file
 *
 * Called with an exclusive lock on @inode. The @newsize must
 * be equal to or smaller than the current inode size.
 *
 * Returns: errno
 */
static int do_shrink(struct inode *inode, u64 newsize)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	int error;

	error = trunc_start(inode, newsize);
	if (error < 0)
		return error;
	if (gfs2_is_stuffed(ip))
		return 0;

	error = punch_hole(ip, newsize, 0);
	if (error == 0)
		error = trunc_end(ip);

	return error;
}
void gfs2_trim_blocks(struct inode *inode)
{
	int ret;

	ret = do_shrink(inode, inode->i_size);
	WARN_ON(ret != 0);
}
/**
 * do_grow - Touch and update inode size
 * @inode: The inode
 * @size: The new size
 *
 * This function updates the timestamps on the inode and
 * may also increase the size of the inode. This function
 * must not be called with @size any smaller than the current
 * inode size.
 *
 * Although it is not strictly required to unstuff files here,
 * earlier versions of GFS2 have a bug in the stuffed file reading
 * code which will result in a buffer overrun if the size is larger
 * than the max stuffed file size. In order to prevent this from
 * occurring, such files are unstuffed, but in other cases we can
 * just update the inode size directly.
 *
 * Returns: 0 on success, or -ve on error
 */
static int do_grow(struct inode *inode, u64 size)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_alloc_parms ap = { .target = 1, };
	struct buffer_head *dibh;
	int error;
	int unstuff = 0;

	if (gfs2_is_stuffed(ip) && size > gfs2_max_stuffed_size(ip)) {
		error = gfs2_quota_lock_check(ip, &ap);
		if (error)
			return error;

		error = gfs2_inplace_reserve(ip, &ap);
		if (error)
			goto do_grow_qunlock;
		unstuff = 1;
	}

	error = gfs2_trans_begin(sdp, RES_DINODE + RES_STATFS + RES_RG_BIT +
				 (unstuff &&
				  gfs2_is_jdata(ip) ? RES_JDATA : 0) +
				 (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF ?
				  0 : RES_QUOTA), 0);
	if (error)
		goto do_grow_release;

	if (unstuff) {
		error = gfs2_unstuff_dinode(ip, NULL);
		if (error)
			goto do_end_trans;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto do_end_trans;

	i_size_write(inode, size);
	ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
	gfs2_trans_add_meta(ip->i_gl, dibh);
	gfs2_dinode_out(ip, dibh->b_data);
	brelse(dibh);

do_end_trans:
	gfs2_trans_end(sdp);
do_grow_release:
	if (unstuff) {
		gfs2_inplace_release(ip);
do_grow_qunlock:
		gfs2_quota_unlock(ip);
	}
	return error;
}
/**
 * gfs2_setattr_size - make a file a given size
 * @inode: the inode
 * @newsize: the size to make the file
 *
 * The file size can grow, shrink, or stay the same size. This
 * is called holding i_rwsem and an exclusive glock on the inode
 * in question.
 *
 * Returns: errno
 */
int gfs2_setattr_size(struct inode *inode, u64 newsize)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	int ret;

	BUG_ON(!S_ISREG(inode->i_mode));

	ret = inode_newsize_ok(inode, newsize);
	if (ret)
		return ret;

	inode_dio_wait(inode);

	ret = gfs2_rsqa_alloc(ip);
	if (ret)
		goto out;

	if (newsize >= inode->i_size) {
		ret = do_grow(inode, newsize);
		goto out;
	}

	ret = do_shrink(inode, newsize);
out:
	gfs2_rsqa_delete(ip, NULL);
	return ret;
}
int gfs2_truncatei_resume(struct gfs2_inode *ip)
{
	int error;

	error = punch_hole(ip, i_size_read(&ip->i_inode), 0);
	if (!error)
		error = trunc_end(ip);

	return error;
}

int gfs2_file_dealloc(struct gfs2_inode *ip)
{
	return punch_hole(ip, 0, 0);
}
/**
 * gfs2_free_journal_extents - Free cached journal bmap info
 * @jd: The journal
 *
 */
void gfs2_free_journal_extents(struct gfs2_jdesc *jd)
{
	struct gfs2_journal_extent *jext;

	while(!list_empty(&jd->extent_list)) {
		jext = list_entry(jd->extent_list.next, struct gfs2_journal_extent, list);
		list_del(&jext->list);
		kfree(jext);
	}
}
/**
 * gfs2_add_jextent - Add or merge a new extent to extent cache
 * @jd: The journal descriptor
 * @lblock: The logical block at start of new extent
 * @dblock: The physical block at start of new extent
 * @blocks: Size of extent in fs blocks
 *
 * Returns: 0 on success or -ENOMEM
 */
static int gfs2_add_jextent(struct gfs2_jdesc *jd, u64 lblock, u64 dblock, u64 blocks)
{
	struct gfs2_journal_extent *jext;

	if (!list_empty(&jd->extent_list)) {
		jext = list_entry(jd->extent_list.prev, struct gfs2_journal_extent, list);
		if ((jext->dblock + jext->blocks) == dblock) {
			jext->blocks += blocks;
			return 0;
		}
	}

	jext = kzalloc(sizeof(struct gfs2_journal_extent), GFP_NOFS);
	if (jext == NULL)
		return -ENOMEM;
	jext->dblock = dblock;
	jext->lblock = lblock;
	jext->blocks = blocks;
	list_add_tail(&jext->list, &jd->extent_list);
	jd->nr_extents++;
	return 0;
}
/**
 * gfs2_map_journal_extents - Cache journal bmap info
 * @sdp: The super block
 * @jd: The journal to map
 *
 * Create a reusable "extent" mapping from all logical
 * blocks to all physical blocks for the given journal. This will save
 * us time when writing journal blocks. Most journals will have only one
 * extent that maps all their logical blocks. That's because gfs2.mkfs
 * arranges the journal blocks sequentially to maximize performance.
 * So the extent would map the first block for the entire file length.
 * However, gfs2_jadd can happen while file activity is happening, so
 * those journals may not be sequential. Less likely is the case where
 * the users created their own journals by mounting the metafs and
 * laying it out. But it's still possible. These journals might have
 * several extents.
 *
 * Returns: 0 on success, or error on failure
 */
int gfs2_map_journal_extents(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd)
{
	u64 lblock = 0;
	u64 lblock_stop;
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct buffer_head bh;
	unsigned int shift = sdp->sd_sb.sb_bsize_shift;
	u64 size;
	int rc;
	ktime_t start, end;

	start = ktime_get();
	lblock_stop = i_size_read(jd->jd_inode) >> shift;
	size = (lblock_stop - lblock) << shift;
	jd->nr_extents = 0;
	WARN_ON(!list_empty(&jd->extent_list));

	do {
		bh.b_state = 0;
		bh.b_blocknr = 0;
		bh.b_size = size;
		rc = gfs2_block_map(jd->jd_inode, lblock, &bh, 0);
		if (rc || !buffer_mapped(&bh))
			goto fail;
		rc = gfs2_add_jextent(jd, lblock, bh.b_blocknr, bh.b_size >> shift);
		if (rc)
			goto fail;
		size -= bh.b_size;
		lblock += (bh.b_size >> ip->i_inode.i_blkbits);
	} while(size > 0);

	end = ktime_get();
	fs_info(sdp, "journal %d mapped with %u extents in %lldms\n", jd->jd_jid,
		jd->nr_extents, ktime_ms_delta(end, start));
	return 0;

fail:
	fs_warn(sdp, "error %d mapping journal %u at offset %llu (extent %u)\n",
		rc, jd->jd_jid,
		(unsigned long long)(i_size_read(jd->jd_inode) - size),
		jd->nr_extents);
	fs_warn(sdp, "bmap=%d lblock=%llu block=%llu, state=0x%08lx, size=%llu\n",
		rc, (unsigned long long)lblock, (unsigned long long)bh.b_blocknr,
		bh.b_state, (unsigned long long)bh.b_size);
	gfs2_free_journal_extents(jd);
	return rc;
}
/**
 * gfs2_write_alloc_required - figure out if a write will require an allocation
 * @ip: the file being written to
 * @offset: the offset to write to
 * @len: the number of bytes being written
 *
 * Returns: 1 if an alloc is required, 0 otherwise
 */
int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset,
			      unsigned int len)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head bh;
	unsigned int shift;
	u64 lblock, lblock_stop, size;
	u64 end_of_file;

	if (!len)
		return 0;

	if (gfs2_is_stuffed(ip)) {
		if (offset + len > gfs2_max_stuffed_size(ip))
			return 1;
		return 0;
	}

	shift = sdp->sd_sb.sb_bsize_shift;
	BUG_ON(gfs2_is_dir(ip));
	end_of_file = (i_size_read(&ip->i_inode) + sdp->sd_sb.sb_bsize - 1) >> shift;
	lblock = offset >> shift;
	lblock_stop = (offset + len + sdp->sd_sb.sb_bsize - 1) >> shift;
	if (lblock_stop > end_of_file && ip != GFS2_I(sdp->sd_rindex))
		return 1;

	size = (lblock_stop - lblock) << shift;
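	/*
	 * Probe the range with read-only mappings; the first unmapped
	 * buffer means the write needs an allocation.
	 */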
	do {
		bh.b_state = 0;
		bh.b_size = size;
		gfs2_block_map(&ip->i_inode, lblock, &bh, 0);
		if (!buffer_mapped(&bh))
			return 1;
		size -= bh.b_size;
		lblock += (bh.b_size >> ip->i_inode.i_blkbits);
	} while(size > 0);

	return 0;
}
static int stuffed_zero_range(struct inode *inode, loff_t offset, loff_t length)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct buffer_head *dibh;
	int error;

	if (offset >= inode->i_size)
		return 0;
	if (offset + length > inode->i_size)
		length = inode->i_size - offset;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;
	gfs2_trans_add_meta(ip->i_gl, dibh);
	memset(dibh->b_data + sizeof(struct gfs2_dinode) + offset, 0,
	       length);
	brelse(dibh);
	return 0;
}
static int gfs2_journaled_truncate_range(struct inode *inode, loff_t offset,
					 loff_t length)
{
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	loff_t max_chunk = GFS2_JTRUNC_REVOKES * sdp->sd_vfs->s_blocksize;
	int error;

	while (length) {
		struct gfs2_trans *tr;
		loff_t chunk = length;
		unsigned int offs;

		if (chunk > max_chunk)
			chunk = max_chunk;

		offs = offset & ~PAGE_MASK;
		if (offs && chunk > PAGE_SIZE)
			chunk = offs + ((chunk - offs) & PAGE_MASK);

		truncate_pagecache_range(inode, offset, chunk);
		offset += chunk;
		length -= chunk;

		tr = current->journal_info;
		if (!test_bit(TR_TOUCHED, &tr->tr_flags))
			continue;

		gfs2_trans_end(sdp);
		error = gfs2_trans_begin(sdp, RES_DINODE, GFS2_JTRUNC_REVOKES);
		if (error)
			return error;
	}
	return 0;
}
int __gfs2_punch_hole(struct file *file, loff_t offset, loff_t length)
{
	struct inode *inode = file_inode(file);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	int error;

	if (gfs2_is_jdata(ip))
		error = gfs2_trans_begin(sdp, RES_DINODE + 2 * RES_JDATA,
					 GFS2_JTRUNC_REVOKES);
	else
		error = gfs2_trans_begin(sdp, RES_DINODE, 0);
	if (error)
		return error;

	if (gfs2_is_stuffed(ip)) {
		error = stuffed_zero_range(inode, offset, length);
		if (error)
			goto out;
	} else {
		unsigned int start_off, end_len, blocksize;

		blocksize = i_blocksize(inode);
		start_off = offset & (blocksize - 1);
		end_len = (offset + length) & (blocksize - 1);
		if (start_off) {
			unsigned int len = length;
			if (length > blocksize - start_off)
				len = blocksize - start_off;
			error = gfs2_block_zero_range(inode, offset, len);
			if (error)
				goto out;
			if (start_off + length < blocksize)
				end_len = 0;
		}
		if (end_len) {
			error = gfs2_block_zero_range(inode,
				offset + length - end_len, end_len);
			if (error)
				goto out;
		}
	}

	if (gfs2_is_jdata(ip)) {
		BUG_ON(!current->journal_info);
		gfs2_journaled_truncate_range(inode, offset, length);
	} else
		truncate_pagecache_range(inode, offset, offset + length - 1);

	file_update_time(file);
	mark_inode_dirty(inode);

	if (current->journal_info)
		gfs2_trans_end(sdp);

	if (!gfs2_is_stuffed(ip))
		error = punch_hole(ip, offset, length);

out:
	if (current->journal_info)
		gfs2_trans_end(sdp);
	return error;
}