fs/gfs2/bmap.c
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10#include <linux/spinlock.h>
11#include <linux/completion.h>
12#include <linux/buffer_head.h>
13#include <linux/blkdev.h>
14#include <linux/gfs2_ondisk.h>
15#include <linux/crc32.h>
16
17#include "gfs2.h"
18#include "incore.h"
19#include "bmap.h"
20#include "glock.h"
21#include "inode.h"
22#include "meta_io.h"
23#include "quota.h"
24#include "rgrp.h"
25#include "log.h"
26#include "super.h"
27#include "trans.h"
28#include "dir.h"
29#include "util.h"
30#include "trace_gfs2.h"
31
32/* This doesn't need to be that large as max 64 bit pointers in a 4k
33 * block is 512, so __u16 is fine for that. It saves stack space to
34 * keep it small.
35 */
36struct metapath {
37 struct buffer_head *mp_bh[GFS2_MAX_META_HEIGHT];
38 __u16 mp_list[GFS2_MAX_META_HEIGHT];
39};
40
41/**
42 * gfs2_unstuffer_page - unstuff a stuffed inode into a block cached by a page
43 * @ip: the inode
44 * @dibh: the dinode buffer
45 * @block: the block number that was allocated
46 * @page: The (optional) page. This is looked up if @page is NULL
47 *
48 * Returns: errno
49 */
50
51static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh,
52 u64 block, struct page *page)
53{
54 struct inode *inode = &ip->i_inode;
55 struct buffer_head *bh;
56 int release = 0;
57
58 if (!page || page->index) {
59 page = find_or_create_page(inode->i_mapping, 0, GFP_NOFS);
60 if (!page)
61 return -ENOMEM;
62 release = 1;
63 }
64
65 if (!PageUptodate(page)) {
66 void *kaddr = kmap(page);
67 u64 dsize = i_size_read(inode);
68
69 if (dsize > (dibh->b_size - sizeof(struct gfs2_dinode)))
70 dsize = dibh->b_size - sizeof(struct gfs2_dinode);
71
72 memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
73 memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
74 kunmap(page);
75
76 SetPageUptodate(page);
77 }
78
79 if (!page_has_buffers(page))
80 create_empty_buffers(page, BIT(inode->i_blkbits),
81 BIT(BH_Uptodate));
82
83 bh = page_buffers(page);
84
85 if (!buffer_mapped(bh))
86 map_bh(bh, inode->i_sb, block);
87
88 set_buffer_uptodate(bh);
89 if (!gfs2_is_jdata(ip))
90 mark_buffer_dirty(bh);
91 if (!gfs2_is_writeback(ip))
92 gfs2_trans_add_data(ip->i_gl, bh);
93
94 if (release) {
95 unlock_page(page);
96 put_page(page);
97 }
98
99 return 0;
100}
101
102/**
103 * gfs2_unstuff_dinode - Unstuff a dinode when the data has grown too big
104 * @ip: The GFS2 inode to unstuff
105 * @page: The (optional) page. This is looked up if @page is NULL
106 *
107 * This routine unstuffs a dinode and returns it to a "normal" state such
108 * that the height can be grown in the traditional way.
109 *
110 * Returns: errno
111 */
112
113int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page)
114{
115 struct buffer_head *bh, *dibh;
116 struct gfs2_dinode *di;
117 u64 block = 0;
118 int isdir = gfs2_is_dir(ip);
119 int error;
120
121 down_write(&ip->i_rw_mutex);
122
123 error = gfs2_meta_inode_buffer(ip, &dibh);
124 if (error)
125 goto out;
126
127 if (i_size_read(&ip->i_inode)) {
128 /* Get a free block, fill it with the stuffed data,
129 and write it out to disk */
130
131 unsigned int n = 1;
132 error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL);
133 if (error)
134 goto out_brelse;
135 if (isdir) {
136 gfs2_trans_add_unrevoke(GFS2_SB(&ip->i_inode), block, 1);
137 error = gfs2_dir_get_new_buffer(ip, block, &bh);
138 if (error)
139 goto out_brelse;
140 gfs2_buffer_copy_tail(bh, sizeof(struct gfs2_meta_header),
141 dibh, sizeof(struct gfs2_dinode));
142 brelse(bh);
143 } else {
144 error = gfs2_unstuffer_page(ip, dibh, block, page);
145 if (error)
146 goto out_brelse;
147 }
148 }
149
150 /* Set up the pointer to the new block */
151
152 gfs2_trans_add_meta(ip->i_gl, dibh);
153 di = (struct gfs2_dinode *)dibh->b_data;
154 gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
155
156 if (i_size_read(&ip->i_inode)) {
157 *(__be64 *)(di + 1) = cpu_to_be64(block);
158 gfs2_add_inode_blocks(&ip->i_inode, 1);
159 di->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode));
160 }
161
162 ip->i_height = 1;
163 di->di_height = cpu_to_be16(1);
164
165out_brelse:
166 brelse(dibh);
167out:
168 up_write(&ip->i_rw_mutex);
169 return error;
170}
171
172
173/**
174 * find_metapath - Find path through the metadata tree
175 * @sdp: The superblock
176 * @mp: The metapath to return the result in
177 * @block: The disk block to look up
178 * @height: The pre-calculated height of the metadata tree
179 *
180 * This routine returns a struct metapath structure that defines a path
181 * through the metadata of inode "ip" to get to block "block".
182 *
183 * Example:
184 * Given: "ip" is a height 3 file, "offset" is 101342453, and this is a
185 * filesystem with a blocksize of 4096.
186 *
187 * find_metapath() would return a struct metapath structure set to:
188 * mp_list[0] = 0, mp_list[1] = 48,
189 * and mp_list[2] = 165.
190 *
191 * That means that in order to get to the block containing the byte at
192 * offset 101342453, we would load the indirect block pointed to by pointer
193 * 0 in the dinode. We would then load the indirect block pointed to by
194 * pointer 48 in that indirect block. We would then load the data block
195 * pointed to by pointer 165 in that indirect block.
196 *
197 * ----------------------------------------
198 * | Dinode | |
199 * | | 4|
200 * | |0 1 2 3 4 5 9|
201 * | | 6|
202 * ----------------------------------------
203 * |
204 * |
205 * V
206 * ----------------------------------------
207 * | Indirect Block |
208 * | 5|
209 * | 4 4 4 4 4 5 5 1|
210 * |0 5 6 7 8 9 0 1 2|
211 * ----------------------------------------
212 * |
213 * |
214 * V
215 * ----------------------------------------
216 * | Indirect Block |
217 * | 1 1 1 1 1 5|
218 * | 6 6 6 6 6 1|
219 * |0 3 4 5 6 7 2|
220 * ----------------------------------------
221 * |
222 * |
223 * V
224 * ----------------------------------------
225 * | Data block containing offset |
226 * | 101342453 |
227 * | |
228 * | |
229 * ----------------------------------------
230 *
231 */
232
233static void find_metapath(const struct gfs2_sbd *sdp, u64 block,
234 struct metapath *mp, unsigned int height)
235{
236 unsigned int i;
237
238 for (i = height; i--;)
239 mp->mp_list[i] = do_div(block, sdp->sd_inptrs);
240
241}
242
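/* Editor's note, inferred from the code below and its use in gfs2_bmap_alloc():
 * returns the height at which a new branch of indirect blocks must start when
 * the tree height is grown. If the lookup path goes through pointer 0 (where
 * the old top of the tree is re-linked after a height increase), branching can
 * only start at height 2; otherwise it starts at height 1.
 */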
243static inline unsigned int metapath_branch_start(const struct metapath *mp)
244{
245 if (mp->mp_list[0] == 0)
246 return 2;
247 return 1;
248}
249
250/**
251 * metaptr1 - Return the first possible metadata pointer in a metapath buffer
252 * @height: The metadata height (0 = dinode)
253 * @mp: The metapath
254 */
255static inline __be64 *metaptr1(unsigned int height, const struct metapath *mp)
256{
257 struct buffer_head *bh = mp->mp_bh[height];
258 if (height == 0)
259 return ((__be64 *)(bh->b_data + sizeof(struct gfs2_dinode)));
260 return ((__be64 *)(bh->b_data + sizeof(struct gfs2_meta_header)));
261}
262
263/**
264 * metapointer - Return pointer to start of metadata in a buffer
265 * @height: The metadata height (0 = dinode)
266 * @mp: The metapath
267 *
268 * Return a pointer to the block number of the next height of the metadata
269 * tree given a buffer containing the pointer to the current height of the
270 * metadata tree.
271 */
272
273static inline __be64 *metapointer(unsigned int height, const struct metapath *mp)
274{
275 __be64 *p = metaptr1(height, mp);
276 return p + mp->mp_list[height];
277}
278
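/* Issue read-ahead for the metadata blocks referenced by the pointers from
 * @pos to the end of buffer @bh, skipping any block whose buffer is already
 * locked or up to date.
 */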
279static void gfs2_metapath_ra(struct gfs2_glock *gl,
280 const struct buffer_head *bh, const __be64 *pos)
281{
282 struct buffer_head *rabh;
283 const __be64 *endp = (const __be64 *)(bh->b_data + bh->b_size);
284 const __be64 *t;
285
286 for (t = pos; t < endp; t++) {
287 if (!*t)
288 continue;
289
290 rabh = gfs2_getbuf(gl, be64_to_cpu(*t), CREATE);
291 if (trylock_buffer(rabh)) {
292 if (!buffer_uptodate(rabh)) {
293 rabh->b_end_io = end_buffer_read_sync;
294 submit_bh(REQ_OP_READ,
295 REQ_RAHEAD | REQ_META | REQ_PRIO,
296 rabh);
297 continue;
298 }
299 unlock_buffer(rabh);
300 }
301 brelse(rabh);
302 }
303}
304
305/**
306 * lookup_mp_height - helper function for lookup_metapath
307 * @ip: the inode
308 * @mp: the metapath
309 * @h: the height which needs looking up
310 */
311static int lookup_mp_height(struct gfs2_inode *ip, struct metapath *mp, int h)
312{
313 __be64 *ptr = metapointer(h, mp);
314 u64 dblock = be64_to_cpu(*ptr);
315
316 if (!dblock)
317 return h + 1;
318
319 return gfs2_meta_indirect_buffer(ip, h + 1, dblock, &mp->mp_bh[h + 1]);
320}
321
322/**
323 * lookup_metapath - Walk the metadata tree to a specific point
324 * @ip: The inode
325 * @mp: The metapath
326 *
327 * Assumes that the inode's buffer has already been looked up and
328 * hooked onto mp->mp_bh[0] and that the metapath has been initialised
329 * by find_metapath().
330 *
331 * If this function encounters part of the tree which has not been
332 * allocated, it returns the current height of the tree at the point
333 * at which it found the unallocated block. Blocks which are found are
334 * added to the mp->mp_bh[] list.
335 *
336 * Returns: error or height of metadata tree
337 */
338
339static int lookup_metapath(struct gfs2_inode *ip, struct metapath *mp)
340{
341 unsigned int end_of_metadata = ip->i_height - 1;
342 unsigned int x;
343 int ret;
344
345 for (x = 0; x < end_of_metadata; x++) {
346 ret = lookup_mp_height(ip, mp, x);
347 if (ret)
348 return ret;
349 }
350
351 return ip->i_height;
352}
353
354/**
355 * fillup_metapath - fill up buffers for the metadata path to a specific height
356 * @ip: The inode
357 * @mp: The metapath
358 * @h: The height to which it should be mapped
359 *
360 * Similar to lookup_metapath, but does lookups for a range of heights
361 *
362 * Returns: error or height of metadata tree
363 */
364
365static int fillup_metapath(struct gfs2_inode *ip, struct metapath *mp, int h)
366{
367 unsigned int start_h = h - 1;
368 int ret;
369
370 if (h) {
371 /* find the first buffer we need to look up. */
372 while (start_h > 0 && mp->mp_bh[start_h] == NULL)
373 start_h--;
374 for (; start_h < h; start_h++) {
375 ret = lookup_mp_height(ip, mp, start_h);
376 if (ret)
377 return ret;
378 }
379 }
380 return ip->i_height;
381}
382
383static inline void release_metapath(struct metapath *mp)
384{
385 int i;
386
387 for (i = 0; i < GFS2_MAX_META_HEIGHT; i++) {
388 if (mp->mp_bh[i] == NULL)
389 break;
390 brelse(mp->mp_bh[i]);
391 }
392}
393
394/**
395 * gfs2_extent_length - Returns length of an extent of blocks
396 * @start: Start of the buffer
397 * @len: Length of the buffer in bytes
398 * @ptr: Current position in the buffer
399 * @limit: Max extent length to return (0 = unlimited)
400 * @eob: Set to 1 if we hit "end of block"
401 *
402 * If the first block is zero (unallocated) it will return the number of
403 * unallocated blocks in the extent, otherwise it will return the number
404 * of contiguous blocks in the extent.
405 *
406 * Returns: The length of the extent (minimum of one block)
407 */
408
409static inline unsigned int gfs2_extent_length(void *start, unsigned int len, __be64 *ptr, size_t limit, int *eob)
410{
411 const __be64 *end = (start + len);
412 const __be64 *first = ptr;
413 u64 d = be64_to_cpu(*ptr);
414
415 *eob = 0;
416 do {
417 ptr++;
418 if (ptr >= end)
419 break;
420 if (limit && --limit == 0)
421 break;
422 if (d)
423 d++;
424 } while(be64_to_cpu(*ptr) == d);
425 if (ptr >= end)
426 *eob = 1;
427 return (ptr - first);
428}
429
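/* Take ip->i_rw_mutex for writing when the mapping may allocate blocks
 * (@create), otherwise a shared (read) lock is sufficient.
 */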
430static inline void bmap_lock(struct gfs2_inode *ip, int create)
431{
432 if (create)
433 down_write(&ip->i_rw_mutex);
434 else
435 down_read(&ip->i_rw_mutex);
436}
437
438static inline void bmap_unlock(struct gfs2_inode *ip, int create)
439{
440 if (create)
441 up_write(&ip->i_rw_mutex);
442 else
443 up_read(&ip->i_rw_mutex);
444}
445
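/* Initialise a freshly allocated indirect block (block number @bn) at height
 * @i of the metapath: create its buffer, write the metadata header, clear its
 * pointers, link it into the parent buffer at slot @offset and return a
 * pointer to the slot just written.
 */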
446static inline __be64 *gfs2_indirect_init(struct metapath *mp,
447 struct gfs2_glock *gl, unsigned int i,
448 unsigned offset, u64 bn)
449{
450 __be64 *ptr = (__be64 *)(mp->mp_bh[i - 1]->b_data +
451 ((i > 1) ? sizeof(struct gfs2_meta_header) :
452 sizeof(struct gfs2_dinode)));
453 BUG_ON(i < 1);
454 BUG_ON(mp->mp_bh[i] != NULL);
455 mp->mp_bh[i] = gfs2_meta_new(gl, bn);
456 gfs2_trans_add_meta(gl, mp->mp_bh[i]);
457 gfs2_metatype_set(mp->mp_bh[i], GFS2_METATYPE_IN, GFS2_FORMAT_IN);
458 gfs2_buffer_clear_tail(mp->mp_bh[i], sizeof(struct gfs2_meta_header));
459 ptr += offset;
460 *ptr = cpu_to_be64(bn);
461 return ptr;
462}
463
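/* States for the gfs2_bmap_alloc() state machine: grow the height of the
 * metadata tree first, then fill in the new branch of indirect blocks, then
 * allocate the data blocks themselves.
 */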
464enum alloc_state {
465 ALLOC_DATA = 0,
466 ALLOC_GROW_DEPTH = 1,
467 ALLOC_GROW_HEIGHT = 2,
468 /* ALLOC_UNSTUFF = 3, TBD and rather complicated */
469};
470
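/* Number of block pointers held by a metadata buffer at height @hgt:
 * dinode pointers at height 0, indirect block pointers otherwise.
 */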
471static inline unsigned int hptrs(struct gfs2_sbd *sdp, const unsigned int hgt)
472{
473 if (hgt)
474 return sdp->sd_inptrs;
475 return sdp->sd_diptrs;
476}
477
478/**
479 * gfs2_bmap_alloc - Build a metadata tree of the requested height
480 * @inode: The GFS2 inode
481 * @lblock: The logical starting block of the extent
482 * @bh_map: This is used to return the mapping details
483 * @mp: The metapath
484 * @sheight: The starting height (i.e. what's already mapped)
485 * @height: The height to build to
486 * @maxlen: The max number of data blocks to alloc
487 *
488 * In this routine we may have to alloc:
489 * i) Indirect blocks to grow the metadata tree height
490 * ii) Indirect blocks to fill in lower part of the metadata tree
491 * iii) Data blocks
492 *
493 * The function is in two parts. The first part works out the total
494 * number of blocks which we need. The second part does the actual
495 * allocation asking for an extent at a time (if enough contiguous free
496 * blocks are available, there will only be one request per bmap call)
497 * and uses the state machine to initialise the blocks in order.
498 *
499 * Returns: errno on error
500 */
501
502static int gfs2_bmap_alloc(struct inode *inode, const sector_t lblock,
503 struct buffer_head *bh_map, struct metapath *mp,
504 const unsigned int sheight,
505 const unsigned int height,
506 const size_t maxlen)
507{
508 struct gfs2_inode *ip = GFS2_I(inode);
509 struct gfs2_sbd *sdp = GFS2_SB(inode);
510 struct super_block *sb = sdp->sd_vfs;
511 struct buffer_head *dibh = mp->mp_bh[0];
512 u64 bn, dblock = 0;
513 unsigned n, i, blks, alloced = 0, iblks = 0, branch_start = 0;
514 unsigned dblks = 0;
515 unsigned ptrs_per_blk;
516 const unsigned end_of_metadata = height - 1;
517 int ret;
518 int eob = 0;
519 enum alloc_state state;
520 __be64 *ptr;
521 __be64 zero_bn = 0;
522
523 BUG_ON(sheight < 1);
524 BUG_ON(dibh == NULL);
525
526 gfs2_trans_add_meta(ip->i_gl, dibh);
527
528 if (height == sheight) {
529 struct buffer_head *bh;
530 /* Bottom indirect block exists, find unalloced extent size */
531 ptr = metapointer(end_of_metadata, mp);
532 bh = mp->mp_bh[end_of_metadata];
533 dblks = gfs2_extent_length(bh->b_data, bh->b_size, ptr, maxlen,
534 &eob);
535 BUG_ON(dblks < 1);
536 state = ALLOC_DATA;
537 } else {
538 /* Need to allocate indirect blocks */
539 ptrs_per_blk = height > 1 ? sdp->sd_inptrs : sdp->sd_diptrs;
540 dblks = min(maxlen, (size_t)(ptrs_per_blk -
541 mp->mp_list[end_of_metadata]));
542 if (height == ip->i_height) {
543 /* Writing into existing tree, extend tree down */
544 iblks = height - sheight;
545 state = ALLOC_GROW_DEPTH;
546 } else {
547 /* Building up tree height */
548 state = ALLOC_GROW_HEIGHT;
549 iblks = height - ip->i_height;
550 branch_start = metapath_branch_start(mp);
551 iblks += (height - branch_start);
552 }
553 }
554
555 /* start of the second part of the function (state machine) */
556
557 blks = dblks + iblks;
558 i = sheight;
559 do {
560 int error;
561 n = blks - alloced;
562 error = gfs2_alloc_blocks(ip, &bn, &n, 0, NULL);
563 if (error)
564 return error;
565 alloced += n;
566 if (state != ALLOC_DATA || gfs2_is_jdata(ip))
567 gfs2_trans_add_unrevoke(sdp, bn, n);
568 switch (state) {
569 /* Growing height of tree */
570 case ALLOC_GROW_HEIGHT:
571 if (i == 1) {
572 ptr = (__be64 *)(dibh->b_data +
573 sizeof(struct gfs2_dinode));
574 zero_bn = *ptr;
575 }
576 for (; i - 1 < height - ip->i_height && n > 0; i++, n--)
577 gfs2_indirect_init(mp, ip->i_gl, i, 0, bn++);
578 if (i - 1 == height - ip->i_height) {
579 i--;
580 gfs2_buffer_copy_tail(mp->mp_bh[i],
581 sizeof(struct gfs2_meta_header),
582 dibh, sizeof(struct gfs2_dinode));
583 gfs2_buffer_clear_tail(dibh,
584 sizeof(struct gfs2_dinode) +
585 sizeof(__be64));
586 ptr = (__be64 *)(mp->mp_bh[i]->b_data +
587 sizeof(struct gfs2_meta_header));
588 *ptr = zero_bn;
589 state = ALLOC_GROW_DEPTH;
590 for(i = branch_start; i < height; i++) {
591 if (mp->mp_bh[i] == NULL)
592 break;
593 brelse(mp->mp_bh[i]);
594 mp->mp_bh[i] = NULL;
595 }
596 i = branch_start;
597 }
598 if (n == 0)
599 break;
600 /* Branching from existing tree */
601 case ALLOC_GROW_DEPTH:
602 if (i > 1 && i < height)
603 gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[i-1]);
604 for (; i < height && n > 0; i++, n--)
605 gfs2_indirect_init(mp, ip->i_gl, i,
606 mp->mp_list[i-1], bn++);
607 if (i == height)
608 state = ALLOC_DATA;
609 if (n == 0)
610 break;
611 /* Tree complete, adding data blocks */
612 case ALLOC_DATA:
613 BUG_ON(n > dblks);
614 BUG_ON(mp->mp_bh[end_of_metadata] == NULL);
615 gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[end_of_metadata]);
616 dblks = n;
617 ptr = metapointer(end_of_metadata, mp);
618 dblock = bn;
619 while (n-- > 0)
620 *ptr++ = cpu_to_be64(bn++);
621 if (buffer_zeronew(bh_map)) {
622 ret = sb_issue_zeroout(sb, dblock, dblks,
623 GFP_NOFS);
624 if (ret) {
625 fs_err(sdp,
626 "Failed to zero data buffers\n");
627 clear_buffer_zeronew(bh_map);
628 }
629 }
630 break;
631 }
632 } while ((state != ALLOC_DATA) || !dblock);
633
634 ip->i_height = height;
635 gfs2_add_inode_blocks(&ip->i_inode, alloced);
636 gfs2_dinode_out(ip, mp->mp_bh[0]->b_data);
637 map_bh(bh_map, inode->i_sb, dblock);
638 bh_map->b_size = dblks << inode->i_blkbits;
639 set_buffer_new(bh_map);
640 return 0;
641}
642
643/**
644 * gfs2_block_map - Map a block from an inode to a disk block
645 * @inode: The inode
646 * @lblock: The logical block number
647 * @bh_map: The bh to be mapped
648 * @create: True if it's ok to alloc blocks to satisfy the request
649 *
650 * Sets buffer_mapped() if successful, sets buffer_boundary() if a
651 * read of metadata will be required before the next block can be
652 * mapped. Sets buffer_new() if new blocks were allocated.
653 *
654 * Returns: errno
655 */
656
657int gfs2_block_map(struct inode *inode, sector_t lblock,
658 struct buffer_head *bh_map, int create)
659{
660 struct gfs2_inode *ip = GFS2_I(inode);
661 struct gfs2_sbd *sdp = GFS2_SB(inode);
662 unsigned int bsize = sdp->sd_sb.sb_bsize;
663 const size_t maxlen = bh_map->b_size >> inode->i_blkbits;
664 const u64 *arr = sdp->sd_heightsize;
665 __be64 *ptr;
666 u64 size;
667 struct metapath mp;
668 int ret;
669 int eob;
670 unsigned int len;
671 struct buffer_head *bh;
672 u8 height;
673
674 BUG_ON(maxlen == 0);
675
676 memset(&mp, 0, sizeof(mp));
677 bmap_lock(ip, create);
678 clear_buffer_mapped(bh_map);
679 clear_buffer_new(bh_map);
680 clear_buffer_boundary(bh_map);
681 trace_gfs2_bmap(ip, bh_map, lblock, create, 1);
682 if (gfs2_is_dir(ip)) {
683 bsize = sdp->sd_jbsize;
684 arr = sdp->sd_jheightsize;
685 }
686
687 ret = gfs2_meta_inode_buffer(ip, &mp.mp_bh[0]);
688 if (ret)
689 goto out;
690
691 height = ip->i_height;
692 size = (lblock + 1) * bsize;
693 while (size > arr[height])
694 height++;
695 find_metapath(sdp, lblock, &mp, height);
696 ret = 1;
697 if (height > ip->i_height || gfs2_is_stuffed(ip))
698 goto do_alloc;
699 ret = lookup_metapath(ip, &mp);
700 if (ret < 0)
701 goto out;
702 if (ret != ip->i_height)
703 goto do_alloc;
704 ptr = metapointer(ip->i_height - 1, &mp);
705 if (*ptr == 0)
706 goto do_alloc;
707 map_bh(bh_map, inode->i_sb, be64_to_cpu(*ptr));
708 bh = mp.mp_bh[ip->i_height - 1];
709 len = gfs2_extent_length(bh->b_data, bh->b_size, ptr, maxlen, &eob);
710 bh_map->b_size = (len << inode->i_blkbits);
711 if (eob)
712 set_buffer_boundary(bh_map);
713 ret = 0;
714out:
715 release_metapath(&mp);
716 trace_gfs2_bmap(ip, bh_map, lblock, create, ret);
717 bmap_unlock(ip, create);
718 return ret;
719
720do_alloc:
721 /* All allocations are done here, firstly check create flag */
722 if (!create) {
723 BUG_ON(gfs2_is_stuffed(ip));
724 ret = 0;
725 goto out;
726 }
727
728 /* At this point ret is the tree depth of already allocated blocks */
729 ret = gfs2_bmap_alloc(inode, lblock, bh_map, &mp, ret, height, maxlen);
730 goto out;
731}
732
733/*
734 * Deprecated: do not use in new code
735 */
736int gfs2_extent_map(struct inode *inode, u64 lblock, int *new, u64 *dblock, unsigned *extlen)
737{
738 struct buffer_head bh = { .b_state = 0, .b_blocknr = 0 };
739 int ret;
740 int create = *new;
741
742 BUG_ON(!extlen);
743 BUG_ON(!dblock);
744 BUG_ON(!new);
745
746 bh.b_size = BIT(inode->i_blkbits + (create ? 0 : 5));
747 ret = gfs2_block_map(inode, lblock, &bh, create);
748 *extlen = bh.b_size >> inode->i_blkbits;
749 *dblock = bh.b_blocknr;
750 if (buffer_new(&bh))
751 *new = 1;
752 else
753 *new = 0;
754 return ret;
755}
756
757/**
758 * gfs2_block_truncate_page - Deal with zeroing out data for truncate
759 *
760 * This is partly borrowed from ext3.
761 */
762static int gfs2_block_truncate_page(struct address_space *mapping, loff_t from)
763{
764 struct inode *inode = mapping->host;
765 struct gfs2_inode *ip = GFS2_I(inode);
766 unsigned long index = from >> PAGE_SHIFT;
767 unsigned offset = from & (PAGE_SIZE-1);
768 unsigned blocksize, iblock, length, pos;
769 struct buffer_head *bh;
770 struct page *page;
771 int err;
772
773 page = find_or_create_page(mapping, index, GFP_NOFS);
774 if (!page)
775 return 0;
776
777 blocksize = inode->i_sb->s_blocksize;
778 length = blocksize - (offset & (blocksize - 1));
779 iblock = index << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits);
780
781 if (!page_has_buffers(page))
782 create_empty_buffers(page, blocksize, 0);
783
784 /* Find the buffer that contains "offset" */
785 bh = page_buffers(page);
786 pos = blocksize;
787 while (offset >= pos) {
788 bh = bh->b_this_page;
789 iblock++;
790 pos += blocksize;
791 }
792
793 err = 0;
794
795 if (!buffer_mapped(bh)) {
796 gfs2_block_map(inode, iblock, bh, 0);
797 /* unmapped? It's a hole - nothing to do */
798 if (!buffer_mapped(bh))
799 goto unlock;
800 }
801
802 /* Ok, it's mapped. Make sure it's up-to-date */
803 if (PageUptodate(page))
804 set_buffer_uptodate(bh);
805
806 if (!buffer_uptodate(bh)) {
807 err = -EIO;
808 ll_rw_block(REQ_OP_READ, 0, 1, &bh);
809 wait_on_buffer(bh);
810 /* Uhhuh. Read error. Complain and punt. */
811 if (!buffer_uptodate(bh))
812 goto unlock;
813 err = 0;
814 }
815
816 if (!gfs2_is_writeback(ip))
817 gfs2_trans_add_data(ip->i_gl, bh);
818
819 zero_user(page, offset, length);
820 mark_buffer_dirty(bh);
821unlock:
822 unlock_page(page);
823 put_page(page);
824 return err;
825}
826
827#define GFS2_JTRUNC_REVOKES 8192
828
829/**
830 * gfs2_journaled_truncate - Wrapper for truncate_pagecache for jdata files
831 * @inode: The inode being truncated
832 * @oldsize: The original (larger) size
833 * @newsize: The new smaller size
834 *
835 * With jdata files, we have to journal a revoke for each block which is
836 * truncated. As a result, we need to split this into separate transactions
837 * if the number of pages being truncated gets too large.
838 */
839
840static int gfs2_journaled_truncate(struct inode *inode, u64 oldsize, u64 newsize)
841{
842 struct gfs2_sbd *sdp = GFS2_SB(inode);
843 u64 max_chunk = GFS2_JTRUNC_REVOKES * sdp->sd_vfs->s_blocksize;
844 u64 chunk;
845 int error;
846
847 while (oldsize != newsize) {
848 chunk = oldsize - newsize;
849 if (chunk > max_chunk)
850 chunk = max_chunk;
851 truncate_pagecache(inode, oldsize - chunk);
852 oldsize -= chunk;
853 gfs2_trans_end(sdp);
854 error = gfs2_trans_begin(sdp, RES_DINODE, GFS2_JTRUNC_REVOKES);
855 if (error)
856 return error;
857 }
858
859 return 0;
860}
861
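/* First phase of a shrinking truncate: for stuffed files simply clear the
 * dinode tail beyond @newsize; otherwise zero the partial block at the new
 * end of file and set GFS2_DIF_TRUNC_IN_PROG. Then update i_size and trim
 * the page cache.
 */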
862static int trunc_start(struct inode *inode, u64 oldsize, u64 newsize)
863{
864 struct gfs2_inode *ip = GFS2_I(inode);
865 struct gfs2_sbd *sdp = GFS2_SB(inode);
866 struct address_space *mapping = inode->i_mapping;
867 struct buffer_head *dibh;
868 int journaled = gfs2_is_jdata(ip);
869 int error;
870
871 if (journaled)
872 error = gfs2_trans_begin(sdp, RES_DINODE + RES_JDATA, GFS2_JTRUNC_REVOKES);
873 else
874 error = gfs2_trans_begin(sdp, RES_DINODE, 0);
875 if (error)
876 return error;
877
878 error = gfs2_meta_inode_buffer(ip, &dibh);
879 if (error)
880 goto out;
881
882 gfs2_trans_add_meta(ip->i_gl, dibh);
883
884 if (gfs2_is_stuffed(ip)) {
885 gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode) + newsize);
886 } else {
887 if (newsize & (u64)(sdp->sd_sb.sb_bsize - 1)) {
888 error = gfs2_block_truncate_page(mapping, newsize);
889 if (error)
890 goto out_brelse;
891 }
892 ip->i_diskflags |= GFS2_DIF_TRUNC_IN_PROG;
893 }
894
895 i_size_write(inode, newsize);
896 ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
897 gfs2_dinode_out(ip, dibh->b_data);
898
899 if (journaled)
900 error = gfs2_journaled_truncate(inode, oldsize, newsize);
901 else
902 truncate_pagecache(inode, newsize);
903
904 if (error) {
905 brelse(dibh);
906 return error;
907 }
908
909out_brelse:
910 brelse(dibh);
911out:
912 gfs2_trans_end(sdp);
913 return error;
914}
915
916/**
917 * sweep_bh_for_rgrps - find an rgrp in a meta buffer and free blocks therein
918 * @ip: inode
919 * @rd_gh: holder of resource group glock
920 * @mp: current metapath fully populated with buffers
921 * @btotal: place to keep count of total blocks freed
922 * @hgt: height we're processing
923 * @preserve1: true if the first pointer in this buffer must be preserved
924 *
925 * We sweep a metadata buffer (provided by the metapath) for blocks we need to
926 * free, and free them all. However, we do it one rgrp at a time. If this
927 * block has references to multiple rgrps, we break it into individual
928 * transactions. This allows other processes to use the rgrps while we're
929 * focused on a single one, for better concurrency / performance.
930 * At every transaction boundary, we rewrite the inode into the journal.
931 * That way the bitmaps are kept consistent with the inode and we can recover
932 * if we're interrupted by power-outages.
933 *
934 * Returns: 0, or return code if an error occurred.
935 * *btotal has the total number of blocks freed
936 */
937static int sweep_bh_for_rgrps(struct gfs2_inode *ip, struct gfs2_holder *rd_gh,
938 const struct metapath *mp, u32 *btotal, int hgt,
939 bool preserve1)
940{
941 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
942 struct gfs2_rgrpd *rgd;
943 struct gfs2_trans *tr;
944 struct buffer_head *bh = mp->mp_bh[hgt];
945 __be64 *top, *bottom, *p;
946 int blks_outside_rgrp;
947 u64 bn, bstart, isize_blks;
948 s64 blen; /* needs to be s64 or gfs2_add_inode_blocks breaks */
949 int meta = ((hgt != ip->i_height - 1) ? 1 : 0);
950 int ret = 0;
951 bool buf_in_tr = false; /* buffer was added to transaction */
952
953 if (gfs2_metatype_check(sdp, bh,
954 (hgt ? GFS2_METATYPE_IN : GFS2_METATYPE_DI)))
955 return -EIO;
956
957more_rgrps:
958 blks_outside_rgrp = 0;
959 bstart = 0;
960 blen = 0;
961 top = metapointer(hgt, mp); /* first ptr from metapath */
962 /* If we're keeping some data at the truncation point, we've got to
963 preserve the metadata tree by adding 1 to the starting metapath. */
964 if (preserve1)
965 top++;
966
967 bottom = (__be64 *)(bh->b_data + bh->b_size);
968
969 for (p = top; p < bottom; p++) {
970 if (!*p)
971 continue;
972 bn = be64_to_cpu(*p);
973 if (gfs2_holder_initialized(rd_gh)) {
974 rgd = gfs2_glock2rgrp(rd_gh->gh_gl);
975 gfs2_assert_withdraw(sdp,
976 gfs2_glock_is_locked_by_me(rd_gh->gh_gl));
977 } else {
978 rgd = gfs2_blk2rgrpd(sdp, bn, false);
979 ret = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
980 0, rd_gh);
981 if (ret)
982 goto out;
983
984 /* Must be done with the rgrp glock held: */
985 if (gfs2_rs_active(&ip->i_res) &&
986 rgd == ip->i_res.rs_rbm.rgd)
987 gfs2_rs_deltree(&ip->i_res);
988 }
989
990 if (!rgrp_contains_block(rgd, bn)) {
991 blks_outside_rgrp++;
992 continue;
993 }
994
995 /* The size of our transactions will be unknown until we
996 actually process all the metadata blocks that relate to
997 the rgrp. So we estimate. We know it can't be more than
998 the dinode's i_blocks and we don't want to exceed the
999 journal flush threshold, sd_log_thresh2. */
1000 if (current->journal_info == NULL) {
1001 unsigned int jblocks_rqsted, revokes;
1002
1003 jblocks_rqsted = rgd->rd_length + RES_DINODE +
1004 RES_INDIRECT;
1005 isize_blks = gfs2_get_inode_blocks(&ip->i_inode);
1006 if (isize_blks > atomic_read(&sdp->sd_log_thresh2))
1007 jblocks_rqsted +=
1008 atomic_read(&sdp->sd_log_thresh2);
1009 else
1010 jblocks_rqsted += isize_blks;
1011 revokes = jblocks_rqsted;
1012 if (meta)
1013 revokes += hptrs(sdp, hgt);
1014 else if (ip->i_depth)
1015 revokes += sdp->sd_inptrs;
1016 ret = gfs2_trans_begin(sdp, jblocks_rqsted, revokes);
1017 if (ret)
1018 goto out_unlock;
1019 down_write(&ip->i_rw_mutex);
1020 }
1021 /* check if we will exceed the transaction blocks requested */
1022 tr = current->journal_info;
1023 if (tr->tr_num_buf_new + RES_STATFS +
1024 RES_QUOTA >= atomic_read(&sdp->sd_log_thresh2)) {
1025 /* We set blks_outside_rgrp to ensure the loop will
1026 be repeated for the same rgrp, but with a new
1027 transaction. */
1028 blks_outside_rgrp++;
1029 /* This next part is tricky. If the buffer was added
1030 to the transaction, we've already set some block
1031 pointers to 0, so we better follow through and free
1032 them, or we will introduce corruption (so break).
1033 This may be impossible, or at least rare, but I
1034 decided to cover the case regardless.
1035
1036 If the buffer was not added to the transaction
1037 (this call), doing so would exceed our transaction
1038 size, so we need to end the transaction and start a
1039 new one (so goto). */
1040
1041 if (buf_in_tr)
1042 break;
1043 goto out_unlock;
1044 }
1045
1046 gfs2_trans_add_meta(ip->i_gl, bh);
1047 buf_in_tr = true;
1048 *p = 0;
1049 if (bstart + blen == bn) {
1050 blen++;
1051 continue;
1052 }
1053 if (bstart) {
1054 __gfs2_free_blocks(ip, bstart, (u32)blen, meta);
1055 (*btotal) += blen;
1056 gfs2_add_inode_blocks(&ip->i_inode, -blen);
1057 }
1058 bstart = bn;
1059 blen = 1;
1060 }
1061 if (bstart) {
1062 __gfs2_free_blocks(ip, bstart, (u32)blen, meta);
1063 (*btotal) += blen;
1064 gfs2_add_inode_blocks(&ip->i_inode, -blen);
1065 }
1066out_unlock:
1067 if (!ret && blks_outside_rgrp) { /* If buffer still has non-zero blocks
1068 outside the rgrp we just processed,
1069 do it all over again. */
1070 if (current->journal_info) {
1071 struct buffer_head *dibh = mp->mp_bh[0];
1072
1073 /* Every transaction boundary, we rewrite the dinode
1074 to keep its di_blocks current in case of failure. */
1075 ip->i_inode.i_mtime = ip->i_inode.i_ctime =
1076 current_time(&ip->i_inode);
1077 gfs2_trans_add_meta(ip->i_gl, dibh);
1078 gfs2_dinode_out(ip, dibh->b_data);
1079 up_write(&ip->i_rw_mutex);
1080 gfs2_trans_end(sdp);
1081 }
1082 gfs2_glock_dq_uninit(rd_gh);
1083 cond_resched();
1084 goto more_rgrps;
1085 }
1086out:
1087 return ret;
1088}
1089
1090/**
1091 * find_nonnull_ptr - find a non-null pointer given a metapath and height
1092 * assumes the metapath is valid (with buffers) out to height h
1093 * @mp: starting metapath
1094 * @h: desired height to search
1095 *
1096 * Returns: true if a non-null pointer was found in the metapath buffer
1097 * false if all remaining pointers are NULL in the buffer
1098 */
1099static bool find_nonnull_ptr(struct gfs2_sbd *sdp, struct metapath *mp,
1100 unsigned int h)
1101{
1102 __be64 *ptr;
1103 unsigned int ptrs = hptrs(sdp, h) - 1;
1104
1105 while (true) {
1106 ptr = metapointer(h, mp);
1107 if (*ptr) /* if we have a non-null pointer */
1108 return true;
1109
1110 if (mp->mp_list[h] < ptrs)
1111 mp->mp_list[h]++;
1112 else
1113 return false; /* no more pointers in this buffer */
1114 }
1115}
1116
1117enum dealloc_states {
1118 DEALLOC_MP_FULL = 0, /* Strip a metapath with all buffers read in */
1119 DEALLOC_MP_LOWER = 1, /* lower the metapath strip height */
1120 DEALLOC_FILL_MP = 2, /* Fill in the metapath to the given height. */
1121 DEALLOC_DONE = 3, /* process complete */
1122};
1123
1124/**
1125 * trunc_dealloc - truncate a file down to a desired size
1126 * @ip: inode to truncate
1127 * @newsize: The desired size of the file
1128 *
1129 * This function truncates a file to newsize. It works from the
1130 * bottom up, and from the right to the left. In other words, it strips off
1131 * the highest layer (data) before stripping any of the metadata. Doing it
1132 * this way is best in case the operation is interrupted by power failure, etc.
1133 * The dinode is rewritten in every transaction to guarantee integrity.
1134 */
1135static int trunc_dealloc(struct gfs2_inode *ip, u64 newsize)
1136{
1137 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1138 struct metapath mp;
1139 struct buffer_head *dibh, *bh;
1140 struct gfs2_holder rd_gh;
1141 u64 lblock;
1142 __u16 nbof[GFS2_MAX_META_HEIGHT]; /* new beginning of truncation */
1143 unsigned int strip_h = ip->i_height - 1;
1144 u32 btotal = 0;
1145 int ret, state;
1146 int mp_h; /* metapath buffers are read in to this height */
1147 sector_t last_ra = 0;
1148 u64 prev_bnr = 0;
1149 bool preserve1; /* need to preserve the first meta pointer? */
1150
1151 if (!newsize)
1152 lblock = 0;
1153 else
1154 lblock = (newsize - 1) >> sdp->sd_sb.sb_bsize_shift;
1155
1156 memset(&mp, 0, sizeof(mp));
1157 find_metapath(sdp, lblock, &mp, ip->i_height);
1158
1159 memcpy(&nbof, &mp.mp_list, sizeof(nbof));
1160
1161 ret = gfs2_meta_inode_buffer(ip, &dibh);
1162 if (ret)
1163 return ret;
1164
1165 mp.mp_bh[0] = dibh;
1166 ret = lookup_metapath(ip, &mp);
1167 if (ret == ip->i_height)
1168 state = DEALLOC_MP_FULL; /* We have a complete metapath */
1169 else
1170 state = DEALLOC_FILL_MP; /* deal with partial metapath */
1171
1172 ret = gfs2_rindex_update(sdp);
1173 if (ret)
1174 goto out_metapath;
1175
1176 ret = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
1177 if (ret)
1178 goto out_metapath;
1179 gfs2_holder_mark_uninitialized(&rd_gh);
1180
1181 mp_h = strip_h;
1182
1183 while (state != DEALLOC_DONE) {
1184 switch (state) {
1185 /* Truncate a full metapath at the given strip height.
1186 * Note that strip_h == mp_h in order to be in this state. */
1187 case DEALLOC_MP_FULL:
1188 if (mp_h > 0) { /* issue read-ahead on metadata */
1189 __be64 *top;
1190
1191 bh = mp.mp_bh[mp_h - 1];
1192 if (bh->b_blocknr != last_ra) {
1193 last_ra = bh->b_blocknr;
1194 top = metaptr1(mp_h - 1, &mp);
1195 gfs2_metapath_ra(ip->i_gl, bh, top);
1196 }
1197 }
1198 /* If we're truncating to a non-zero size and the mp is
1199 at the beginning of file for the strip height, we
1200 need to preserve the first metadata pointer. */
1201 preserve1 = (newsize &&
1202 (mp.mp_list[mp_h] == nbof[mp_h]));
1203 bh = mp.mp_bh[mp_h];
1204 gfs2_assert_withdraw(sdp, bh);
1205 if (gfs2_assert_withdraw(sdp,
1206 prev_bnr != bh->b_blocknr)) {
1207 printk(KERN_EMERG "GFS2: fsid=%s:inode %llu, "
1208 "block:%llu, i_h:%u, s_h:%u, mp_h:%u\n",
1209 sdp->sd_fsname,
1210 (unsigned long long)ip->i_no_addr,
1211 prev_bnr, ip->i_height, strip_h, mp_h);
1212 }
1213 prev_bnr = bh->b_blocknr;
1214 ret = sweep_bh_for_rgrps(ip, &rd_gh, &mp, &btotal,
1215 mp_h, preserve1);
1216 /* If we hit an error or just swept dinode buffer,
1217 just exit. */
1218 if (ret || !mp_h) {
1219 state = DEALLOC_DONE;
1220 break;
1221 }
1222 state = DEALLOC_MP_LOWER;
1223 break;
1224
1225 /* lower the metapath strip height */
1226 case DEALLOC_MP_LOWER:
1227 /* We're done with the current buffer, so release it,
1228 unless it's the dinode buffer. Then back up to the
1229 previous pointer. */
1230 if (mp_h) {
1231 brelse(mp.mp_bh[mp_h]);
1232 mp.mp_bh[mp_h] = NULL;
1233 }
1234 /* If we can't get any lower in height, we've stripped
1235 off all we can. Next step is to back up and start
1236 stripping the previous level of metadata. */
1237 if (mp_h == 0) {
1238 strip_h--;
1239 memcpy(&mp.mp_list, &nbof, sizeof(nbof));
1240 mp_h = strip_h;
1241 state = DEALLOC_FILL_MP;
1242 break;
1243 }
1244 mp.mp_list[mp_h] = 0;
1245 mp_h--; /* search one metadata height down */
1246 if (mp.mp_list[mp_h] >= hptrs(sdp, mp_h) - 1)
1247 break; /* loop around in the same state */
1248 mp.mp_list[mp_h]++;
1249 /* Here we've found a part of the metapath that is not
1250 * allocated. We need to search at that height for the
1251 * next non-null pointer. */
1252 if (find_nonnull_ptr(sdp, &mp, mp_h)) {
1253 state = DEALLOC_FILL_MP;
1254 mp_h++;
1255 }
1256 /* No more non-null pointers at this height. Back up
1257 to the previous height and try again. */
1258 break; /* loop around in the same state */
1259
1260 /* Fill the metapath with buffers to the given height. */
1261 case DEALLOC_FILL_MP:
1262 /* Fill the buffers out to the current height. */
1263 ret = fillup_metapath(ip, &mp, mp_h);
1264 if (ret < 0)
1265 goto out;
1266
1267 /* If buffers found for the entire strip height */
1268 if ((ret == ip->i_height) && (mp_h == strip_h)) {
1269 state = DEALLOC_MP_FULL;
1270 break;
1271 }
1272 if (ret < ip->i_height) /* We have a partial height */
1273 mp_h = ret - 1;
1274
1275 /* If we find a non-null block pointer, crawl a bit
1276 higher up in the metapath and try again, otherwise
1277 we need to look lower for a new starting point. */
1278 if (find_nonnull_ptr(sdp, &mp, mp_h))
1279 mp_h++;
1280 else
1281 state = DEALLOC_MP_LOWER;
1282 break;
1283 }
1284 }
1285
1286 if (btotal) {
1287 if (current->journal_info == NULL) {
1288 ret = gfs2_trans_begin(sdp, RES_DINODE + RES_STATFS +
1289 RES_QUOTA, 0);
1290 if (ret)
1291 goto out;
1292 down_write(&ip->i_rw_mutex);
1293 }
1294 gfs2_statfs_change(sdp, 0, +btotal, 0);
1295 gfs2_quota_change(ip, -(s64)btotal, ip->i_inode.i_uid,
1296 ip->i_inode.i_gid);
1297 ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
1298 gfs2_trans_add_meta(ip->i_gl, dibh);
1299 gfs2_dinode_out(ip, dibh->b_data);
1300 up_write(&ip->i_rw_mutex);
1301 gfs2_trans_end(sdp);
1302 }
1303
1304out:
1305 if (gfs2_holder_initialized(&rd_gh))
1306 gfs2_glock_dq_uninit(&rd_gh);
1307 if (current->journal_info) {
1308 up_write(&ip->i_rw_mutex);
1309 gfs2_trans_end(sdp);
1310 cond_resched();
1311 }
1312 gfs2_quota_unhold(ip);
1313out_metapath:
1314 release_metapath(&mp);
1315 return ret;
1316}
1317
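/* Final phase of a truncate: clear GFS2_DIF_TRUNC_IN_PROG, reset the height
 * and allocation goal if the file is now empty, and write out the updated
 * dinode.
 */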
1318static int trunc_end(struct gfs2_inode *ip)
1319{
1320 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1321 struct buffer_head *dibh;
1322 int error;
1323
1324 error = gfs2_trans_begin(sdp, RES_DINODE, 0);
1325 if (error)
1326 return error;
1327
1328 down_write(&ip->i_rw_mutex);
1329
1330 error = gfs2_meta_inode_buffer(ip, &dibh);
1331 if (error)
1332 goto out;
1333
1334 if (!i_size_read(&ip->i_inode)) {
1335 ip->i_height = 0;
1336 ip->i_goal = ip->i_no_addr;
1337 gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
1338 gfs2_ordered_del_inode(ip);
1339 }
1340 ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
1341 ip->i_diskflags &= ~GFS2_DIF_TRUNC_IN_PROG;
1342
1343 gfs2_trans_add_meta(ip->i_gl, dibh);
1344 gfs2_dinode_out(ip, dibh->b_data);
1345 brelse(dibh);
1346
1347out:
1348 up_write(&ip->i_rw_mutex);
1349 gfs2_trans_end(sdp);
1350 return error;
1351}
1352
1353/**
1354 * do_shrink - make a file smaller
1355 * @inode: the inode
1356 * @oldsize: the current inode size
1357 * @newsize: the size to make the file
1358 *
1359 * Called with an exclusive lock on @inode. The @size must
1360 * be equal to or smaller than the current inode size.
1361 *
1362 * Returns: errno
1363 */
1364
1365static int do_shrink(struct inode *inode, u64 oldsize, u64 newsize)
1366{
1367 struct gfs2_inode *ip = GFS2_I(inode);
1368 int error;
1369
1370 error = trunc_start(inode, oldsize, newsize);
1371 if (error < 0)
1372 return error;
1373 if (gfs2_is_stuffed(ip))
1374 return 0;
1375
1376 error = trunc_dealloc(ip, newsize);
1377 if (error == 0)
1378 error = trunc_end(ip);
1379
1380 return error;
1381}
1382
1383void gfs2_trim_blocks(struct inode *inode)
1384{
1385 u64 size = inode->i_size;
1386 int ret;
1387
1388 ret = do_shrink(inode, size, size);
1389 WARN_ON(ret != 0);
1390}
1391
1392/**
1393 * do_grow - Touch and update inode size
1394 * @inode: The inode
1395 * @size: The new size
1396 *
1397 * This function updates the timestamps on the inode and
1398 * may also increase the size of the inode. This function
1399 * must not be called with @size any smaller than the current
1400 * inode size.
1401 *
1402 * Although it is not strictly required to unstuff files here,
1403 * earlier versions of GFS2 have a bug in the stuffed file reading
1404 * code which will result in a buffer overrun if the size is larger
1405 * than the max stuffed file size. In order to prevent this from
1406 * occurring, such files are unstuffed, but in other cases we can
1407 * just update the inode size directly.
1408 *
1409 * Returns: 0 on success, or -ve on error
1410 */
1411
1412static int do_grow(struct inode *inode, u64 size)
1413{
1414 struct gfs2_inode *ip = GFS2_I(inode);
1415 struct gfs2_sbd *sdp = GFS2_SB(inode);
1416 struct gfs2_alloc_parms ap = { .target = 1, };
1417 struct buffer_head *dibh;
1418 int error;
1419 int unstuff = 0;
1420
1421 if (gfs2_is_stuffed(ip) &&
1422 (size > (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)))) {
1423 error = gfs2_quota_lock_check(ip, &ap);
1424 if (error)
1425 return error;
1426
1427 error = gfs2_inplace_reserve(ip, &ap);
1428 if (error)
1429 goto do_grow_qunlock;
1430 unstuff = 1;
1431 }
1432
1433 error = gfs2_trans_begin(sdp, RES_DINODE + RES_STATFS + RES_RG_BIT +
1434 (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF ?
1435 0 : RES_QUOTA), 0);
1436 if (error)
1437 goto do_grow_release;
1438
1439 if (unstuff) {
1440 error = gfs2_unstuff_dinode(ip, NULL);
1441 if (error)
1442 goto do_end_trans;
1443 }
1444
1445 error = gfs2_meta_inode_buffer(ip, &dibh);
1446 if (error)
1447 goto do_end_trans;
1448
1449 i_size_write(inode, size);
1450 ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
1451 gfs2_trans_add_meta(ip->i_gl, dibh);
1452 gfs2_dinode_out(ip, dibh->b_data);
1453 brelse(dibh);
1454
1455do_end_trans:
1456 gfs2_trans_end(sdp);
1457do_grow_release:
1458 if (unstuff) {
1459 gfs2_inplace_release(ip);
1460do_grow_qunlock:
1461 gfs2_quota_unlock(ip);
1462 }
1463 return error;
1464}
1465
1466/**
1467 * gfs2_setattr_size - make a file a given size
1468 * @inode: the inode
1469 * @newsize: the size to make the file
1470 *
1471 * The file size can grow, shrink, or stay the same size. This
1472 * is called holding i_mutex and an exclusive glock on the inode
1473 * in question.
1474 *
1475 * Returns: errno
1476 */
1477
1478int gfs2_setattr_size(struct inode *inode, u64 newsize)
1479{
1480 struct gfs2_inode *ip = GFS2_I(inode);
1481 int ret;
1482 u64 oldsize;
1483
1484 BUG_ON(!S_ISREG(inode->i_mode));
1485
1486 ret = inode_newsize_ok(inode, newsize);
1487 if (ret)
1488 return ret;
1489
1490 inode_dio_wait(inode);
1491
1492 ret = gfs2_rsqa_alloc(ip);
1493 if (ret)
1494 goto out;
1495
1496 oldsize = inode->i_size;
1497 if (newsize >= oldsize) {
1498 ret = do_grow(inode, newsize);
1499 goto out;
1500 }
1501
1502 ret = do_shrink(inode, oldsize, newsize);
1503out:
1504 gfs2_rsqa_delete(ip, NULL);
1505 return ret;
1506}
1507
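/* Resume an interrupted truncate: free any blocks still allocated beyond
 * i_size and then finish the truncate.
 */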
1508int gfs2_truncatei_resume(struct gfs2_inode *ip)
1509{
1510 int error;
1511 error = trunc_dealloc(ip, i_size_read(&ip->i_inode));
1512 if (!error)
1513 error = trunc_end(ip);
1514 return error;
1515}
1516
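/* Free all the data and metadata blocks belonging to the inode by
 * deallocating everything beyond offset zero.
 */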
1517int gfs2_file_dealloc(struct gfs2_inode *ip)
1518{
1519 return trunc_dealloc(ip, 0);
1520}
1521
1522/**
1523 * gfs2_free_journal_extents - Free cached journal bmap info
1524 * @jd: The journal
1525 *
1526 */
1527
1528void gfs2_free_journal_extents(struct gfs2_jdesc *jd)
1529{
1530 struct gfs2_journal_extent *jext;
1531
1532 while(!list_empty(&jd->extent_list)) {
1533 jext = list_entry(jd->extent_list.next, struct gfs2_journal_extent, list);
1534 list_del(&jext->list);
1535 kfree(jext);
1536 }
1537}
1538
1539/**
1540 * gfs2_add_jextent - Add or merge a new extent to extent cache
1541 * @jd: The journal descriptor
1542 * @lblock: The logical block at start of new extent
1543 * @dblock: The physical block at start of new extent
1544 * @blocks: Size of extent in fs blocks
1545 *
1546 * Returns: 0 on success or -ENOMEM
1547 */
1548
1549static int gfs2_add_jextent(struct gfs2_jdesc *jd, u64 lblock, u64 dblock, u64 blocks)
1550{
1551 struct gfs2_journal_extent *jext;
1552
1553 if (!list_empty(&jd->extent_list)) {
1554 jext = list_entry(jd->extent_list.prev, struct gfs2_journal_extent, list);
1555 if ((jext->dblock + jext->blocks) == dblock) {
1556 jext->blocks += blocks;
1557 return 0;
1558 }
1559 }
1560
1561 jext = kzalloc(sizeof(struct gfs2_journal_extent), GFP_NOFS);
1562 if (jext == NULL)
1563 return -ENOMEM;
1564 jext->dblock = dblock;
1565 jext->lblock = lblock;
1566 jext->blocks = blocks;
1567 list_add_tail(&jext->list, &jd->extent_list);
1568 jd->nr_extents++;
1569 return 0;
1570}
1571
1572/**
1573 * gfs2_map_journal_extents - Cache journal bmap info
1574 * @sdp: The super block
1575 * @jd: The journal to map
1576 *
1577 * Create a reusable "extent" mapping from all logical
1578 * blocks to all physical blocks for the given journal. This will save
1579 * us time when writing journal blocks. Most journals will have only one
1580 * extent that maps all their logical blocks. That's because gfs2.mkfs
1581 * arranges the journal blocks sequentially to maximize performance.
1582 * So the extent would map the first block for the entire file length.
1583 * However, gfs2_jadd can happen while file activity is happening, so
1584 * those journals may not be sequential. Less likely is the case where
1585 * the users created their own journals by mounting the metafs and
1586 * laying it out. But it's still possible. These journals might have
1587 * several extents.
1588 *
1589 * Returns: 0 on success, or error on failure
1590 */
1591
1592int gfs2_map_journal_extents(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd)
1593{
1594 u64 lblock = 0;
1595 u64 lblock_stop;
1596 struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
1597 struct buffer_head bh;
1598 unsigned int shift = sdp->sd_sb.sb_bsize_shift;
1599 u64 size;
1600 int rc;
1601
1602 lblock_stop = i_size_read(jd->jd_inode) >> shift;
1603 size = (lblock_stop - lblock) << shift;
1604 jd->nr_extents = 0;
1605 WARN_ON(!list_empty(&jd->extent_list));
1606
1607 do {
1608 bh.b_state = 0;
1609 bh.b_blocknr = 0;
1610 bh.b_size = size;
1611 rc = gfs2_block_map(jd->jd_inode, lblock, &bh, 0);
1612 if (rc || !buffer_mapped(&bh))
1613 goto fail;
1614 rc = gfs2_add_jextent(jd, lblock, bh.b_blocknr, bh.b_size >> shift);
1615 if (rc)
1616 goto fail;
1617 size -= bh.b_size;
1618 lblock += (bh.b_size >> ip->i_inode.i_blkbits);
1619 } while(size > 0);
1620
1621 fs_info(sdp, "journal %d mapped with %u extents\n", jd->jd_jid,
1622 jd->nr_extents);
1623 return 0;
1624
1625fail:
1626 fs_warn(sdp, "error %d mapping journal %u at offset %llu (extent %u)\n",
1627 rc, jd->jd_jid,
1628 (unsigned long long)(i_size_read(jd->jd_inode) - size),
1629 jd->nr_extents);
1630 fs_warn(sdp, "bmap=%d lblock=%llu block=%llu, state=0x%08lx, size=%llu\n",
1631 rc, (unsigned long long)lblock, (unsigned long long)bh.b_blocknr,
1632 bh.b_state, (unsigned long long)bh.b_size);
1633 gfs2_free_journal_extents(jd);
1634 return rc;
1635}
1636
1637/**
1638 * gfs2_write_alloc_required - figure out if a write will require an allocation
1639 * @ip: the file being written to
1640 * @offset: the offset to write to
1641 * @len: the number of bytes being written
1642 *
1643 * Returns: 1 if an alloc is required, 0 otherwise
1644 */
1645
1646int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset,
1647 unsigned int len)
1648{
1649 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1650 struct buffer_head bh;
1651 unsigned int shift;
1652 u64 lblock, lblock_stop, size;
1653 u64 end_of_file;
1654
1655 if (!len)
1656 return 0;
1657
1658 if (gfs2_is_stuffed(ip)) {
1659 if (offset + len >
1660 sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode))
1661 return 1;
1662 return 0;
1663 }
1664
1665 shift = sdp->sd_sb.sb_bsize_shift;
1666 BUG_ON(gfs2_is_dir(ip));
1667 end_of_file = (i_size_read(&ip->i_inode) + sdp->sd_sb.sb_bsize - 1) >> shift;
1668 lblock = offset >> shift;
1669 lblock_stop = (offset + len + sdp->sd_sb.sb_bsize - 1) >> shift;
1670 if (lblock_stop > end_of_file)
1671 return 1;
1672
1673 size = (lblock_stop - lblock) << shift;
1674 do {
1675 bh.b_state = 0;
1676 bh.b_size = size;
1677 gfs2_block_map(&ip->i_inode, lblock, &bh, 0);
1678 if (!buffer_mapped(&bh))
1679 return 1;
1680 size -= bh.b_size;
1681 lblock += (bh.b_size >> ip->i_inode.i_blkbits);
1682 } while(size > 0);
1683
1684 return 0;
1685}
1686