fs/jffs2/nodemgmt.c
/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright © 2001-2007 Red Hat, Inc.
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mtd/mtd.h>
#include <linux/compiler.h>
#include <linux/sched.h> /* For cond_resched() */
#include "nodelist.h"
#include "debug.h"

/**
 * jffs2_reserve_space - request physical space to write nodes to flash
 * @c: superblock info
 * @minsize: Minimum acceptable size of allocation
 * @len: Returned value of allocation length
 * @prio: Allocation type - ALLOC_{NORMAL,DELETION}
 * @sumsize: Space to reserve for the eraseblock summary, or
 *	JFFS2_SUMMARY_NOSUM_SIZE to suppress summary generation
 *
 * Requests a block of physical space on the flash. Returns zero for success
 * and puts the granted length into @len, or returns -ENOSPC or another
 * error if appropriate.
 *
 * If it returns zero, jffs2_reserve_space() also downs the per-filesystem
 * allocation semaphore, to prevent more than one allocation from being
 * active at any time. The semaphore is later released by
 * jffs2_complete_reservation().
 *
 * jffs2_reserve_space() may trigger garbage collection in order to make room
 * for the requested allocation.
 */

static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
				  uint32_t *len, uint32_t sumsize);

int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
			uint32_t *len, int prio, uint32_t sumsize)
{
	int ret = -EAGAIN;
	int blocksneeded = c->resv_blocks_write;
	/* align it */
	minsize = PAD(minsize);

	jffs2_dbg(1, "%s(): Requested 0x%x bytes\n", __func__, minsize);
	mutex_lock(&c->alloc_sem);

	jffs2_dbg(1, "%s(): alloc sem got\n", __func__);

	spin_lock(&c->erase_completion_lock);

	/* this needs a little more thought (true <tglx> :)) */
	while(ret == -EAGAIN) {
		while(c->nr_free_blocks + c->nr_erasing_blocks < blocksneeded) {
			uint32_t dirty, avail;

			/* calculate real dirty size
			 * dirty_size contains blocks on erase_pending_list
			 * those blocks are counted in c->nr_erasing_blocks.
			 * If one block is actually erased, it is no longer counted as dirty_space
			 * but it is still counted in c->nr_erasing_blocks, so we add it and subtract it
			 * again with c->nr_erasing_blocks * c->sector_size.
			 * Blocks on erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks.
			 * This helps us to force gc and eventually pick a clean block to spread the load.
			 * We add unchecked_size here, as we hopefully will find some space to use.
			 * This will affect the sum only once, as gc first finishes checking
			 * of nodes.
			 */
			dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size + c->unchecked_size;
			if (dirty < c->nospc_dirty_size) {
				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
					jffs2_dbg(1, "%s(): Low on dirty space to GC, but it's a deletion. Allowing...\n",
						  __func__);
					break;
				}
				jffs2_dbg(1, "dirty size 0x%08x + unchecked_size 0x%08x < nospc_dirty_size 0x%08x, returning -ENOSPC\n",
					  dirty, c->unchecked_size,
					  c->nospc_dirty_size);

				spin_unlock(&c->erase_completion_lock);
				mutex_unlock(&c->alloc_sem);
				return -ENOSPC;
			}

			/* Calc possibly available space. Possibly available means that we
			 * don't know if unchecked size contains obsoleted nodes, which could give us some
			 * more usable space. This will affect the sum only once, as gc first finishes checking
			 * of nodes.
			 * Return -ENOSPC if the maximum possibly available space is less than or equal to
			 * blocksneeded * sector_size.
			 * This blocks endless gc looping on a filesystem which is nearly full, even if
			 * the check above passes.
			 */
			avail = c->free_size + c->dirty_size + c->erasing_size + c->unchecked_size;
			if ((avail / c->sector_size) <= blocksneeded) {
				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
					jffs2_dbg(1, "%s(): Low on possibly available space, but it's a deletion. Allowing...\n",
						  __func__);
					break;
				}

				jffs2_dbg(1, "max. available size 0x%08x < blocksneeded * sector_size 0x%08x, returning -ENOSPC\n",
					  avail, blocksneeded * c->sector_size);
				spin_unlock(&c->erase_completion_lock);
				mutex_unlock(&c->alloc_sem);
				return -ENOSPC;
			}

			mutex_unlock(&c->alloc_sem);

			jffs2_dbg(1, "Triggering GC pass. nr_free_blocks %d, nr_erasing_blocks %d, free_size 0x%08x, dirty_size 0x%08x, wasted_size 0x%08x, used_size 0x%08x, erasing_size 0x%08x, bad_size 0x%08x (total 0x%08x of 0x%08x)\n",
				  c->nr_free_blocks, c->nr_erasing_blocks,
				  c->free_size, c->dirty_size, c->wasted_size,
				  c->used_size, c->erasing_size, c->bad_size,
				  c->free_size + c->dirty_size +
				  c->wasted_size + c->used_size +
				  c->erasing_size + c->bad_size,
				  c->flash_size);
			spin_unlock(&c->erase_completion_lock);

			ret = jffs2_garbage_collect_pass(c);

			if (ret == -EAGAIN) {
				spin_lock(&c->erase_completion_lock);
				if (c->nr_erasing_blocks &&
				    list_empty(&c->erase_pending_list) &&
				    list_empty(&c->erase_complete_list)) {
					DECLARE_WAITQUEUE(wait, current);
					set_current_state(TASK_UNINTERRUPTIBLE);
					add_wait_queue(&c->erase_wait, &wait);
					jffs2_dbg(1, "%s waiting for erase to complete\n",
						  __func__);
					spin_unlock(&c->erase_completion_lock);

					schedule();
					remove_wait_queue(&c->erase_wait, &wait);
				} else
					spin_unlock(&c->erase_completion_lock);
			} else if (ret)
				return ret;

			cond_resched();

			if (signal_pending(current))
				return -EINTR;

			mutex_lock(&c->alloc_sem);
			spin_lock(&c->erase_completion_lock);
		}

		ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
		if (ret) {
			jffs2_dbg(1, "%s(): ret is %d\n", __func__, ret);
		}
	}
	spin_unlock(&c->erase_completion_lock);
	if (!ret)
		ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
	if (ret)
		mutex_unlock(&c->alloc_sem);
	return ret;
}

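/*
 * A minimal sketch of how a writer is expected to drive the reservation
 * API in this file (modelled on the flow in write.c; the caller-side
 * names 'ri', 'datalen' and 'f' are illustrative, and error handling is
 * trimmed):
 *
 *	uint32_t alloclen;
 *	size_t retlen;
 *	int ret;
 *
 *	ret = jffs2_reserve_space(c, sizeof(ri) + datalen, &alloclen,
 *				  ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
 *	if (ret)
 *		return ret;
 *	ret = jffs2_flash_write(c, flash_ofs, sizeof(ri), &retlen,
 *				(char *)&ri);
 *	...
 *	jffs2_add_physical_node_ref(c, flash_ofs | REF_NORMAL,
 *				    PAD(sizeof(ri) + datalen), f->inocache);
 *	jffs2_complete_reservation(c);
 *
 * jffs2_complete_reservation() (defined below) is what releases
 * c->alloc_sem again, so every successful jffs2_reserve_space() call
 * must be paired with it.
 */
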
int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize,
			   uint32_t *len, uint32_t sumsize)
{
	int ret = -EAGAIN;
	minsize = PAD(minsize);

	jffs2_dbg(1, "%s(): Requested 0x%x bytes\n", __func__, minsize);

	spin_lock(&c->erase_completion_lock);
	while(ret == -EAGAIN) {
		ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
		if (ret) {
			jffs2_dbg(1, "%s(): looping, ret is %d\n",
				  __func__, ret);
		}
	}
	spin_unlock(&c->erase_completion_lock);
	if (!ret)
		ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);

	return ret;
}

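/*
 * Note: unlike jffs2_reserve_space(), the _gc variant neither takes
 * c->alloc_sem (the garbage collector already runs under it) nor
 * enforces the resv_blocks_write watermark. The blocks kept in reserve
 * exist precisely so that GC can still write nodes out while normal
 * writers are being refused with -ENOSPC.
 */
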

/* Classify nextblock (clean, dirty or very dirty) and force selection of another one */

static void jffs2_close_nextblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{

	if (c->nextblock == NULL) {
		jffs2_dbg(1, "%s(): Erase block at 0x%08x has already been placed in a list\n",
			  __func__, jeb->offset);
		return;
	}
	/* Check if we have a dirty block now, or if it was dirty already */
	if (ISDIRTY(jeb->wasted_size + jeb->dirty_size)) {
		c->dirty_size += jeb->wasted_size;
		c->wasted_size -= jeb->wasted_size;
		jeb->dirty_size += jeb->wasted_size;
		jeb->wasted_size = 0;
		if (VERYDIRTY(c, jeb->dirty_size)) {
			jffs2_dbg(1, "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
				  jeb->offset, jeb->free_size, jeb->dirty_size,
				  jeb->used_size);
			list_add_tail(&jeb->list, &c->very_dirty_list);
		} else {
			jffs2_dbg(1, "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
				  jeb->offset, jeb->free_size, jeb->dirty_size,
				  jeb->used_size);
			list_add_tail(&jeb->list, &c->dirty_list);
		}
	} else {
		jffs2_dbg(1, "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size,
			  jeb->used_size);
		list_add_tail(&jeb->list, &c->clean_list);
	}
	c->nextblock = NULL;

}

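/*
 * Note on the thresholds used above: as defined in nodelist.h (at the
 * time of writing), ISDIRTY() treats a region as "dirty" once it is
 * larger than a minimal node (roughly sizeof(struct jffs2_raw_inode)
 * plus JFFS2_MIN_DATA_LEN), and VERYDIRTY(c, size) fires once at least
 * half of the eraseblock (c->sector_size / 2) is dirty. Smaller scraps
 * stay accounted as wasted space instead.
 */
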
/* Select a new jeb for nextblock */

static int jffs2_find_nextblock(struct jffs2_sb_info *c)
{
	struct list_head *next;

	/* Take the next block off the 'free' list */

	if (list_empty(&c->free_list)) {

		if (!c->nr_erasing_blocks &&
		    !list_empty(&c->erasable_list)) {
			struct jffs2_eraseblock *ejeb;

			ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list);
			list_move_tail(&ejeb->list, &c->erase_pending_list);
			c->nr_erasing_blocks++;
			jffs2_garbage_collect_trigger(c);
			jffs2_dbg(1, "%s(): Triggering erase of erasable block at 0x%08x\n",
				  __func__, ejeb->offset);
		}

		if (!c->nr_erasing_blocks &&
		    !list_empty(&c->erasable_pending_wbuf_list)) {
			jffs2_dbg(1, "%s(): Flushing write buffer\n",
				  __func__);
			/* c->nextblock is NULL, no update to c->nextblock allowed */
			spin_unlock(&c->erase_completion_lock);
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
			/* Have another go. It'll be on the erasable_list now */
			return -EAGAIN;
		}

		if (!c->nr_erasing_blocks) {
			/* Ouch. We're in GC, or we wouldn't have got here.
			   And there's no space left. At all. */
			pr_crit("Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n",
				c->nr_erasing_blocks, c->nr_free_blocks,
				list_empty(&c->erasable_list) ? "yes" : "no",
				list_empty(&c->erasing_list) ? "yes" : "no",
				list_empty(&c->erase_pending_list) ? "yes" : "no");
			return -ENOSPC;
		}

		spin_unlock(&c->erase_completion_lock);
		/* Don't wait for it; just erase one right now */
		jffs2_erase_pending_blocks(c, 1);
		spin_lock(&c->erase_completion_lock);

		/* An erase may have failed, decreasing the
		   amount of free space available. So we must
		   restart from the beginning */
		return -EAGAIN;
	}

	next = c->free_list.next;
	list_del(next);
	c->nextblock = list_entry(next, struct jffs2_eraseblock, list);
	c->nr_free_blocks--;

	jffs2_sum_reset_collected(c->summary); /* reset collected summary */

#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
	/* adjust write buffer offset, else we get a non-contiguous write bug */
	if (!(c->wbuf_ofs % c->sector_size) && !c->wbuf_len)
		c->wbuf_ofs = 0xffffffff;
#endif

	jffs2_dbg(1, "%s(): new nextblock = 0x%08x\n",
		  __func__, c->nextblock->offset);

	return 0;
}

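/*
 * (The 0xffffffff store above is a sentinel: wbuf.c treats that value
 * as "no valid write-buffer offset", so invalidating wbuf_ofs forces
 * the write-buffer code to re-seed its offset from the new nextblock
 * instead of assuming it continues from the previous one.)
 */
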
/* Called with alloc sem _and_ erase_completion_lock */
static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
				  uint32_t *len, uint32_t sumsize)
{
	struct jffs2_eraseblock *jeb = c->nextblock;
	uint32_t reserved_size;	/* for summary information at the end of the jeb */
	int ret;

 restart:
	reserved_size = 0;

	if (jffs2_sum_active() && (sumsize != JFFS2_SUMMARY_NOSUM_SIZE)) {
		/* NOSUM_SIZE means not to generate summary */

		if (jeb) {
			reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
			dbg_summary("minsize=%d, jeb->free=%d, "
				    "summary->size=%d, sumsize=%d\n",
				    minsize, jeb->free_size,
				    c->summary->sum_size, sumsize);
		}

		/* Is there enough space for writing out the current node, or do
		   we have to write out summary information now, close this jeb
		   and select a new nextblock? */
		if (jeb && (PAD(minsize) + PAD(c->summary->sum_size + sumsize +
					JFFS2_SUMMARY_FRAME_SIZE) > jeb->free_size)) {

			/* Has summary been disabled for this jeb? */
			if (jffs2_sum_is_disabled(c->summary)) {
				sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
				goto restart;
			}

			/* Writing out the collected summary information */
			dbg_summary("generating summary for 0x%08x.\n", jeb->offset);
			ret = jffs2_sum_write_sumnode(c);

			if (ret)
				return ret;

			if (jffs2_sum_is_disabled(c->summary)) {
				/* jffs2_sum_write_sumnode() couldn't write out the
				   summary information; disable summary for this jeb
				   and free the collected information */
				sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
				goto restart;
			}

			jffs2_close_nextblock(c, jeb);
			jeb = NULL;
			/* keep a valid value in reserved_size at all times */
			reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
		}
	} else {
		if (jeb && minsize > jeb->free_size) {
			uint32_t waste;

			/* Skip the end of this block and file it as having some dirty space */
			/* If there's a pending write to it, flush now */

			if (jffs2_wbuf_dirty(c)) {
				spin_unlock(&c->erase_completion_lock);
				jffs2_dbg(1, "%s(): Flushing write buffer\n",
					  __func__);
				jffs2_flush_wbuf_pad(c);
				spin_lock(&c->erase_completion_lock);
				jeb = c->nextblock;
				goto restart;
			}

			spin_unlock(&c->erase_completion_lock);

			ret = jffs2_prealloc_raw_node_refs(c, jeb, 1);
			if (ret)
				return ret;
			/* Just lock it again and continue. Nothing much can change because
			   we hold c->alloc_sem anyway. In fact, it's not entirely clear why
			   we hold c->erase_completion_lock in the majority of this function...
			   but that's a question for another (more caffeine-rich) day. */
			spin_lock(&c->erase_completion_lock);

			waste = jeb->free_size;
			jffs2_link_node_ref(c, jeb,
					    (jeb->offset + c->sector_size - waste) | REF_OBSOLETE,
					    waste, NULL);
			/* FIXME: that made it count as dirty. Convert to wasted */
			jeb->dirty_size -= waste;
			c->dirty_size -= waste;
			jeb->wasted_size += waste;
			c->wasted_size += waste;

			jffs2_close_nextblock(c, jeb);
			jeb = NULL;
		}
	}

	if (!jeb) {

		ret = jffs2_find_nextblock(c);
		if (ret)
			return ret;

		jeb = c->nextblock;

		if (jeb->free_size != c->sector_size - c->cleanmarker_size) {
			pr_warn("Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n",
				jeb->offset, jeb->free_size);
			goto restart;
		}
	}
	/* OK, jeb (==c->nextblock) is now pointing at a block which definitely has
	   enough space */
	*len = jeb->free_size - reserved_size;

	if (c->cleanmarker_size && jeb->used_size == c->cleanmarker_size &&
	    !jeb->first_node->next_in_ino) {
		/* Only node in it beforehand was a CLEANMARKER node (we think).
		   So mark it obsolete now that there's going to be another node
		   in the block. This will reduce used_size to zero, but we've
		   already set c->nextblock so that jffs2_mark_node_obsolete()
		   won't try to refile it to the dirty_list.
		*/
		spin_unlock(&c->erase_completion_lock);
		jffs2_mark_node_obsolete(c, jeb->first_node);
		spin_lock(&c->erase_completion_lock);
	}

	jffs2_dbg(1, "%s(): Giving 0x%x bytes at 0x%x\n",
		  __func__,
		  *len, jeb->offset + (c->sector_size - jeb->free_size));
	return 0;
}

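/*
 * Worked example of the summary reservation arithmetic above
 * (illustrative numbers only): with sector_size = 0x10000, a jeb whose
 * free_size is 0x9000, minsize = 0x1000, sumsize = 0x100 and 0x500 of
 * summary data already collected, the node fits as long as
 * PAD(0x1000) + PAD(0x500 + 0x100 + JFFS2_SUMMARY_FRAME_SIZE) <= 0x9000;
 * the caller is then granted *len = free_size - reserved_size, so the
 * summary node can still be written at the very end of the block.
 */
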
/**
 * jffs2_add_physical_node_ref - add a physical node reference to the list
 * @c: superblock info
 * @ofs: physical offset of the node, with REF_* flag bits in the low bits
 * @len: length of this physical node
 * @ic: inode cache the node belongs to, if any
 *
 * Should only be used to report nodes for which space has been allocated
 * by jffs2_reserve_space.
 *
 * Must be called with the alloc_sem held.
 */

struct jffs2_raw_node_ref *jffs2_add_physical_node_ref(struct jffs2_sb_info *c,
						       uint32_t ofs, uint32_t len,
						       struct jffs2_inode_cache *ic)
{
	struct jffs2_eraseblock *jeb;
	struct jffs2_raw_node_ref *new;

	jeb = &c->blocks[ofs / c->sector_size];

	jffs2_dbg(1, "%s(): Node at 0x%x(%d), size 0x%x\n",
		  __func__, ofs & ~3, ofs & 3, len);
#if 1
	/* Allow non-obsolete nodes only to be added at the end of c->nextblock,
	   if c->nextblock is set. Note that wbuf.c will file obsolete nodes
	   even after refiling c->nextblock */
	if ((c->nextblock || ((ofs & 3) != REF_OBSOLETE))
	    && (jeb != c->nextblock || (ofs & ~3) != jeb->offset + (c->sector_size - jeb->free_size))) {
		pr_warn("argh. node added in wrong place at 0x%08x(%d)\n",
			ofs & ~3, ofs & 3);
		if (c->nextblock)
			pr_warn("nextblock 0x%08x", c->nextblock->offset);
		else
			pr_warn("No nextblock");
		pr_cont(", expected at %08x\n",
			jeb->offset + (c->sector_size - jeb->free_size));
		return ERR_PTR(-EINVAL);
	}
#endif
	spin_lock(&c->erase_completion_lock);

	new = jffs2_link_node_ref(c, jeb, ofs, len, ic);

	if (!jeb->free_size && !jeb->dirty_size && !ISDIRTY(jeb->wasted_size)) {
		/* If it lives on the dirty_list, jffs2_reserve_space will put it there */
		jffs2_dbg(1, "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size,
			  jeb->used_size);
		if (jffs2_wbuf_dirty(c)) {
			/* Flush the last write in the block if it's outstanding */
			spin_unlock(&c->erase_completion_lock);
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
		}

		list_add_tail(&jeb->list, &c->clean_list);
		c->nextblock = NULL;
	}
	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	spin_unlock(&c->erase_completion_lock);

	return new;
}

void jffs2_complete_reservation(struct jffs2_sb_info *c)
{
	jffs2_dbg(1, "jffs2_complete_reservation()\n");
	spin_lock(&c->erase_completion_lock);
	jffs2_garbage_collect_trigger(c);
	spin_unlock(&c->erase_completion_lock);
	mutex_unlock(&c->alloc_sem);
}

static inline int on_list(struct list_head *obj, struct list_head *head)
{
	struct list_head *this;

	list_for_each(this, head) {
		if (this == obj) {
			jffs2_dbg(1, "%p is on list at %p\n", obj, head);
			return 1;
		}
	}
	return 0;
}

void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref)
{
	struct jffs2_eraseblock *jeb;
	int blocknr;
	struct jffs2_unknown_node n;
	int ret, addedsize;
	size_t retlen;
	uint32_t freed_len;

	if(unlikely(!ref)) {
		pr_notice("EEEEEK. jffs2_mark_node_obsolete called with NULL node\n");
		return;
	}
	if (ref_obsolete(ref)) {
		jffs2_dbg(1, "%s(): called with already obsolete node at 0x%08x\n",
			  __func__, ref_offset(ref));
		return;
	}
	blocknr = ref->flash_offset / c->sector_size;
	if (blocknr >= c->nr_blocks) {
		pr_notice("raw node at 0x%08x is off the end of device!\n",
			  ref->flash_offset);
		BUG();
	}
	jeb = &c->blocks[blocknr];

	if (jffs2_can_mark_obsolete(c) && !jffs2_is_readonly(c) &&
	    !(c->flags & (JFFS2_SB_FLAG_SCANNING | JFFS2_SB_FLAG_BUILDING))) {
		/* Hm. This may confuse static lock analysis. If any of the above
		   three conditions is false, we're going to return from this
		   function without actually obliterating any nodes or freeing
		   any jffs2_raw_node_refs. So we don't need to stop erases from
		   happening, or protect against people holding an obsolete
		   jffs2_raw_node_ref without the erase_completion_lock. */
		mutex_lock(&c->erase_free_sem);
	}

	spin_lock(&c->erase_completion_lock);

	freed_len = ref_totlen(c, jeb, ref);

	if (ref_flags(ref) == REF_UNCHECKED) {
		D1(if (unlikely(jeb->unchecked_size < freed_len)) {
			pr_notice("raw unchecked node of size 0x%08x freed from erase block %d at 0x%08x, but unchecked_size was already 0x%08x\n",
				  freed_len, blocknr,
				  ref->flash_offset, jeb->unchecked_size);
			BUG();
		})
		jffs2_dbg(1, "Obsoleting previously unchecked node at 0x%08x of len %x\n",
			  ref_offset(ref), freed_len);
		jeb->unchecked_size -= freed_len;
		c->unchecked_size -= freed_len;
	} else {
		D1(if (unlikely(jeb->used_size < freed_len)) {
			pr_notice("raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n",
				  freed_len, blocknr,
				  ref->flash_offset, jeb->used_size);
			BUG();
		})
		jffs2_dbg(1, "Obsoleting node at 0x%08x of len %#x: ",
			  ref_offset(ref), freed_len);
		jeb->used_size -= freed_len;
		c->used_size -= freed_len;
	}

	/* Take care that wasted size is taken into account */
	if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + freed_len)) && jeb != c->nextblock) {
		jffs2_dbg(1, "Dirtying\n");
		addedsize = freed_len;
		jeb->dirty_size += freed_len;
		c->dirty_size += freed_len;

		/* Convert wasted space to dirty, if not a bad block */
		if (jeb->wasted_size) {
			if (on_list(&jeb->list, &c->bad_used_list)) {
				jffs2_dbg(1, "Leaving block at %08x on the bad_used_list\n",
					  jeb->offset);
				addedsize = 0; /* To fool the refiling code later */
			} else {
				jffs2_dbg(1, "Converting %d bytes of wasted space to dirty in block at %08x\n",
					  jeb->wasted_size, jeb->offset);
				addedsize += jeb->wasted_size;
				jeb->dirty_size += jeb->wasted_size;
				c->dirty_size += jeb->wasted_size;
				c->wasted_size -= jeb->wasted_size;
				jeb->wasted_size = 0;
			}
		}
	} else {
		jffs2_dbg(1, "Wasting\n");
		addedsize = 0;
		jeb->wasted_size += freed_len;
		c->wasted_size += freed_len;
	}
	ref->flash_offset = ref_offset(ref) | REF_OBSOLETE;

	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	if (c->flags & JFFS2_SB_FLAG_SCANNING) {
		/* Flash scanning is in progress. Don't muck about with the block
		   lists because they're not ready yet, and don't actually
		   obliterate nodes that look obsolete. If they weren't
		   marked obsolete on the flash at the time they _became_
		   obsolete, there was probably a reason for that. */
		spin_unlock(&c->erase_completion_lock);
		/* We didn't lock the erase_free_sem */
		return;
	}

	if (jeb == c->nextblock) {
		jffs2_dbg(2, "Not moving nextblock 0x%08x to dirty/erase_pending list\n",
			  jeb->offset);
	} else if (!jeb->used_size && !jeb->unchecked_size) {
		if (jeb == c->gcblock) {
			jffs2_dbg(1, "gcblock at 0x%08x completely dirtied. Clearing gcblock...\n",
				  jeb->offset);
			c->gcblock = NULL;
		} else {
			jffs2_dbg(1, "Eraseblock at 0x%08x completely dirtied. Removing from (dirty?) list...\n",
				  jeb->offset);
			list_del(&jeb->list);
		}
		if (jffs2_wbuf_dirty(c)) {
			jffs2_dbg(1, "...and adding to erasable_pending_wbuf_list\n");
			list_add_tail(&jeb->list, &c->erasable_pending_wbuf_list);
		} else {
			if (jiffies & 127) {
				/* Most of the time, we just erase it immediately. Otherwise we
				   spend ages scanning it on mount, etc. */
				jffs2_dbg(1, "...and adding to erase_pending_list\n");
				list_add_tail(&jeb->list, &c->erase_pending_list);
				c->nr_erasing_blocks++;
				jffs2_garbage_collect_trigger(c);
			} else {
				/* Sometimes, however, we leave it elsewhere so it doesn't get
				   immediately reused, and we spread the load a bit. */
				jffs2_dbg(1, "...and adding to erasable_list\n");
				list_add_tail(&jeb->list, &c->erasable_list);
			}
		}
		jffs2_dbg(1, "Done OK\n");
	} else if (jeb == c->gcblock) {
		jffs2_dbg(2, "Not moving gcblock 0x%08x to dirty_list\n",
			  jeb->offset);
	} else if (ISDIRTY(jeb->dirty_size) && !ISDIRTY(jeb->dirty_size - addedsize)) {
		jffs2_dbg(1, "Eraseblock at 0x%08x is freshly dirtied. Removing from clean list...\n",
			  jeb->offset);
		list_del(&jeb->list);
		jffs2_dbg(1, "...and adding to dirty_list\n");
		list_add_tail(&jeb->list, &c->dirty_list);
	} else if (VERYDIRTY(c, jeb->dirty_size) &&
		   !VERYDIRTY(c, jeb->dirty_size - addedsize)) {
		jffs2_dbg(1, "Eraseblock at 0x%08x is now very dirty. Removing from dirty list...\n",
			  jeb->offset);
		list_del(&jeb->list);
		jffs2_dbg(1, "...and adding to very_dirty_list\n");
		list_add_tail(&jeb->list, &c->very_dirty_list);
	} else {
		jffs2_dbg(1, "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size,
			  jeb->used_size);
	}

	spin_unlock(&c->erase_completion_lock);

	if (!jffs2_can_mark_obsolete(c) || jffs2_is_readonly(c) ||
	    (c->flags & JFFS2_SB_FLAG_BUILDING)) {
		/* We didn't lock the erase_free_sem */
		return;
	}

	/* The erase_free_sem is locked, and has been since before we marked the node obsolete
	   and potentially put its eraseblock onto the erase_pending_list. Thus, we know that
	   the block hasn't _already_ been erased, and that 'ref' itself hasn't been freed yet
	   by jffs2_free_jeb_node_refs() in erase.c. Which is nice. */

	jffs2_dbg(1, "obliterating obsoleted node at 0x%08x\n",
		  ref_offset(ref));
	ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
	if (ret) {
		pr_warn("Read error reading from obsoleted node at 0x%08x: %d\n",
			ref_offset(ref), ret);
		goto out_erase_sem;
	}
	if (retlen != sizeof(n)) {
		pr_warn("Short read from obsoleted node at 0x%08x: %zd\n",
			ref_offset(ref), retlen);
		goto out_erase_sem;
	}
	if (PAD(je32_to_cpu(n.totlen)) != PAD(freed_len)) {
		pr_warn("Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n",
			je32_to_cpu(n.totlen), freed_len);
		goto out_erase_sem;
	}
	if (!(je16_to_cpu(n.nodetype) & JFFS2_NODE_ACCURATE)) {
		jffs2_dbg(1, "Node at 0x%08x was already marked obsolete (nodetype 0x%04x)\n",
			  ref_offset(ref), je16_to_cpu(n.nodetype));
		goto out_erase_sem;
	}
	/* XXX FIXME: This is ugly now */
	n.nodetype = cpu_to_je16(je16_to_cpu(n.nodetype) & ~JFFS2_NODE_ACCURATE);
	ret = jffs2_flash_write(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
	if (ret) {
		pr_warn("Write error in obliterating obsoleted node at 0x%08x: %d\n",
			ref_offset(ref), ret);
		goto out_erase_sem;
	}
	if (retlen != sizeof(n)) {
		pr_warn("Short write in obliterating obsoleted node at 0x%08x: %zd\n",
			ref_offset(ref), retlen);
		goto out_erase_sem;
	}

	/* Nodes which have been marked obsolete no longer need to be
	   associated with any inode. Remove them from the per-inode list.

	   Note we can't do this for NAND at the moment because we need
	   obsolete dirent nodes to stay on the lists, because of the
	   horridness in jffs2_garbage_collect_deletion_dirent(). Also
	   because we delete the inocache, and on NAND we need that to
	   stay around until all the nodes are actually erased, in order
	   to stop us from giving the same inode number to another newly
	   created inode. */
	if (ref->next_in_ino) {
		struct jffs2_inode_cache *ic;
		struct jffs2_raw_node_ref **p;

		spin_lock(&c->erase_completion_lock);

		ic = jffs2_raw_ref_to_ic(ref);
		for (p = &ic->nodes; (*p) != ref; p = &((*p)->next_in_ino))
			;

		*p = ref->next_in_ino;
		ref->next_in_ino = NULL;

		switch (ic->class) {
#ifdef CONFIG_JFFS2_FS_XATTR
		case RAWNODE_CLASS_XATTR_DATUM:
			jffs2_release_xattr_datum(c, (struct jffs2_xattr_datum *)ic);
			break;
		case RAWNODE_CLASS_XATTR_REF:
			jffs2_release_xattr_ref(c, (struct jffs2_xattr_ref *)ic);
			break;
#endif
		default:
			if (ic->nodes == (void *)ic && ic->pino_nlink == 0)
				jffs2_del_ino_cache(c, ic);
			break;
		}
		spin_unlock(&c->erase_completion_lock);
	}

 out_erase_sem:
	mutex_unlock(&c->erase_free_sem);
}

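/*
 * The "obliteration" above is an optimization rather than a correctness
 * requirement: clearing JFFS2_NODE_ACCURATE in the node header on the
 * medium lets the next mount's scan discard the node immediately
 * instead of re-parsing it. That is why all the failure paths above
 * simply fall through to out_erase_sem instead of reporting an error
 * to the caller.
 */
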
int jffs2_thread_should_wake(struct jffs2_sb_info *c)
{
	int ret = 0;
	uint32_t dirty;
	int nr_very_dirty = 0;
	struct jffs2_eraseblock *jeb;

	if (!list_empty(&c->erase_complete_list) ||
	    !list_empty(&c->erase_pending_list))
		return 1;

	if (c->unchecked_size) {
		jffs2_dbg(1, "jffs2_thread_should_wake(): unchecked_size %d, checked_ino #%d\n",
			  c->unchecked_size, c->checked_ino);
		return 1;
	}

	/* dirty_size contains blocks on erase_pending_list
	 * those blocks are counted in c->nr_erasing_blocks.
	 * If one block is actually erased, it is no longer counted as dirty_space
	 * but it is still counted in c->nr_erasing_blocks, so we add it and subtract it
	 * again with c->nr_erasing_blocks * c->sector_size.
	 * Blocks on erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks.
	 * This helps us to force gc and eventually pick a clean block to spread the load.
	 */
	dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size;

	if (c->nr_free_blocks + c->nr_erasing_blocks < c->resv_blocks_gctrigger &&
	    (dirty > c->nospc_dirty_size))
		ret = 1;

	list_for_each_entry(jeb, &c->very_dirty_list, list) {
		nr_very_dirty++;
		if (nr_very_dirty == c->vdirty_blocks_gctrigger) {
			ret = 1;
			/* In debug mode, actually go through and count them all */
			D1(continue);
			break;
		}
	}

	jffs2_dbg(1, "%s(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x, vdirty_blocks %d: %s\n",
		  __func__, c->nr_free_blocks, c->nr_erasing_blocks,
		  c->dirty_size, nr_very_dirty, ret ? "yes" : "no");

	return ret;
}
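
/*
 * Illustrative reading of the GC-wake heuristic above (made-up numbers):
 * with resv_blocks_gctrigger = 6 and nospc_dirty_size = 0x2000, a
 * filesystem with 3 free blocks, 1 erasing block and a computed
 * dirty = 0x38000 wakes the GC thread (4 < 6 and 0x38000 > 0x2000),
 * whereas the same pool with 7 free blocks does not, unless the
 * very_dirty_list has grown to vdirty_blocks_gctrigger entries.
 */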