/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright (C) 2001-2003 Red Hat, Inc.
 *
 * Created by David Woodhouse <[email protected]>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 * $Id: nodemgmt.c,v 1.124 2005/07/20 15:32:28 dedekind Exp $
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/compiler.h>
#include <linux/sched.h> /* For cond_resched() */
#include "nodelist.h"

/**
 *	jffs2_reserve_space - request physical space to write nodes to flash
 *	@c: superblock info
 *	@minsize: Minimum acceptable size of allocation
 *	@ofs: Returned value of node offset
 *	@len: Returned value of allocation length
 *	@prio: Allocation type - ALLOC_{NORMAL,DELETION}
 *
 *	Requests a block of physical space on the flash. Returns zero for success
 *	and puts 'ofs' and 'len' into the appropriate place, or returns -ENOSPC
 *	or other error if appropriate.
 *
 *	If it returns zero, jffs2_reserve_space() also downs the per-filesystem
 *	allocation semaphore, to prevent more than one allocation from being
 *	active at any time. The semaphore is later released by jffs2_complete_reservation().
 *
 *	jffs2_reserve_space() may trigger garbage collection in order to make room
 *	for the requested allocation.
 */
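/*
 * Minimal sketch of the typical caller sequence (illustrative only, not part
 * of this file; 'raw', 'ri' and 'datalen' are hypothetical caller-side values,
 * following the pattern used by the write paths):
 *
 *	uint32_t ofs, alloclen;
 *	int ret;
 *
 *	ret = jffs2_reserve_space(c, sizeof(*ri) + datalen, &ofs, &alloclen, ALLOC_NORMAL);
 *	if (ret)
 *		return ret;				// -ENOSPC, -EINTR, ...
 *
 *	// ... write the node to flash at 'ofs', then report it:
 *	raw->flash_offset = ofs;
 *	raw->__totlen = PAD(sizeof(*ri) + datalen);
 *	ret = jffs2_add_physical_node_ref(c, raw);	// alloc_sem is still held here
 *
 *	jffs2_complete_reservation(c);			// releases c->alloc_sem
 */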

static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len);

int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len, int prio)
{
	int ret = -EAGAIN;
	int blocksneeded = c->resv_blocks_write;
	/* align it */
	minsize = PAD(minsize);

	D1(printk(KERN_DEBUG "jffs2_reserve_space(): Requested 0x%x bytes\n", minsize));
	down(&c->alloc_sem);

	D1(printk(KERN_DEBUG "jffs2_reserve_space(): alloc sem got\n"));

	spin_lock(&c->erase_completion_lock);

	/* this needs a little more thought (true <tglx> :)) */
	while(ret == -EAGAIN) {
		while(c->nr_free_blocks + c->nr_erasing_blocks < blocksneeded) {
			int ret;
			uint32_t dirty, avail;

			/* calculate real dirty size
			 * dirty_size contains blocks on erase_pending_list
			 * those blocks are counted in c->nr_erasing_blocks.
			 * If one block is actually erased, it is no longer counted as dirty_space
			 * but it is counted in c->nr_erasing_blocks, so we add it and subtract it
			 * with c->nr_erasing_blocks * c->sector_size again.
			 * Blocks on erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks
			 * This helps us to force gc and eventually pick a clean block to spread the load.
			 * We add unchecked_size here, as we hopefully will find some space to use.
			 * This will affect the sum only once, as gc first finishes checking
			 * of nodes.
			 */
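			/* Illustrative example (hypothetical numbers, not taken from any
			 * real filesystem): with sector_size = 0x10000, dirty_size = 0x28000,
			 * erasing_size = 0x20000 (two blocks queued for erase, so
			 * nr_erasing_blocks = 2) and unchecked_size = 0x4000:
			 *
			 *	dirty = 0x28000 + 0x20000 - 2 * 0x10000 + 0x4000 = 0x2c000
			 *
			 * i.e. the two blocks already accounted for in nr_erasing_blocks
			 * cancel out, leaving only space that GC could still reclaim.
			 */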
			dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size + c->unchecked_size;
			if (dirty < c->nospc_dirty_size) {
				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
					D1(printk(KERN_NOTICE "jffs2_reserve_space(): Low on dirty space to GC, but it's a deletion. Allowing...\n"));
					break;
				}
				D1(printk(KERN_DEBUG "dirty size 0x%08x + unchecked_size 0x%08x < nospc_dirty_size 0x%08x, returning -ENOSPC\n",
					  dirty, c->unchecked_size, c->nospc_dirty_size));

				spin_unlock(&c->erase_completion_lock);
				up(&c->alloc_sem);
				return -ENOSPC;
			}

			/* Calc possibly available space. Possibly available means that we
			 * don't know if unchecked size contains obsoleted nodes, which could give us some
			 * more usable space. This will affect the sum only once, as gc first finishes checking
			 * of nodes.
			 * Return -ENOSPC if the maximum possibly available space is less than or equal to
			 * blocksneeded * sector_size.
			 * This blocks endless gc looping on a filesystem which is nearly full, even if
			 * the check above passes.
			 */
			avail = c->free_size + c->dirty_size + c->erasing_size + c->unchecked_size;
			if ( (avail / c->sector_size) <= blocksneeded) {
				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
					D1(printk(KERN_NOTICE "jffs2_reserve_space(): Low on possibly available space, but it's a deletion. Allowing...\n"));
					break;
				}

				D1(printk(KERN_DEBUG "max. available size 0x%08x  < blocksneeded * sector_size 0x%08x, returning -ENOSPC\n",
					  avail, blocksneeded * c->sector_size));
				spin_unlock(&c->erase_completion_lock);
				up(&c->alloc_sem);
				return -ENOSPC;
			}

			up(&c->alloc_sem);

			D1(printk(KERN_DEBUG "Triggering GC pass. nr_free_blocks %d, nr_erasing_blocks %d, free_size 0x%08x, dirty_size 0x%08x, wasted_size 0x%08x, used_size 0x%08x, erasing_size 0x%08x, bad_size 0x%08x (total 0x%08x of 0x%08x)\n",
				  c->nr_free_blocks, c->nr_erasing_blocks, c->free_size, c->dirty_size, c->wasted_size, c->used_size, c->erasing_size, c->bad_size,
				  c->free_size + c->dirty_size + c->wasted_size + c->used_size + c->erasing_size + c->bad_size, c->flash_size));
			spin_unlock(&c->erase_completion_lock);

			ret = jffs2_garbage_collect_pass(c);
			if (ret)
				return ret;

			cond_resched();

			if (signal_pending(current))
				return -EINTR;

			down(&c->alloc_sem);
			spin_lock(&c->erase_completion_lock);
		}

		ret = jffs2_do_reserve_space(c, minsize, ofs, len);
		if (ret) {
			D1(printk(KERN_DEBUG "jffs2_reserve_space: ret is %d\n", ret));
		}
	}
	spin_unlock(&c->erase_completion_lock);
	if (ret)
		up(&c->alloc_sem);
	return ret;
}

int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len)
{
	int ret = -EAGAIN;
	minsize = PAD(minsize);

	D1(printk(KERN_DEBUG "jffs2_reserve_space_gc(): Requested 0x%x bytes\n", minsize));

	spin_lock(&c->erase_completion_lock);
	while(ret == -EAGAIN) {
		ret = jffs2_do_reserve_space(c, minsize, ofs, len);
		if (ret) {
			D1(printk(KERN_DEBUG "jffs2_reserve_space_gc: looping, ret is %d\n", ret));
		}
	}
	spin_unlock(&c->erase_completion_lock);
	return ret;
}

/* Called with alloc sem _and_ erase_completion_lock */
static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len)
{
	struct jffs2_eraseblock *jeb = c->nextblock;

 restart:
	if (jeb && minsize > jeb->free_size) {
		/* Skip the end of this block and file it as having some dirty space */
		/* If there's a pending write to it, flush now */
		if (jffs2_wbuf_dirty(c)) {
			spin_unlock(&c->erase_completion_lock);
			D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n"));
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
			jeb = c->nextblock;
			goto restart;
		}
		c->wasted_size += jeb->free_size;
		c->free_size -= jeb->free_size;
		jeb->wasted_size += jeb->free_size;
		jeb->free_size = 0;

		/* Check, if we have a dirty block now, or if it was dirty already */
		if (ISDIRTY (jeb->wasted_size + jeb->dirty_size)) {
			c->dirty_size += jeb->wasted_size;
			c->wasted_size -= jeb->wasted_size;
			jeb->dirty_size += jeb->wasted_size;
			jeb->wasted_size = 0;
			if (VERYDIRTY(c, jeb->dirty_size)) {
				D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
				  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
				list_add_tail(&jeb->list, &c->very_dirty_list);
			} else {
				D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
				  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
				list_add_tail(&jeb->list, &c->dirty_list);
			}
		} else {
			D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
			list_add_tail(&jeb->list, &c->clean_list);
		}
		c->nextblock = jeb = NULL;
	}

	if (!jeb) {
		struct list_head *next;
		/* Take the next block off the 'free' list */

		if (list_empty(&c->free_list)) {

			if (!c->nr_erasing_blocks &&
			    !list_empty(&c->erasable_list)) {
				struct jffs2_eraseblock *ejeb;

				ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list);
				list_del(&ejeb->list);
				list_add_tail(&ejeb->list, &c->erase_pending_list);
				c->nr_erasing_blocks++;
				jffs2_erase_pending_trigger(c);
				D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Triggering erase of erasable block at 0x%08x\n",
					  ejeb->offset));
			}

			if (!c->nr_erasing_blocks &&
			    !list_empty(&c->erasable_pending_wbuf_list)) {
				D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n"));
				/* c->nextblock is NULL, no update to c->nextblock allowed */
				spin_unlock(&c->erase_completion_lock);
				jffs2_flush_wbuf_pad(c);
				spin_lock(&c->erase_completion_lock);
				/* Have another go. It'll be on the erasable_list now */
				return -EAGAIN;
			}

			if (!c->nr_erasing_blocks) {
				/* Ouch. We're in GC, or we wouldn't have got here.
				   And there's no space left. At all. */
				printk(KERN_CRIT "Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n",
				       c->nr_erasing_blocks, c->nr_free_blocks, list_empty(&c->erasable_list)?"yes":"no",
				       list_empty(&c->erasing_list)?"yes":"no", list_empty(&c->erase_pending_list)?"yes":"no");
				return -ENOSPC;
			}

			spin_unlock(&c->erase_completion_lock);
			/* Don't wait for it; just erase one right now */
			jffs2_erase_pending_blocks(c, 1);
			spin_lock(&c->erase_completion_lock);

			/* An erase may have failed, decreasing the
			   amount of free space available. So we must
			   restart from the beginning */
			return -EAGAIN;
		}

		next = c->free_list.next;
		list_del(next);
		c->nextblock = jeb = list_entry(next, struct jffs2_eraseblock, list);
		c->nr_free_blocks--;

		if (jeb->free_size != c->sector_size - c->cleanmarker_size) {
			printk(KERN_WARNING "Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n", jeb->offset, jeb->free_size);
			goto restart;
		}
	}
	/* OK, jeb (==c->nextblock) is now pointing at a block which definitely has
	   enough space */
	*ofs = jeb->offset + (c->sector_size - jeb->free_size);
	*len = jeb->free_size;

	if (c->cleanmarker_size && jeb->used_size == c->cleanmarker_size &&
	    !jeb->first_node->next_in_ino) {
		/* Only node in it beforehand was a CLEANMARKER node (we think).
		   So mark it obsolete now that there's going to be another node
		   in the block. This will reduce used_size to zero but we've
		   already set c->nextblock so that jffs2_mark_node_obsolete()
		   won't try to refile it to the dirty_list.
		*/
		spin_unlock(&c->erase_completion_lock);
		jffs2_mark_node_obsolete(c, jeb->first_node);
		spin_lock(&c->erase_completion_lock);
	}

	D1(printk(KERN_DEBUG "jffs2_do_reserve_space(): Giving 0x%x bytes at 0x%x\n", *len, *ofs));
	return 0;
}

/**
 *	jffs2_add_physical_node_ref - add a physical node reference to the list
 *	@c: superblock info
 *	@new: new node reference to add
 *
 *	Should only be used to report nodes for which space has been allocated
 *	by jffs2_reserve_space.
 *
 *	Must be called with the alloc_sem held.
 */
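/*
 * Minimal sketch of the expected calling pattern (illustrative only; 'raw',
 * 'ri', 'datalen' and 'flash_ofs' are hypothetical caller-side values).
 * Space must have been reserved first and the node already written to flash:
 *
 *	raw->flash_offset = flash_ofs;			// offset returned by jffs2_reserve_space()
 *	raw->__totlen = PAD(sizeof(*ri) + datalen);	// total on-flash length of the node
 *	raw->next_phys = NULL;
 *	ret = jffs2_add_physical_node_ref(c, raw);	// alloc_sem still held at this point
 */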

int jffs2_add_physical_node_ref(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *new)
{
	struct jffs2_eraseblock *jeb;
	uint32_t len;

	jeb = &c->blocks[new->flash_offset / c->sector_size];
	len = ref_totlen(c, jeb, new);

	D1(printk(KERN_DEBUG "jffs2_add_physical_node_ref(): Node at 0x%x(%d), size 0x%x\n", ref_offset(new), ref_flags(new), len));
#if 1
	/* we could get some obsolete nodes after nextblock was refiled
	   in wbuf.c */
	if ((c->nextblock || !ref_obsolete(new))
	    && (jeb != c->nextblock || ref_offset(new) != jeb->offset + (c->sector_size - jeb->free_size))) {
		printk(KERN_WARNING "argh. node added in wrong place\n");
		jffs2_free_raw_node_ref(new);
		return -EINVAL;
	}
#endif
	spin_lock(&c->erase_completion_lock);

	if (!jeb->first_node)
		jeb->first_node = new;
	if (jeb->last_node)
		jeb->last_node->next_phys = new;
	jeb->last_node = new;

	jeb->free_size -= len;
	c->free_size -= len;
	if (ref_obsolete(new)) {
		jeb->dirty_size += len;
		c->dirty_size += len;
	} else {
		jeb->used_size += len;
		c->used_size += len;
	}

	if (!jeb->free_size && !jeb->dirty_size && !ISDIRTY(jeb->wasted_size)) {
		/* If it lives on the dirty_list, jffs2_reserve_space will put it there */
		D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
		if (jffs2_wbuf_dirty(c)) {
			/* Flush the last write in the block if it's outstanding */
			spin_unlock(&c->erase_completion_lock);
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
		}

		list_add_tail(&jeb->list, &c->clean_list);
		c->nextblock = NULL;
	}
	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	spin_unlock(&c->erase_completion_lock);

	return 0;
}


void jffs2_complete_reservation(struct jffs2_sb_info *c)
{
	D1(printk(KERN_DEBUG "jffs2_complete_reservation()\n"));
	jffs2_garbage_collect_trigger(c);
	up(&c->alloc_sem);
}

static inline int on_list(struct list_head *obj, struct list_head *head)
{
	struct list_head *this;

	list_for_each(this, head) {
		if (this == obj) {
			D1(printk("%p is on list at %p\n", obj, head));
			return 1;
		}
	}
	return 0;
}

void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref)
{
	struct jffs2_eraseblock *jeb;
	int blocknr;
	struct jffs2_unknown_node n;
	int ret, addedsize;
	size_t retlen;

	if(!ref) {
		printk(KERN_NOTICE "EEEEEK. jffs2_mark_node_obsolete called with NULL node\n");
		return;
	}
	if (ref_obsolete(ref)) {
		D1(printk(KERN_DEBUG "jffs2_mark_node_obsolete called with already obsolete node at 0x%08x\n", ref_offset(ref)));
		return;
	}
	blocknr = ref->flash_offset / c->sector_size;
	if (blocknr >= c->nr_blocks) {
		printk(KERN_NOTICE "raw node at 0x%08x is off the end of device!\n", ref->flash_offset);
		BUG();
	}
	jeb = &c->blocks[blocknr];

	if (jffs2_can_mark_obsolete(c) && !jffs2_is_readonly(c) &&
	    !(c->flags & (JFFS2_SB_FLAG_SCANNING | JFFS2_SB_FLAG_BUILDING))) {
		/* Hm. This may confuse static lock analysis. If any of the above
		   three conditions is false, we're going to return from this
		   function without actually obliterating any nodes or freeing
		   any jffs2_raw_node_refs. So we don't need to stop erases from
		   happening, or protect against people holding an obsolete
		   jffs2_raw_node_ref without the erase_completion_lock. */
		down(&c->erase_free_sem);
	}

	spin_lock(&c->erase_completion_lock);

	if (ref_flags(ref) == REF_UNCHECKED) {
		D1(if (unlikely(jeb->unchecked_size < ref_totlen(c, jeb, ref))) {
			printk(KERN_NOTICE "raw unchecked node of size 0x%08x freed from erase block %d at 0x%08x, but unchecked_size was already 0x%08x\n",
			       ref_totlen(c, jeb, ref), blocknr, ref->flash_offset, jeb->unchecked_size);
			BUG();
		})
		D1(printk(KERN_DEBUG "Obsoleting previously unchecked node at 0x%08x of len %x: ", ref_offset(ref), ref_totlen(c, jeb, ref)));
		jeb->unchecked_size -= ref_totlen(c, jeb, ref);
		c->unchecked_size -= ref_totlen(c, jeb, ref);
	} else {
		D1(if (unlikely(jeb->used_size < ref_totlen(c, jeb, ref))) {
			printk(KERN_NOTICE "raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n",
			       ref_totlen(c, jeb, ref), blocknr, ref->flash_offset, jeb->used_size);
			BUG();
		})
		D1(printk(KERN_DEBUG "Obsoleting node at 0x%08x of len %#x: ", ref_offset(ref), ref_totlen(c, jeb, ref)));
		jeb->used_size -= ref_totlen(c, jeb, ref);
		c->used_size -= ref_totlen(c, jeb, ref);
	}

	// Take care that wasted size is taken into account
	if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + ref_totlen(c, jeb, ref))) && jeb != c->nextblock) {
		D1(printk(KERN_DEBUG "Dirtying\n"));
		addedsize = ref_totlen(c, jeb, ref);
		jeb->dirty_size += ref_totlen(c, jeb, ref);
		c->dirty_size += ref_totlen(c, jeb, ref);

		/* Convert wasted space to dirty, if not a bad block */
		if (jeb->wasted_size) {
			if (on_list(&jeb->list, &c->bad_used_list)) {
				D1(printk(KERN_DEBUG "Leaving block at %08x on the bad_used_list\n",
					  jeb->offset));
				addedsize = 0; /* To fool the refiling code later */
			} else {
				D1(printk(KERN_DEBUG "Converting %d bytes of wasted space to dirty in block at %08x\n",
					  jeb->wasted_size, jeb->offset));
				addedsize += jeb->wasted_size;
				jeb->dirty_size += jeb->wasted_size;
				c->dirty_size += jeb->wasted_size;
				c->wasted_size -= jeb->wasted_size;
				jeb->wasted_size = 0;
			}
		}
	} else {
		D1(printk(KERN_DEBUG "Wasting\n"));
		addedsize = 0;
		jeb->wasted_size += ref_totlen(c, jeb, ref);
		c->wasted_size += ref_totlen(c, jeb, ref);
	}
	ref->flash_offset = ref_offset(ref) | REF_OBSOLETE;

	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	if (c->flags & JFFS2_SB_FLAG_SCANNING) {
		/* Flash scanning is in progress. Don't muck about with the block
		   lists because they're not ready yet, and don't actually
		   obliterate nodes that look obsolete. If they weren't
		   marked obsolete on the flash at the time they _became_
		   obsolete, there was probably a reason for that. */
		spin_unlock(&c->erase_completion_lock);
		/* We didn't lock the erase_free_sem */
		return;
	}

	if (jeb == c->nextblock) {
		D2(printk(KERN_DEBUG "Not moving nextblock 0x%08x to dirty/erase_pending list\n", jeb->offset));
	} else if (!jeb->used_size && !jeb->unchecked_size) {
		if (jeb == c->gcblock) {
			D1(printk(KERN_DEBUG "gcblock at 0x%08x completely dirtied. Clearing gcblock...\n", jeb->offset));
			c->gcblock = NULL;
		} else {
			D1(printk(KERN_DEBUG "Eraseblock at 0x%08x completely dirtied. Removing from (dirty?) list...\n", jeb->offset));
			list_del(&jeb->list);
		}
		if (jffs2_wbuf_dirty(c)) {
			D1(printk(KERN_DEBUG "...and adding to erasable_pending_wbuf_list\n"));
			list_add_tail(&jeb->list, &c->erasable_pending_wbuf_list);
		} else {
			if (jiffies & 127) {
				/* Most of the time, we just erase it immediately. Otherwise we
				   spend ages scanning it on mount, etc. */
				D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n"));
				list_add_tail(&jeb->list, &c->erase_pending_list);
				c->nr_erasing_blocks++;
				jffs2_erase_pending_trigger(c);
			} else {
				/* Sometimes, however, we leave it elsewhere so it doesn't get
				   immediately reused, and we spread the load a bit. */
				D1(printk(KERN_DEBUG "...and adding to erasable_list\n"));
				list_add_tail(&jeb->list, &c->erasable_list);
			}
		}
		D1(printk(KERN_DEBUG "Done OK\n"));
	} else if (jeb == c->gcblock) {
		D2(printk(KERN_DEBUG "Not moving gcblock 0x%08x to dirty_list\n", jeb->offset));
	} else if (ISDIRTY(jeb->dirty_size) && !ISDIRTY(jeb->dirty_size - addedsize)) {
		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is freshly dirtied. Removing from clean list...\n", jeb->offset));
		list_del(&jeb->list);
		D1(printk(KERN_DEBUG "...and adding to dirty_list\n"));
		list_add_tail(&jeb->list, &c->dirty_list);
	} else if (VERYDIRTY(c, jeb->dirty_size) &&
		   !VERYDIRTY(c, jeb->dirty_size - addedsize)) {
		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is now very dirty. Removing from dirty list...\n", jeb->offset));
		list_del(&jeb->list);
		D1(printk(KERN_DEBUG "...and adding to very_dirty_list\n"));
		list_add_tail(&jeb->list, &c->very_dirty_list);
	} else {
		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
	}

	spin_unlock(&c->erase_completion_lock);

	if (!jffs2_can_mark_obsolete(c) || jffs2_is_readonly(c) ||
		(c->flags & JFFS2_SB_FLAG_BUILDING)) {
		/* We didn't lock the erase_free_sem */
		return;
	}

	/* The erase_free_sem is locked, and has been since before we marked the node obsolete
	   and potentially put its eraseblock onto the erase_pending_list. Thus, we know that
	   the block hasn't _already_ been erased, and that 'ref' itself hasn't been freed yet
	   by jffs2_free_all_node_refs() in erase.c. Which is nice. */

	D1(printk(KERN_DEBUG "obliterating obsoleted node at 0x%08x\n", ref_offset(ref)));
	ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (unsigned char *)&n);
	if (ret) {
		printk(KERN_WARNING "Read error reading from obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
		goto out_erase_sem;
	}
	if (retlen != sizeof(n)) {
		printk(KERN_WARNING "Short read from obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
		goto out_erase_sem;
	}
	if (PAD(je32_to_cpu(n.totlen)) != PAD(ref_totlen(c, jeb, ref))) {
		printk(KERN_WARNING "Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n", je32_to_cpu(n.totlen), ref_totlen(c, jeb, ref));
		goto out_erase_sem;
	}
	if (!(je16_to_cpu(n.nodetype) & JFFS2_NODE_ACCURATE)) {
		D1(printk(KERN_DEBUG "Node at 0x%08x was already marked obsolete (nodetype 0x%04x)\n", ref_offset(ref), je16_to_cpu(n.nodetype)));
		goto out_erase_sem;
	}
	/* XXX FIXME: This is ugly now */
	n.nodetype = cpu_to_je16(je16_to_cpu(n.nodetype) & ~JFFS2_NODE_ACCURATE);
	ret = jffs2_flash_write(c, ref_offset(ref), sizeof(n), &retlen, (unsigned char *)&n);
	if (ret) {
		printk(KERN_WARNING "Write error in obliterating obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
		goto out_erase_sem;
	}
	if (retlen != sizeof(n)) {
		printk(KERN_WARNING "Short write in obliterating obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
		goto out_erase_sem;
	}

	/* Nodes which have been marked obsolete no longer need to be
	   associated with any inode. Remove them from the per-inode list.

	   Note we can't do this for NAND at the moment because we need
	   obsolete dirent nodes to stay on the lists, because of the
	   horridness in jffs2_garbage_collect_deletion_dirent(). Also
	   because we delete the inocache, and on NAND we need that to
	   stay around until all the nodes are actually erased, in order
	   to stop us from giving the same inode number to another newly
	   created inode. */
	if (ref->next_in_ino) {
		struct jffs2_inode_cache *ic;
		struct jffs2_raw_node_ref **p;

		spin_lock(&c->erase_completion_lock);

		ic = jffs2_raw_ref_to_ic(ref);
		for (p = &ic->nodes; (*p) != ref; p = &((*p)->next_in_ino))
			;

		*p = ref->next_in_ino;
		ref->next_in_ino = NULL;

		if (ic->nodes == (void *)ic && ic->nlink == 0)
			jffs2_del_ino_cache(c, ic);

		spin_unlock(&c->erase_completion_lock);
	}


	/* Merge with the next node in the physical list, if there is one
	   and if it's also obsolete and if it doesn't belong to any inode */
	if (ref->next_phys && ref_obsolete(ref->next_phys) &&
	    !ref->next_phys->next_in_ino) {
		struct jffs2_raw_node_ref *n = ref->next_phys;

		spin_lock(&c->erase_completion_lock);

		ref->__totlen += n->__totlen;
		ref->next_phys = n->next_phys;
		if (jeb->last_node == n) jeb->last_node = ref;
		if (jeb->gc_node == n) {
			/* gc will be happy continuing gc on this node */
			jeb->gc_node = ref;
		}
		spin_unlock(&c->erase_completion_lock);

		jffs2_free_raw_node_ref(n);
	}

	/* Also merge with the previous node in the list, if there is one
	   and that one is obsolete */
	if (ref != jeb->first_node) {
		struct jffs2_raw_node_ref *p = jeb->first_node;

		spin_lock(&c->erase_completion_lock);

		while (p->next_phys != ref)
			p = p->next_phys;

		if (ref_obsolete(p) && !ref->next_in_ino) {
			p->__totlen += ref->__totlen;
			if (jeb->last_node == ref) {
				jeb->last_node = p;
			}
			if (jeb->gc_node == ref) {
				/* gc will be happy continuing gc on this node */
				jeb->gc_node = p;
			}
			p->next_phys = ref->next_phys;
			jffs2_free_raw_node_ref(ref);
		}
		spin_unlock(&c->erase_completion_lock);
	}
 out_erase_sem:
	up(&c->erase_free_sem);
}

int jffs2_thread_should_wake(struct jffs2_sb_info *c)
{
	int ret = 0;
	uint32_t dirty;

	if (c->unchecked_size) {
		D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): unchecked_size %d, checked_ino #%d\n",
			  c->unchecked_size, c->checked_ino));
		return 1;
	}

	/* dirty_size contains blocks on erase_pending_list
	 * those blocks are counted in c->nr_erasing_blocks.
	 * If one block is actually erased, it is no longer counted as dirty_space
	 * but it is counted in c->nr_erasing_blocks, so we add it and subtract it
	 * with c->nr_erasing_blocks * c->sector_size again.
	 * Blocks on erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks
	 * This helps us to force gc and eventually pick a clean block to spread the load.
	 */
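	/* Illustrative example (hypothetical numbers): with resv_blocks_gctrigger = 6,
	 * nr_free_blocks = 3 and nr_erasing_blocks = 2, the check below sees 3 + 2 < 6;
	 * if the computed dirty value also exceeds nospc_dirty_size, the GC thread is
	 * woken to reclaim space. */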
	dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size;

	if (c->nr_free_blocks + c->nr_erasing_blocks < c->resv_blocks_gctrigger &&
			(dirty > c->nospc_dirty_size))
		ret = 1;

	D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x: %s\n",
		  c->nr_free_blocks, c->nr_erasing_blocks, c->dirty_size, ret?"yes":"no"));

	return ret;
}