/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright © 2001-2007 Red Hat, Inc.
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mtd/mtd.h>
#include <linux/compiler.h>
#include <linux/sched.h> /* For cond_resched() */
#include "nodelist.h"
#include "debug.h"

/**
 *	jffs2_reserve_space - request physical space to write nodes to flash
 *	@c: superblock info
 *	@minsize: Minimum acceptable size of allocation
 *	@len: Returned value of allocation length
 *	@prio: Allocation type - ALLOC_{NORMAL,DELETION}
 *
 *	Requests a block of physical space on the flash. Returns zero for success
 *	and stores the length of the allocation in @len, or returns -ENOSPC or
 *	another error if appropriate.
 *
 *	If it returns zero, jffs2_reserve_space() also downs the per-filesystem
 *	allocation semaphore, to prevent more than one allocation from being
 *	active at any time. The semaphore is later released by
 *	jffs2_complete_reservation().
 *
 *	jffs2_reserve_space() may trigger garbage collection in order to make room
 *	for the requested allocation.
 */
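
/*
 * Typical caller pattern, as a rough sketch only (error handling and the
 * actual flash write elided; 'ri', 'datalen', 'flash_ofs', 'totlen' and 'ic'
 * stand in for the caller's node data - see the real callers in write.c):
 *
 *	uint32_t alloclen;
 *	ret = jffs2_reserve_space(c, sizeof(ri) + datalen, &alloclen,
 *				  ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
 *	if (ret)
 *		return ret;
 *	... write the node into the space just reserved ...
 *	jffs2_add_physical_node_ref(c, flash_ofs | REF_NORMAL, PAD(totlen), ic);
 *	jffs2_complete_reservation(c);	(this releases c->alloc_sem)
 */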

static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
				  uint32_t *len, uint32_t sumsize);

int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
			uint32_t *len, int prio, uint32_t sumsize)
{
	int ret = -EAGAIN;
	int blocksneeded = c->resv_blocks_write;
	/* align it */
	minsize = PAD(minsize);

	jffs2_dbg(1, "%s(): Requested 0x%x bytes\n", __func__, minsize);
	mutex_lock(&c->alloc_sem);

	jffs2_dbg(1, "%s(): alloc sem got\n", __func__);

	spin_lock(&c->erase_completion_lock);

	/* this needs a little more thought (true <tglx> :)) */
	while(ret == -EAGAIN) {
		while(c->nr_free_blocks + c->nr_erasing_blocks < blocksneeded) {
			uint32_t dirty, avail;

			/* calculate real dirty size
			 * dirty_size contains blocks on erase_pending_list
			 * those blocks are counted in c->nr_erasing_blocks.
			 * If a block is actually erased, it is no longer counted as dirty_space,
			 * but it is still counted in c->nr_erasing_blocks, so we add erasing_size
			 * and subtract c->nr_erasing_blocks * c->sector_size again.
			 * Blocks on erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks.
			 * This helps us to force GC and eventually pick a clean block to spread the load.
			 * We add unchecked_size here, as we hopefully will find some space to use.
			 * This will affect the sum only once, as GC first finishes the checking
			 * of nodes.
			 */
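			/* Illustrative example (hypothetical numbers, 64KiB
			 * sectors): with dirty_size = 0x20000, two blocks
			 * mid-erase (erasing_size = 0x20000, nr_erasing_blocks
			 * = 2) and unchecked_size = 0x10000:
			 *
			 *	dirty = 0x20000 + 0x20000 - 2 * 0x10000 + 0x10000
			 *	      = 0x30000
			 *
			 * so the blocks already being erased cancel out and
			 * only space GC could still reclaim is counted.
			 */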
			dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size + c->unchecked_size;
			if (dirty < c->nospc_dirty_size) {
				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
					jffs2_dbg(1, "%s(): Low on dirty space to GC, but it's a deletion. Allowing...\n",
						  __func__);
					break;
				}
				jffs2_dbg(1, "dirty size 0x%08x + unchecked_size 0x%08x < nospc_dirty_size 0x%08x, returning -ENOSPC\n",
					  dirty, c->unchecked_size,
					  c->nospc_dirty_size);

				spin_unlock(&c->erase_completion_lock);
				mutex_unlock(&c->alloc_sem);
				return -ENOSPC;
			}

			/* Calc possibly available space. Possibly available means that we
			 * don't know whether unchecked size contains obsoleted nodes, which could give us some
			 * more usable space. This will affect the sum only once, as GC first finishes the checking
			 * of nodes.
			 * Return -ENOSPC if the maximum possibly available space is less than or equal to
			 * blocksneeded * sector_size.
			 * This blocks endless GC looping on a filesystem which is nearly full, even if
			 * the check above passes.
			 */
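			/* Illustrative example (hypothetical numbers, 64KiB
			 * sectors, blocksneeded = 5): with free_size = 0x20000,
			 * dirty_size = 0x18000, erasing_size = 0x10000 and
			 * unchecked_size = 0x8000, avail = 0x50000 = exactly
			 * 5 blocks. Even if GC reclaimed all of it we could
			 * never rise above the reserve, so we return -ENOSPC
			 * instead of garbage collecting forever.
			 */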
			avail = c->free_size + c->dirty_size + c->erasing_size + c->unchecked_size;
			if ((avail / c->sector_size) <= blocksneeded) {
				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
					jffs2_dbg(1, "%s(): Low on possibly available space, but it's a deletion. Allowing...\n",
						  __func__);
					break;
				}

				jffs2_dbg(1, "max. available size 0x%08x <= blocksneeded * sector_size 0x%08x, returning -ENOSPC\n",
					  avail, blocksneeded * c->sector_size);
				spin_unlock(&c->erase_completion_lock);
				mutex_unlock(&c->alloc_sem);
				return -ENOSPC;
			}

			mutex_unlock(&c->alloc_sem);

			jffs2_dbg(1, "Triggering GC pass. nr_free_blocks %d, nr_erasing_blocks %d, free_size 0x%08x, dirty_size 0x%08x, wasted_size 0x%08x, used_size 0x%08x, erasing_size 0x%08x, bad_size 0x%08x (total 0x%08x of 0x%08x)\n",
				  c->nr_free_blocks, c->nr_erasing_blocks,
				  c->free_size, c->dirty_size, c->wasted_size,
				  c->used_size, c->erasing_size, c->bad_size,
				  c->free_size + c->dirty_size +
				  c->wasted_size + c->used_size +
				  c->erasing_size + c->bad_size,
				  c->flash_size);
			spin_unlock(&c->erase_completion_lock);

			ret = jffs2_garbage_collect_pass(c);

			if (ret == -EAGAIN) {
				spin_lock(&c->erase_completion_lock);
				if (c->nr_erasing_blocks &&
				    list_empty(&c->erase_pending_list) &&
				    list_empty(&c->erase_complete_list)) {
					DECLARE_WAITQUEUE(wait, current);
					set_current_state(TASK_UNINTERRUPTIBLE);
					add_wait_queue(&c->erase_wait, &wait);
					jffs2_dbg(1, "%s waiting for erase to complete\n",
						  __func__);
					spin_unlock(&c->erase_completion_lock);

					schedule();
					remove_wait_queue(&c->erase_wait, &wait);
				} else
					spin_unlock(&c->erase_completion_lock);
			} else if (ret)
				return ret;

			cond_resched();

			if (signal_pending(current))
				return -EINTR;

			mutex_lock(&c->alloc_sem);
			spin_lock(&c->erase_completion_lock);
		}

		ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
		if (ret) {
			jffs2_dbg(1, "%s(): ret is %d\n", __func__, ret);
		}
	}
	spin_unlock(&c->erase_completion_lock);
	if (!ret)
		ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
	if (ret)
		mutex_unlock(&c->alloc_sem);
	return ret;
}

int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize,
			   uint32_t *len, uint32_t sumsize)
{
	int ret;
	minsize = PAD(minsize);

	jffs2_dbg(1, "%s(): Requested 0x%x bytes\n", __func__, minsize);

	while (true) {
		spin_lock(&c->erase_completion_lock);
		ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
		if (ret) {
			jffs2_dbg(1, "%s(): looping, ret is %d\n",
				  __func__, ret);
		}
		spin_unlock(&c->erase_completion_lock);

		if (ret == -EAGAIN)
			cond_resched();
		else
			break;
	}
	if (!ret)
		ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);

	return ret;
}


/* Classify nextblock (clean, dirty or verydirty) and force selection of another one */

static void jffs2_close_nextblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
	if (c->nextblock == NULL) {
		jffs2_dbg(1, "%s(): Erase block at 0x%08x has already been placed in a list\n",
			  __func__, jeb->offset);
		return;
	}
	/* Check if we have a dirty block now, or if it was dirty already */
	if (ISDIRTY(jeb->wasted_size + jeb->dirty_size)) {
		c->dirty_size += jeb->wasted_size;
		c->wasted_size -= jeb->wasted_size;
		jeb->dirty_size += jeb->wasted_size;
		jeb->wasted_size = 0;
		if (VERYDIRTY(c, jeb->dirty_size)) {
			jffs2_dbg(1, "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
				  jeb->offset, jeb->free_size, jeb->dirty_size,
				  jeb->used_size);
			list_add_tail(&jeb->list, &c->very_dirty_list);
		} else {
			jffs2_dbg(1, "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
				  jeb->offset, jeb->free_size, jeb->dirty_size,
				  jeb->used_size);
			list_add_tail(&jeb->list, &c->dirty_list);
		}
	} else {
		jffs2_dbg(1, "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size,
			  jeb->used_size);
		list_add_tail(&jeb->list, &c->clean_list);
	}
	c->nextblock = NULL;
}

/* Select a new jeb for nextblock */

static int jffs2_find_nextblock(struct jffs2_sb_info *c)
{
	struct list_head *next;

	/* Take the next block off the 'free' list */

	if (list_empty(&c->free_list)) {

		if (!c->nr_erasing_blocks &&
			!list_empty(&c->erasable_list)) {
			struct jffs2_eraseblock *ejeb;

			ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list);
			list_move_tail(&ejeb->list, &c->erase_pending_list);
			c->nr_erasing_blocks++;
			jffs2_garbage_collect_trigger(c);
			jffs2_dbg(1, "%s(): Triggering erase of erasable block at 0x%08x\n",
				  __func__, ejeb->offset);
		}

		if (!c->nr_erasing_blocks &&
			!list_empty(&c->erasable_pending_wbuf_list)) {
			jffs2_dbg(1, "%s(): Flushing write buffer\n",
				  __func__);
			/* c->nextblock is NULL, no update to c->nextblock allowed */
			spin_unlock(&c->erase_completion_lock);
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
			/* Have another go. It'll be on the erasable_list now */
			return -EAGAIN;
		}

		if (!c->nr_erasing_blocks) {
			/* Ouch. We're in GC, or we wouldn't have got here.
			   And there's no space left. At all. */
			pr_crit("Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n",
				c->nr_erasing_blocks, c->nr_free_blocks,
				list_empty(&c->erasable_list) ? "yes" : "no",
				list_empty(&c->erasing_list) ? "yes" : "no",
				list_empty(&c->erase_pending_list) ? "yes" : "no");
			return -ENOSPC;
		}

		spin_unlock(&c->erase_completion_lock);
		/* Don't wait for it; just erase one right now */
		jffs2_erase_pending_blocks(c, 1);
		spin_lock(&c->erase_completion_lock);

		/* An erase may have failed, decreasing the
		   amount of free space available. So we must
		   restart from the beginning */
		return -EAGAIN;
	}

	next = c->free_list.next;
	list_del(next);
	c->nextblock = list_entry(next, struct jffs2_eraseblock, list);
	c->nr_free_blocks--;

	jffs2_sum_reset_collected(c->summary); /* reset collected summary */

#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
	/* adjust write buffer offset, else we get a non-contiguous write bug */
	if (!(c->wbuf_ofs % c->sector_size) && !c->wbuf_len)
		c->wbuf_ofs = 0xffffffff;
#endif

	jffs2_dbg(1, "%s(): new nextblock = 0x%08x\n",
		  __func__, c->nextblock->offset);

	return 0;
}

/* Called with alloc sem _and_ erase_completion_lock */
static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
				  uint32_t *len, uint32_t sumsize)
{
	struct jffs2_eraseblock *jeb = c->nextblock;
	uint32_t reserved_size;				/* for summary information at the end of the jeb */
	int ret;

 restart:
	reserved_size = 0;

	if (jffs2_sum_active() && (sumsize != JFFS2_SUMMARY_NOSUM_SIZE)) {
							/* NOSUM_SIZE means not to generate summary */

		if (jeb) {
			reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
			dbg_summary("minsize=%d, jeb->free=%d, "
						"summary->size=%d, sumsize=%d\n",
						minsize, jeb->free_size,
						c->summary->sum_size, sumsize);
		}
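
		/* Rough layout of a summary-enabled block as it fills up
		 * (illustrative sketch only, not to scale):
		 *
		 *	| nodes written so far | this node | free | summary + frame |
		 *	^ jeb->offset                             end of block ^
		 *
		 * reserved_size holds back room at the end of the block for
		 * the summary collected so far plus the summary frame.
		 */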

		/* Is there enough space for writing out the current node, or do we
		   have to write out the summary information now, close this jeb and
		   select a new nextblock? */
		if (jeb && (PAD(minsize) + PAD(c->summary->sum_size + sumsize +
					JFFS2_SUMMARY_FRAME_SIZE) > jeb->free_size)) {

			/* Has summary been disabled for this jeb? */
			if (jffs2_sum_is_disabled(c->summary)) {
				sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
				goto restart;
			}

			/* Writing out the collected summary information */
			dbg_summary("generating summary for 0x%08x.\n", jeb->offset);
			ret = jffs2_sum_write_sumnode(c);

			if (ret)
				return ret;

			if (jffs2_sum_is_disabled(c->summary)) {
				/* jffs2_sum_write_sumnode() couldn't write out the summary
				   information, disabling summary for this jeb and freeing
				   the collected information */
				sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
				goto restart;
			}

			jffs2_close_nextblock(c, jeb);
			jeb = NULL;
			/* always keep a valid value in reserved_size */
			reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
		}
	} else {
		if (jeb && minsize > jeb->free_size) {
			uint32_t waste;

			/* Skip the end of this block and file it as having some dirty space */
			/* If there's a pending write to it, flush now */

			if (jffs2_wbuf_dirty(c)) {
				spin_unlock(&c->erase_completion_lock);
				jffs2_dbg(1, "%s(): Flushing write buffer\n",
					  __func__);
				jffs2_flush_wbuf_pad(c);
				spin_lock(&c->erase_completion_lock);
				jeb = c->nextblock;
				goto restart;
			}

			spin_unlock(&c->erase_completion_lock);

			ret = jffs2_prealloc_raw_node_refs(c, jeb, 1);

			/* Just lock it again and continue. Nothing much can change because
			   we hold c->alloc_sem anyway. In fact, it's not entirely clear why
			   we hold c->erase_completion_lock in the majority of this function...
			   but that's a question for another (more caffeine-rich) day. */
			spin_lock(&c->erase_completion_lock);

			if (ret)
				return ret;

			waste = jeb->free_size;
			jffs2_link_node_ref(c, jeb,
					    (jeb->offset + c->sector_size - waste) | REF_OBSOLETE,
					    waste, NULL);
			/* FIXME: that made it count as dirty. Convert to wasted */
			jeb->dirty_size -= waste;
			c->dirty_size -= waste;
			jeb->wasted_size += waste;
			c->wasted_size += waste;

			jffs2_close_nextblock(c, jeb);
			jeb = NULL;
		}
	}

	if (!jeb) {

		ret = jffs2_find_nextblock(c);
		if (ret)
			return ret;

		jeb = c->nextblock;

		if (jeb->free_size != c->sector_size - c->cleanmarker_size) {
			pr_warn("Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n",
				jeb->offset, jeb->free_size);
			goto restart;
		}
	}
	/* OK, jeb (==c->nextblock) is now pointing at a block which definitely has
	   enough space */
	*len = jeb->free_size - reserved_size;

	if (c->cleanmarker_size && jeb->used_size == c->cleanmarker_size &&
	    !jeb->first_node->next_in_ino) {
		/* Only node in it beforehand was a CLEANMARKER node (we think).
		   So mark it obsolete now that there's going to be another node
		   in the block. This will reduce used_size to zero, but we've
		   already set c->nextblock so that jffs2_mark_node_obsolete()
		   won't try to refile it to the dirty_list.
		*/
		spin_unlock(&c->erase_completion_lock);
		jffs2_mark_node_obsolete(c, jeb->first_node);
		spin_lock(&c->erase_completion_lock);
	}

	jffs2_dbg(1, "%s(): Giving 0x%x bytes at 0x%x\n",
		  __func__,
		  *len, jeb->offset + (c->sector_size - jeb->free_size));
	return 0;
}

/**
 *	jffs2_add_physical_node_ref - add a physical node reference to the list
 *	@c: superblock info
 *	@ofs: flash offset of the node, with the REF_* state in the low two bits
 *	@len: length of this physical node
 *	@ic: inode cache the node belongs to, or NULL
 *
 *	Should only be used to report nodes for which space has been allocated
 *	by jffs2_reserve_space.
 *
 *	Must be called with the alloc_sem held.
 */

struct jffs2_raw_node_ref *jffs2_add_physical_node_ref(struct jffs2_sb_info *c,
						       uint32_t ofs, uint32_t len,
						       struct jffs2_inode_cache *ic)
{
	struct jffs2_eraseblock *jeb;
	struct jffs2_raw_node_ref *new;

	jeb = &c->blocks[ofs / c->sector_size];

	jffs2_dbg(1, "%s(): Node at 0x%x(%d), size 0x%x\n",
		  __func__, ofs & ~3, ofs & 3, len);
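	/* The low two bits of 'ofs' carry the node's REF_* state (REF_OBSOLETE,
	 * REF_NORMAL, ...), hence the "ofs & ~3" / "ofs & 3" split above. A
	 * caller is expected to pass something like (sketch only; see the real
	 * callers in write.c and wbuf.c):
	 *
	 *	jffs2_add_physical_node_ref(c, flash_ofs | REF_NORMAL,
	 *				    PAD(totlen), ic);
	 */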
#if 1
	/* Allow non-obsolete nodes only to be added at the end of c->nextblock,
	   if c->nextblock is set. Note that wbuf.c will file obsolete nodes
	   even after refiling c->nextblock */
	if ((c->nextblock || ((ofs & 3) != REF_OBSOLETE))
	    && (jeb != c->nextblock || (ofs & ~3) != jeb->offset + (c->sector_size - jeb->free_size))) {
		pr_warn("argh. node added in wrong place at 0x%08x(%d)\n",
			ofs & ~3, ofs & 3);
		if (c->nextblock)
			pr_warn("nextblock 0x%08x", c->nextblock->offset);
		else
			pr_warn("No nextblock");
		pr_cont(", expected at %08x\n",
			jeb->offset + (c->sector_size - jeb->free_size));
		return ERR_PTR(-EINVAL);
	}
#endif
	spin_lock(&c->erase_completion_lock);

	new = jffs2_link_node_ref(c, jeb, ofs, len, ic);

	if (!jeb->free_size && !jeb->dirty_size && !ISDIRTY(jeb->wasted_size)) {
		/* If it lives on the dirty_list, jffs2_reserve_space will put it there */
		jffs2_dbg(1, "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size,
			  jeb->used_size);
		if (jffs2_wbuf_dirty(c)) {
			/* Flush the last write in the block if it's outstanding */
			spin_unlock(&c->erase_completion_lock);
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
		}

		list_add_tail(&jeb->list, &c->clean_list);
		c->nextblock = NULL;
	}
	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	spin_unlock(&c->erase_completion_lock);

	return new;
}


void jffs2_complete_reservation(struct jffs2_sb_info *c)
{
	jffs2_dbg(1, "jffs2_complete_reservation()\n");
	spin_lock(&c->erase_completion_lock);
	jffs2_garbage_collect_trigger(c);
	spin_unlock(&c->erase_completion_lock);
	mutex_unlock(&c->alloc_sem);
}

static inline int on_list(struct list_head *obj, struct list_head *head)
{
	struct list_head *this;

	list_for_each(this, head) {
		if (this == obj) {
			jffs2_dbg(1, "%p is on list at %p\n", obj, head);
			return 1;
		}
	}
	return 0;
}

void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref)
{
	struct jffs2_eraseblock *jeb;
	int blocknr;
	struct jffs2_unknown_node n;
	int ret, addedsize;
	size_t retlen;
	uint32_t freed_len;

	if (unlikely(!ref)) {
		pr_notice("EEEEEK. jffs2_mark_node_obsolete called with NULL node\n");
		return;
	}
	if (ref_obsolete(ref)) {
		jffs2_dbg(1, "%s(): called with already obsolete node at 0x%08x\n",
			  __func__, ref_offset(ref));
		return;
	}
	blocknr = ref->flash_offset / c->sector_size;
	if (blocknr >= c->nr_blocks) {
		pr_notice("raw node at 0x%08x is off the end of device!\n",
			  ref->flash_offset);
		BUG();
	}
	jeb = &c->blocks[blocknr];

	if (jffs2_can_mark_obsolete(c) && !jffs2_is_readonly(c) &&
	    !(c->flags & (JFFS2_SB_FLAG_SCANNING | JFFS2_SB_FLAG_BUILDING))) {
		/* Hm. This may confuse static lock analysis. If any of the above
		   three conditions is false, we're going to return from this
		   function without actually obliterating any nodes or freeing
		   any jffs2_raw_node_refs. So we don't need to stop erases from
		   happening, or protect against people holding an obsolete
		   jffs2_raw_node_ref without the erase_completion_lock. */
		mutex_lock(&c->erase_free_sem);
	}

	spin_lock(&c->erase_completion_lock);

	freed_len = ref_totlen(c, jeb, ref);

	if (ref_flags(ref) == REF_UNCHECKED) {
		D1(if (unlikely(jeb->unchecked_size < freed_len)) {
			pr_notice("raw unchecked node of size 0x%08x freed from erase block %d at 0x%08x, but unchecked_size was already 0x%08x\n",
				  freed_len, blocknr,
				  ref->flash_offset, jeb->unchecked_size);
			BUG();
		})
		jffs2_dbg(1, "Obsoleting previously unchecked node at 0x%08x of len %x\n",
			  ref_offset(ref), freed_len);
		jeb->unchecked_size -= freed_len;
		c->unchecked_size -= freed_len;
	} else {
		D1(if (unlikely(jeb->used_size < freed_len)) {
			pr_notice("raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n",
				  freed_len, blocknr,
				  ref->flash_offset, jeb->used_size);
			BUG();
		})
		jffs2_dbg(1, "Obsoleting node at 0x%08x of len %#x: ",
			  ref_offset(ref), freed_len);
		jeb->used_size -= freed_len;
		c->used_size -= freed_len;
	}

	/* Take care that the wasted size is taken into account */
	if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + freed_len)) && jeb != c->nextblock) {
		jffs2_dbg(1, "Dirtying\n");
		addedsize = freed_len;
		jeb->dirty_size += freed_len;
		c->dirty_size += freed_len;

		/* Convert wasted space to dirty, if not a bad block */
		if (jeb->wasted_size) {
			if (on_list(&jeb->list, &c->bad_used_list)) {
				jffs2_dbg(1, "Leaving block at %08x on the bad_used_list\n",
					  jeb->offset);
				addedsize = 0; /* To fool the refiling code later */
			} else {
				jffs2_dbg(1, "Converting %d bytes of wasted space to dirty in block at %08x\n",
					  jeb->wasted_size, jeb->offset);
				addedsize += jeb->wasted_size;
				jeb->dirty_size += jeb->wasted_size;
				c->dirty_size += jeb->wasted_size;
				c->wasted_size -= jeb->wasted_size;
				jeb->wasted_size = 0;
			}
		}
	} else {
		jffs2_dbg(1, "Wasting\n");
		addedsize = 0;
		jeb->wasted_size += freed_len;
		c->wasted_size += freed_len;
	}
	ref->flash_offset = ref_offset(ref) | REF_OBSOLETE;

	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	if (c->flags & JFFS2_SB_FLAG_SCANNING) {
		/* Flash scanning is in progress. Don't muck about with the block
		   lists because they're not ready yet, and don't actually
		   obliterate nodes that look obsolete. If they weren't
		   marked obsolete on the flash at the time they _became_
		   obsolete, there was probably a reason for that. */
		spin_unlock(&c->erase_completion_lock);
		/* We didn't lock the erase_free_sem */
		return;
	}

	if (jeb == c->nextblock) {
		jffs2_dbg(2, "Not moving nextblock 0x%08x to dirty/erase_pending list\n",
			  jeb->offset);
	} else if (!jeb->used_size && !jeb->unchecked_size) {
		if (jeb == c->gcblock) {
			jffs2_dbg(1, "gcblock at 0x%08x completely dirtied. Clearing gcblock...\n",
				  jeb->offset);
			c->gcblock = NULL;
		} else {
			jffs2_dbg(1, "Eraseblock at 0x%08x completely dirtied. Removing from (dirty?) list...\n",
				  jeb->offset);
			list_del(&jeb->list);
		}
		if (jffs2_wbuf_dirty(c)) {
			jffs2_dbg(1, "...and adding to erasable_pending_wbuf_list\n");
			list_add_tail(&jeb->list, &c->erasable_pending_wbuf_list);
		} else {
			if (jiffies & 127) {
				/* Most of the time, we just erase it immediately. Otherwise we
				   spend ages scanning it on mount, etc. */
				jffs2_dbg(1, "...and adding to erase_pending_list\n");
				list_add_tail(&jeb->list, &c->erase_pending_list);
				c->nr_erasing_blocks++;
				jffs2_garbage_collect_trigger(c);
			} else {
				/* Sometimes, however, we leave it elsewhere so it doesn't get
				   immediately reused, and we spread the load a bit. */
				jffs2_dbg(1, "...and adding to erasable_list\n");
				list_add_tail(&jeb->list, &c->erasable_list);
			}
		}
		jffs2_dbg(1, "Done OK\n");
	} else if (jeb == c->gcblock) {
		jffs2_dbg(2, "Not moving gcblock 0x%08x to dirty_list\n",
			  jeb->offset);
	} else if (ISDIRTY(jeb->dirty_size) && !ISDIRTY(jeb->dirty_size - addedsize)) {
		jffs2_dbg(1, "Eraseblock at 0x%08x is freshly dirtied. Removing from clean list...\n",
			  jeb->offset);
		list_del(&jeb->list);
		jffs2_dbg(1, "...and adding to dirty_list\n");
		list_add_tail(&jeb->list, &c->dirty_list);
	} else if (VERYDIRTY(c, jeb->dirty_size) &&
		   !VERYDIRTY(c, jeb->dirty_size - addedsize)) {
		jffs2_dbg(1, "Eraseblock at 0x%08x is now very dirty. Removing from dirty list...\n",
			  jeb->offset);
		list_del(&jeb->list);
		jffs2_dbg(1, "...and adding to very_dirty_list\n");
		list_add_tail(&jeb->list, &c->very_dirty_list);
	} else {
		jffs2_dbg(1, "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size,
			  jeb->used_size);
	}

	spin_unlock(&c->erase_completion_lock);

	if (!jffs2_can_mark_obsolete(c) || jffs2_is_readonly(c) ||
		(c->flags & JFFS2_SB_FLAG_BUILDING)) {
		/* We didn't lock the erase_free_sem */
		return;
	}

	/* The erase_free_sem is locked, and has been since before we marked the node obsolete
	   and potentially put its eraseblock onto the erase_pending_list. Thus, we know that
	   the block hasn't _already_ been erased, and that 'ref' itself hasn't been freed yet
	   by jffs2_free_jeb_node_refs() in erase.c. Which is nice. */

	jffs2_dbg(1, "obliterating obsoleted node at 0x%08x\n",
		  ref_offset(ref));
	ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
	if (ret) {
		pr_warn("Read error reading from obsoleted node at 0x%08x: %d\n",
			ref_offset(ref), ret);
		goto out_erase_sem;
	}
	if (retlen != sizeof(n)) {
		pr_warn("Short read from obsoleted node at 0x%08x: %zd\n",
			ref_offset(ref), retlen);
		goto out_erase_sem;
	}
	if (PAD(je32_to_cpu(n.totlen)) != PAD(freed_len)) {
		pr_warn("Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n",
			je32_to_cpu(n.totlen), freed_len);
		goto out_erase_sem;
	}
	if (!(je16_to_cpu(n.nodetype) & JFFS2_NODE_ACCURATE)) {
		jffs2_dbg(1, "Node at 0x%08x was already marked obsolete (nodetype 0x%04x)\n",
			  ref_offset(ref), je16_to_cpu(n.nodetype));
		goto out_erase_sem;
	}
	/* XXX FIXME: This is ugly now */
	n.nodetype = cpu_to_je16(je16_to_cpu(n.nodetype) & ~JFFS2_NODE_ACCURATE);
	ret = jffs2_flash_write(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
	if (ret) {
		pr_warn("Write error in obliterating obsoleted node at 0x%08x: %d\n",
			ref_offset(ref), ret);
		goto out_erase_sem;
	}
	if (retlen != sizeof(n)) {
		pr_warn("Short write in obliterating obsoleted node at 0x%08x: %zd\n",
			ref_offset(ref), retlen);
		goto out_erase_sem;
	}

	/* Nodes which have been marked obsolete no longer need to be
	   associated with any inode. Remove them from the per-inode list.

	   Note we can't do this for NAND at the moment because we need
	   obsolete dirent nodes to stay on the lists, because of the
	   horridness in jffs2_garbage_collect_deletion_dirent(). Also
	   because we delete the inocache, and on NAND we need that to
	   stay around until all the nodes are actually erased, in order
	   to stop us from giving the same inode number to another newly
	   created inode. */
	if (ref->next_in_ino) {
		struct jffs2_inode_cache *ic;
		struct jffs2_raw_node_ref **p;

		spin_lock(&c->erase_completion_lock);

		ic = jffs2_raw_ref_to_ic(ref);
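		/* Walk the per-inode list with a pointer-to-pointer so that
		 * unlinking 'ref' below is a single assignment, with no
		 * special case for removing the head of the list. */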
		for (p = &ic->nodes; (*p) != ref; p = &((*p)->next_in_ino))
			;

		*p = ref->next_in_ino;
		ref->next_in_ino = NULL;

		switch (ic->class) {
#ifdef CONFIG_JFFS2_FS_XATTR
			case RAWNODE_CLASS_XATTR_DATUM:
				jffs2_release_xattr_datum(c, (struct jffs2_xattr_datum *)ic);
				break;
			case RAWNODE_CLASS_XATTR_REF:
				jffs2_release_xattr_ref(c, (struct jffs2_xattr_ref *)ic);
				break;
#endif
			default:
				if (ic->nodes == (void *)ic && ic->pino_nlink == 0)
					jffs2_del_ino_cache(c, ic);
				break;
		}
		spin_unlock(&c->erase_completion_lock);
	}

 out_erase_sem:
	mutex_unlock(&c->erase_free_sem);
}

int jffs2_thread_should_wake(struct jffs2_sb_info *c)
{
	int ret = 0;
	uint32_t dirty;
	int nr_very_dirty = 0;
	struct jffs2_eraseblock *jeb;

	if (!list_empty(&c->erase_complete_list) ||
	    !list_empty(&c->erase_pending_list))
		return 1;

	if (c->unchecked_size) {
		jffs2_dbg(1, "jffs2_thread_should_wake(): unchecked_size %d, checked_ino #%d\n",
			  c->unchecked_size, c->checked_ino);
		return 1;
	}

	/* dirty_size contains blocks on erase_pending_list
	 * those blocks are counted in c->nr_erasing_blocks.
	 * If a block is actually erased, it is no longer counted as dirty_space,
	 * but it is still counted in c->nr_erasing_blocks, so we add erasing_size
	 * and subtract c->nr_erasing_blocks * c->sector_size again.
	 * Blocks on erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks.
	 * This helps us to force GC and eventually pick a clean block to spread the load.
	 */
	dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size;

	if (c->nr_free_blocks + c->nr_erasing_blocks < c->resv_blocks_gctrigger &&
			(dirty > c->nospc_dirty_size))
		ret = 1;

	list_for_each_entry(jeb, &c->very_dirty_list, list) {
		nr_very_dirty++;
		if (nr_very_dirty == c->vdirty_blocks_gctrigger) {
			ret = 1;
			/* In debug mode, actually go through and count them all */
			D1(continue);
			break;
		}
	}

	jffs2_dbg(1, "%s(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x, vdirty_blocks %d: %s\n",
		  __func__, c->nr_free_blocks, c->nr_erasing_blocks,
		  c->dirty_size, nr_very_dirty, ret ? "yes" : "no");

	return ret;
}