1 /*
3  * Copyright (c) International Business Machines Corp., 2006
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation; either version 2 of the License, or
8  * (at your option) any later version.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
13  * the GNU General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program; if not, write to the Free Software
17  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18  *
19  * Authors: Artem Bityutskiy (Битюцкий Артём), Thomas Gleixner
20  */
21 
22 /*
23  * UBI wear-leveling sub-system.
24  *
25  * This sub-system is responsible for wear-leveling. It works in terms of
26  * physical eraseblocks and erase counters and knows nothing about logical
27  * eraseblocks, volumes, etc. From this sub-system's perspective all physical
28  * eraseblocks are of two types - used and free. Used physical eraseblocks are
29  * those that were "get" by the 'ubi_wl_get_peb()' function, and free physical
30  * eraseblocks are those that were put by the 'ubi_wl_put_peb()' function.
31  *
32  * Physical eraseblocks returned by 'ubi_wl_get_peb()' have only the erase
33  * counter header. The rest of the physical eraseblock contains only %0xFF bytes.
34  *
35  * When physical eraseblocks are returned to the WL sub-system by means of the
36  * 'ubi_wl_put_peb()' function, they are scheduled for erasure. The erasure is
37  * done asynchronously in context of the per-UBI device background thread,
38  * which is also managed by the WL sub-system.
39  *
40  * The wear-leveling is ensured by means of moving the contents of used
41  * physical eraseblocks with low erase counter to free physical eraseblocks
42  * with high erase counter.
43  *
44  * The 'ubi_wl_get_peb()' function accepts data type hints which help to pick
45  * an "optimal" physical eraseblock. For example, when it is known that the
46  * physical eraseblock will be "put" soon because it contains short-term data,
47  * the WL sub-system may pick a free physical eraseblock with low erase
48  * counter, and so forth.
49  *
50  * If the WL sub-system fails to erase a physical eraseblock, it marks it as
51  * bad.
52  *
53  * This sub-system is also responsible for scrubbing. If a bit-flip is detected
54  * in a physical eraseblock, it has to be moved. Technically this is the same
55  * as moving it for wear-leveling reasons.
56  *
57  * As it was said, for the UBI sub-system all physical eraseblocks are either
58  * "free" or "used". Free eraseblock are kept in the @wl->free RB-tree, while
59  * used eraseblocks are kept in @wl->used, @wl->erroneous, or @wl->scrub
60  * RB-trees, as well as (temporarily) in the @wl->pq queue.
61  *
62  * When the WL sub-system returns a physical eraseblock, the physical
63  * eraseblock is protected from being moved for some "time". For this reason,
64  * the physical eraseblock is not directly moved from the @wl->free tree to the
65  * @wl->used tree. There is a protection queue in between where this
66  * physical eraseblock is temporarily stored (@wl->pq).
67  *
68  * All this protection stuff is needed because:
69  *  o we don't want to move physical eraseblocks just after we have given them
70  *    to the user; instead, we first want to let users fill them up with data;
71  *
72  *  o there is a chance that the user will put the physical eraseblock very
73  *    soon, so it makes sense not to move it for some time, but wait; this is
74  *    especially important in case of "short term" physical eraseblocks.
75  *
76  * Physical eraseblocks stay protected only for limited time. But the "time" is
77  * measured in erase cycles in this case. This is implemented with help of the
78  * protection queue. Eraseblocks are put to the tail of this queue when they
79  * are returned by the 'ubi_wl_get_peb()', and eraseblocks are removed from the
80  * head of the queue on each erase operation (for any eraseblock). So the
81  * length of the queue defines how many (global) erase cycles PEBs are protected.
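 *
 * For example, with %UBI_PROT_QUEUE_LEN set to 10, a PEB which has just been
 * returned by 'ubi_wl_get_peb()' stays protected until roughly 10 subsequent
 * erase operations (on any PEBs) have completed.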
82  *
83  * To put it differently, each physical eraseblock has 2 main states: free and
84  * used. The former state corresponds to the @wl->free tree. The latter state
85  * is split up into several sub-states:
86  * o the WL movement is allowed (@wl->used tree);
87  * o the WL movement is disallowed (@wl->erroneous) because the PEB is
88  *   erroneous - e.g., there was a read error;
89  * o the WL movement is temporarily prohibited (@wl->pq queue);
90  * o scrubbing is needed (@wl->scrub tree).
91  *
92  * Depending on the sub-state, wear-leveling entries of the used physical
93  * eraseblocks may be kept in one of those structures.
94  *
95  * Note, in this implementation, we keep a small in-RAM object for each physical
96  * eraseblock. This is surely not a scalable solution. But it appears to be good
97  * enough for moderately large flashes and it is simple. In future, one may
98  * re-work this sub-system and make it more scalable.
99  *
100  * At the moment this sub-system does not utilize the sequence number, which
101  * was introduced relatively recently. But it would be wise to do this because
102  * the sequence number of a logical eraseblock characterizes how old it is. For
103  * example, when we move a PEB with low erase counter, and we need to pick the
104  * target PEB, we pick a PEB with the highest EC if our PEB is "old" and we
105  * pick a target PEB with an average EC if our PEB is not very "old". This
106  * leaves room for future re-work of the WL sub-system.
107  */
108 
109 #include <linux/slab.h>
110 #include <linux/crc32.h>
111 #include <linux/freezer.h>
112 #include <linux/kthread.h>
113 #include "ubi.h"
114 
115 /* Number of physical eraseblocks reserved for wear-leveling purposes */
116 #define WL_RESERVED_PEBS 1
117 
118 /*
119  * Maximum difference between two erase counters. If this threshold is
120  * exceeded, the WL sub-system starts moving data from used physical
121  * eraseblocks with low erase counter to free physical eraseblocks with high
122  * erase counter.
123  */
124 #define UBI_WL_THRESHOLD CONFIG_MTD_UBI_WL_THRESHOLD
125 
126 /*
127  * When a physical eraseblock is moved, the WL sub-system has to pick the target
128  * physical eraseblock to move to. The simplest way would be just to pick the
129  * one with the highest erase counter. But in certain workloads this could lead
130  * to an unlimited wear of one or a few physical eraseblocks. Indeed, imagine a
131  * situation when the picked physical eraseblock is constantly erased after the
132  * data is written to it. So, we have a constant which limits the highest erase
133  * counter of the free physical eraseblock to pick. Namely, the WL sub-system
134  * does not pick eraseblocks with erase counter greater than the lowest erase
135  * counter plus %WL_FREE_MAX_DIFF.
136  */
137 #define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD)
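
/*
 * To make this concrete: with the default CONFIG_MTD_UBI_WL_THRESHOLD of 4096,
 * WL_FREE_MAX_DIFF is 8192; if the least worn free PEB has been erased 100
 * times, only free PEBs with erase counters below 100 + 8192 = 8292 may be
 * picked as wear-leveling targets.
 */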
138 
139 /*
140  * Maximum number of consecutive background thread failures which is enough to
141  * switch to read-only mode.
142  */
143 #define WL_MAX_FAILURES 32
144 
145 /**
146  * struct ubi_work - UBI work description data structure.
147  * @list: a link in the list of pending works
148  * @func: worker function
149  * @e: physical eraseblock to erase
150  * @torture: if the physical eraseblock has to be tortured
151  *
152  * The @func pointer points to the worker function. If the @cancel argument is
153  * not zero, the worker has to free the resources and exit immediately. The
154  * worker has to return zero in case of success and a negative error code in
155  * case of failure.
156  */
157 struct ubi_work {
158 	struct list_head list;
159 	int (*func)(struct ubi_device *ubi, struct ubi_work *wrk, int cancel);
160 	/* The below fields are only relevant to erasure works */
161 	struct ubi_wl_entry *e;
162 	int torture;
163 };
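
/*
 * A minimal sketch of a conforming worker function (hypothetical, for
 * illustration only - see erase_worker() below for a real one):
 *
 *	static int example_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 *				  int cancel)
 *	{
 *		if (cancel) {
 *			kfree(wrk);
 *			return 0;
 *		}
 *		... do the actual work, free @wrk, return 0 or -errno ...
 *	}
 */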
164 
165 #ifdef CONFIG_MTD_UBI_DEBUG
166 static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec);
167 static int paranoid_check_in_wl_tree(const struct ubi_device *ubi,
168 				     struct ubi_wl_entry *e,
169 				     struct rb_root *root);
170 static int paranoid_check_in_pq(const struct ubi_device *ubi,
171 				struct ubi_wl_entry *e);
172 #else
173 #define paranoid_check_ec(ubi, pnum, ec) 0
174 #define paranoid_check_in_wl_tree(ubi, e, root)
175 #define paranoid_check_in_pq(ubi, e) 0
176 #endif
177 
178 /**
179  * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
180  * @e: the wear-leveling entry to add
181  * @root: the root of the tree
182  *
183  * Note, we use (erase counter, physical eraseblock number) pairs as keys in
184  * the @ubi->used and @ubi->free RB-trees.
185  */
186 static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
187 {
188 	struct rb_node **p, *parent = NULL;
189 
190 	p = &root->rb_node;
191 	while (*p) {
192 		struct ubi_wl_entry *e1;
193 
194 		parent = *p;
195 		e1 = rb_entry(parent, struct ubi_wl_entry, u.rb);
196 
197 		if (e->ec < e1->ec)
198 			p = &(*p)->rb_left;
199 		else if (e->ec > e1->ec)
200 			p = &(*p)->rb_right;
201 		else {
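			/* Equal erase counters - the unique PEB number breaks the tie */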
202 			ubi_assert(e->pnum != e1->pnum);
203 			if (e->pnum < e1->pnum)
204 				p = &(*p)->rb_left;
205 			else
206 				p = &(*p)->rb_right;
207 		}
208 	}
209 
210 	rb_link_node(&e->u.rb, parent, p);
211 	rb_insert_color(&e->u.rb, root);
212 }
213 
214 /**
215  * do_work - do one pending work.
216  * @ubi: UBI device description object
217  *
218  * This function returns zero in case of success and a negative error code in
219  * case of failure.
220  */
221 static int do_work(struct ubi_device *ubi)
222 {
223 	int err;
224 	struct ubi_work *wrk;
225 
226 	cond_resched();
227 
228 	/*
229 	 * @ubi->work_sem is used to synchronize with the workers. Workers take
230 	 * it in read mode, so many of them may be doing works at a time. But
231 	 * the queue flush code has to be sure the whole queue of works is
232 	 * done, and it takes the semaphore in write mode.
233 	 */
234 	down_read(&ubi->work_sem);
235 	spin_lock(&ubi->wl_lock);
236 	if (list_empty(&ubi->works)) {
237 		spin_unlock(&ubi->wl_lock);
238 		up_read(&ubi->work_sem);
239 		return 0;
240 	}
241 
242 	wrk = list_entry(ubi->works.next, struct ubi_work, list);
243 	list_del(&wrk->list);
244 	ubi->works_count -= 1;
245 	ubi_assert(ubi->works_count >= 0);
246 	spin_unlock(&ubi->wl_lock);
247 
248 	/*
249 	 * Call the worker function. Do not touch the work structure
250 	 * after this call as it will have been freed or reused by that
251 	 * time by the worker function.
252 	 */
253 	err = wrk->func(ubi, wrk, 0);
254 	if (err)
255 		ubi_err("work failed with error code %d", err);
256 	up_read(&ubi->work_sem);
257 
258 	return err;
259 }
260 
261 /**
262  * produce_free_peb - produce a free physical eraseblock.
263  * @ubi: UBI device description object
264  *
265  * This function tries to make a free PEB by means of synchronous execution of
266  * pending works. This may be needed if, for example, the background thread is
267  * disabled. Returns zero in case of success and a negative error code in case
268  * of failure.
269  */
270 static int produce_free_peb(struct ubi_device *ubi)
271 {
272 	int err;
273 
274 	spin_lock(&ubi->wl_lock);
275 	while (!ubi->free.rb_node) {
276 		spin_unlock(&ubi->wl_lock);
277 
278 		dbg_wl("do one work synchronously");
279 		err = do_work(ubi);
280 		if (err)
281 			return err;
282 
283 		spin_lock(&ubi->wl_lock);
284 	}
285 	spin_unlock(&ubi->wl_lock);
286 
287 	return 0;
288 }
289 
290 /**
291  * in_wl_tree - check if wear-leveling entry is present in a WL RB-tree.
292  * @e: the wear-leveling entry to check
293  * @root: the root of the tree
294  *
295  * This function returns non-zero if @e is in the @root RB-tree and zero if it
296  * is not.
297  */
298 static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
299 {
300 	struct rb_node *p;
301 
302 	p = root->rb_node;
303 	while (p) {
304 		struct ubi_wl_entry *e1;
305 
306 		e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
307 
308 		if (e->pnum == e1->pnum) {
309 			ubi_assert(e == e1);
310 			return 1;
311 		}
312 
313 		if (e->ec < e1->ec)
314 			p = p->rb_left;
315 		else if (e->ec > e1->ec)
316 			p = p->rb_right;
317 		else {
318 			ubi_assert(e->pnum != e1->pnum);
319 			if (e->pnum < e1->pnum)
320 				p = p->rb_left;
321 			else
322 				p = p->rb_right;
323 		}
324 	}
325 
326 	return 0;
327 }
328 
329 /**
330  * prot_queue_add - add physical eraseblock to the protection queue.
331  * @ubi: UBI device description object
332  * @e: the physical eraseblock to add
333  *
334  * This function adds @e to the tail of the protection queue @ubi->pq, where
335  * @e will stay for %UBI_PROT_QUEUE_LEN erase operations and will be
336  * temporarily protected from the wear-leveling worker. Note, @ubi->wl_lock has
337  * be locked.
338  */
339 static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
340 {
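	/*
	 * @ubi->pq is used as a circular array of list heads: @pq_head points
	 * at the slot which will be aged out next, so the slot just before it
	 * holds the youngest, most recently protected entries.
	 */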
341 	int pq_tail = ubi->pq_head - 1;
342 
343 	if (pq_tail < 0)
344 		pq_tail = UBI_PROT_QUEUE_LEN - 1;
345 	ubi_assert(pq_tail >= 0 && pq_tail < UBI_PROT_QUEUE_LEN);
346 	list_add_tail(&e->u.list, &ubi->pq[pq_tail]);
347 	dbg_wl("added PEB %d EC %d to the protection queue", e->pnum, e->ec);
348 }
349 
350 /**
351  * find_wl_entry - find wear-leveling entry closest to certain erase counter.
352  * @root: the RB-tree where to look for
353  * @diff: maximum possible difference from the smallest erase counter
354  *
355  * This function looks for a wear leveling entry with erase counter closest to
356  * min + @diff, where min is the smallest erase counter.
357  */
358 static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int diff)
359 {
360 	struct rb_node *p;
361 	struct ubi_wl_entry *e;
362 	int max;
363 
364 	e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
365 	max = e->ec + diff;
366 
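	/* Walk the EC-ordered tree, remembering the rightmost entry below @max */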
367 	p = root->rb_node;
368 	while (p) {
369 		struct ubi_wl_entry *e1;
370 
371 		e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
372 		if (e1->ec >= max)
373 			p = p->rb_left;
374 		else {
375 			p = p->rb_right;
376 			e = e1;
377 		}
378 	}
379 
380 	return e;
381 }
382 
383 /**
384  * ubi_wl_get_peb - get a physical eraseblock.
385  * @ubi: UBI device description object
386  * @dtype: type of data which will be stored in this physical eraseblock
387  *
388  * This function returns a physical eraseblock in case of success and a
389  * negative error code in case of failure. Might sleep.
390  */
391 int ubi_wl_get_peb(struct ubi_device *ubi, int dtype)
392 {
393 	int err;
394 	struct ubi_wl_entry *e, *first, *last;
395 
396 	ubi_assert(dtype == UBI_LONGTERM || dtype == UBI_SHORTTERM ||
397 		   dtype == UBI_UNKNOWN);
398 
399 retry:
400 	spin_lock(&ubi->wl_lock);
401 	if (!ubi->free.rb_node) {
402 		if (ubi->works_count == 0) {
403 			ubi_assert(list_empty(&ubi->works));
404 			ubi_err("no free eraseblocks");
405 			spin_unlock(&ubi->wl_lock);
406 			return -ENOSPC;
407 		}
408 		spin_unlock(&ubi->wl_lock);
409 
410 		err = produce_free_peb(ubi);
411 		if (err < 0)
412 			return err;
413 		goto retry;
414 	}
415 
416 	switch (dtype) {
417 	case UBI_LONGTERM:
418 		/*
419 		 * For long term data we pick a physical eraseblock with high
420 		 * erase counter. But the highest erase counter we can pick is
421 		 * bounded by the lowest erase counter plus
422 		 * %WL_FREE_MAX_DIFF.
423 		 */
424 		e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
425 		break;
426 	case UBI_UNKNOWN:
427 		/*
428 		 * For unknown data we pick a physical eraseblock with medium
429 		 * erase counter. But by no means can we pick a physical
430 		 * eraseblock with an erase counter greater than or equal to
431 		 * the lowest erase counter plus %WL_FREE_MAX_DIFF/2.
432 		 */
433 		first = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry,
434 					u.rb);
435 		last = rb_entry(rb_last(&ubi->free), struct ubi_wl_entry, u.rb);
436 
437 		if (last->ec - first->ec < WL_FREE_MAX_DIFF)
438 			e = rb_entry(ubi->free.rb_node,
439 					struct ubi_wl_entry, u.rb);
440 		else
441 			e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF/2);
442 		break;
443 	case UBI_SHORTTERM:
444 		/*
445 		 * For short term data we pick a physical eraseblock with the
446 		 * lowest erase counter as we expect it will be erased soon.
447 		 */
448 		e = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, u.rb);
449 		break;
450 	default:
451 		BUG();
452 	}
453 
454 	paranoid_check_in_wl_tree(ubi, e, &ubi->free);
455 
456 	/*
457 	 * Move the physical eraseblock to the protection queue where it will
458 	 * be protected from being moved for some time.
459 	 */
460 	rb_erase(&e->u.rb, &ubi->free);
461 	dbg_wl("PEB %d EC %d", e->pnum, e->ec);
462 	prot_queue_add(ubi, e);
463 	spin_unlock(&ubi->wl_lock);
464 
465 	err = ubi_dbg_check_all_ff(ubi, e->pnum, ubi->vid_hdr_aloffset,
466 				   ubi->peb_size - ubi->vid_hdr_aloffset);
467 	if (err) {
468 		ubi_err("new PEB %d does not contain all 0xFF bytes", e->pnum);
469 		return err;
470 	}
471 
472 	return e->pnum;
473 }
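
/*
 * A rough usage sketch (hypothetical, error handling trimmed): a caller such
 * as the EBA sub-system obtains a PEB and then writes a VID header to it:
 *
 *	pnum = ubi_wl_get_peb(ubi, UBI_UNKNOWN);
 *	if (pnum < 0)
 *		return pnum;
 *	err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
 */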
474 
475 /**
476  * prot_queue_del - remove a physical eraseblock from the protection queue.
477  * @ubi: UBI device description object
478  * @pnum: the physical eraseblock to remove
479  *
480  * This function deletes PEB @pnum from the protection queue and returns zero
481  * in case of success and %-ENODEV if the PEB was not found.
482  */
483 static int prot_queue_del(struct ubi_device *ubi, int pnum)
484 {
485 	struct ubi_wl_entry *e;
486 
487 	e = ubi->lookuptbl[pnum];
488 	if (!e)
489 		return -ENODEV;
490 
491 	if (paranoid_check_in_pq(ubi, e))
492 		return -ENODEV;
493 
494 	list_del(&e->u.list);
495 	dbg_wl("deleted PEB %d from the protection queue", e->pnum);
496 	return 0;
497 }
498 
499 /**
500  * sync_erase - synchronously erase a physical eraseblock.
501  * @ubi: UBI device description object
502  * @e: the physical eraseblock to erase
503  * @torture: if the physical eraseblock has to be tortured
504  *
505  * This function returns zero in case of success and a negative error code in
506  * case of failure.
507  */
508 static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
509 		      int torture)
510 {
511 	int err;
512 	struct ubi_ec_hdr *ec_hdr;
513 	unsigned long long ec = e->ec;
514 
515 	dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec);
516 
517 	err = paranoid_check_ec(ubi, e->pnum, e->ec);
518 	if (err)
519 		return -EINVAL;
520 
521 	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
522 	if (!ec_hdr)
523 		return -ENOMEM;
524 
525 	err = ubi_io_sync_erase(ubi, e->pnum, torture);
526 	if (err < 0)
527 		goto out_free;
528 
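	/* On success, 'ubi_io_sync_erase()' returns the number of erasures made */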
529 	ec += err;
530 	if (ec > UBI_MAX_ERASECOUNTER) {
531 		/*
532 		 * Erase counter overflow. Upgrade UBI and use 64-bit
533 		 * erase counters internally.
534 		 */
535 		ubi_err("erase counter overflow at PEB %d, EC %llu",
536 			e->pnum, ec);
537 		err = -EINVAL;
538 		goto out_free;
539 	}
540 
541 	dbg_wl("erased PEB %d, new EC %llu", e->pnum, ec);
542 
543 	ec_hdr->ec = cpu_to_be64(ec);
544 
545 	err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr);
546 	if (err)
547 		goto out_free;
548 
549 	e->ec = ec;
550 	spin_lock(&ubi->wl_lock);
551 	if (e->ec > ubi->max_ec)
552 		ubi->max_ec = e->ec;
553 	spin_unlock(&ubi->wl_lock);
554 
555 out_free:
556 	kfree(ec_hdr);
557 	return err;
558 }
559 
560 /**
561  * serve_prot_queue - check if it is time to stop protecting PEBs.
562  * @ubi: UBI device description object
563  *
564  * This function is called after each erase operation and removes PEBs from the
565  * head of the protection queue. These PEBs have been protected for long enough
566  * and should be moved to the used tree.
567  */
568 static void serve_prot_queue(struct ubi_device *ubi)
569 {
570 	struct ubi_wl_entry *e, *tmp;
571 	int count;
572 
573 	/*
574 	 * There may be several protected physical eraseblocks to remove;
575 	 * process them all.
576 	 */
577 repeat:
578 	count = 0;
579 	spin_lock(&ubi->wl_lock);
580 	list_for_each_entry_safe(e, tmp, &ubi->pq[ubi->pq_head], u.list) {
581 		dbg_wl("PEB %d EC %d protection over, move to used tree",
582 			e->pnum, e->ec);
583 
584 		list_del(&e->u.list);
585 		wl_tree_add(e, &ubi->used);
586 		if (count++ > 32) {
587 			/*
588 			 * Let's be nice and avoid holding the spinlock for
589 			 * too long.
590 			 */
591 			spin_unlock(&ubi->wl_lock);
592 			cond_resched();
593 			goto repeat;
594 		}
595 	}
596 
597 	ubi->pq_head += 1;
598 	if (ubi->pq_head == UBI_PROT_QUEUE_LEN)
599 		ubi->pq_head = 0;
600 	ubi_assert(ubi->pq_head >= 0 && ubi->pq_head < UBI_PROT_QUEUE_LEN);
601 	spin_unlock(&ubi->wl_lock);
602 }
603 
604 /**
605  * schedule_ubi_work - schedule a work.
606  * @ubi: UBI device description object
607  * @wrk: the work to schedule
608  *
609  * This function adds a work defined by @wrk to the tail of the pending works
610  * list.
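 * Note, this function takes @ubi->wl_lock internally, so it must not be
 * called with that lock already held.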
611  */
612 static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
613 {
614 	spin_lock(&ubi->wl_lock);
615 	list_add_tail(&wrk->list, &ubi->works);
616 	ubi_assert(ubi->works_count >= 0);
617 	ubi->works_count += 1;
618 	if (ubi->thread_enabled && !ubi_dbg_is_bgt_disabled(ubi))
619 		wake_up_process(ubi->bgt_thread);
620 	spin_unlock(&ubi->wl_lock);
621 }
622 
623 static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
624 			int cancel);
625 
626 /**
627  * schedule_erase - schedule an erase work.
628  * @ubi: UBI device description object
629  * @e: the WL entry of the physical eraseblock to erase
630  * @torture: if the physical eraseblock has to be tortured
631  *
632  * This function returns zero in case of success and %-ENOMEM in case of
633  * failure.
634  */
635 static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
636 			  int torture)
637 {
638 	struct ubi_work *wl_wrk;
639 
640 	dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
641 	       e->pnum, e->ec, torture);
642 
643 	wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
644 	if (!wl_wrk)
645 		return -ENOMEM;
646 
647 	wl_wrk->func = &erase_worker;
648 	wl_wrk->e = e;
649 	wl_wrk->torture = torture;
650 
651 	schedule_ubi_work(ubi, wl_wrk);
652 	return 0;
653 }
654 
655 /**
656  * wear_leveling_worker - wear-leveling worker function.
657  * @ubi: UBI device description object
658  * @wrk: the work object
659  * @cancel: non-zero if the worker has to free memory and exit
660  *
661  * This function copies a more worn out physical eraseblock to a less worn out
662  * one. Returns zero in case of success and a negative error code in case of
663  * failure.
664  */
665 static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
666 				int cancel)
667 {
668 	int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
669 	int vol_id = -1, uninitialized_var(lnum);
670 	struct ubi_wl_entry *e1, *e2;
671 	struct ubi_vid_hdr *vid_hdr;
672 
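	/* The work description object is not needed any longer - free it now */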
673 	kfree(wrk);
674 	if (cancel)
675 		return 0;
676 
677 	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
678 	if (!vid_hdr)
679 		return -ENOMEM;
680 
681 	mutex_lock(&ubi->move_mutex);
682 	spin_lock(&ubi->wl_lock);
683 	ubi_assert(!ubi->move_from && !ubi->move_to);
684 	ubi_assert(!ubi->move_to_put);
685 
686 	if (!ubi->free.rb_node ||
687 	    (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
688 		/*
689 		 * No free physical eraseblocks? Well, they must be waiting in
690 		 * the queue to be erased. Cancel movement - it will be
691 		 * triggered again when a free physical eraseblock appears.
692 		 *
693 		 * No used physical eraseblocks? They must be temporarily
694 		 * protected from being moved. They will be moved to the
695 		 * @ubi->used tree later and the wear-leveling will be
696 		 * triggered again.
697 		 */
698 		dbg_wl("cancel WL, a list is empty: free %d, used %d",
699 		       !ubi->free.rb_node, !ubi->used.rb_node);
700 		goto out_cancel;
701 	}
702 
703 	if (!ubi->scrub.rb_node) {
704 		/*
705 		 * Now pick the least worn-out used physical eraseblock and a
706 		 * highly worn-out free physical eraseblock. If the erase
707 		 * counters differ enough, start wear-leveling.
708 		 */
709 		e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
710 		e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
711 
712 		if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
713 			dbg_wl("no WL needed: min used EC %d, max free EC %d",
714 			       e1->ec, e2->ec);
715 			goto out_cancel;
716 		}
717 		paranoid_check_in_wl_tree(ubi, e1, &ubi->used);
718 		rb_erase(&e1->u.rb, &ubi->used);
719 		dbg_wl("move PEB %d EC %d to PEB %d EC %d",
720 		       e1->pnum, e1->ec, e2->pnum, e2->ec);
721 	} else {
722 		/* Perform scrubbing */
723 		scrubbing = 1;
724 		e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
725 		e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
726 		paranoid_check_in_wl_tree(ubi, e1, &ubi->scrub);
727 		rb_erase(&e1->u.rb, &ubi->scrub);
728 		dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
729 	}
730 
731 	paranoid_check_in_wl_tree(ubi, e2, &ubi->free);
732 	rb_erase(&e2->u.rb, &ubi->free);
733 	ubi->move_from = e1;
734 	ubi->move_to = e2;
735 	spin_unlock(&ubi->wl_lock);
736 
737 	/*
738 	 * Now we are going to copy physical eraseblock @e1->pnum to @e2->pnum.
739 	 * We so far do not know which logical eraseblock our physical
740 	 * eraseblock (@e1) belongs to. We have to read the volume identifier
741 	 * header first.
742 	 *
743 	 * Note, we are protected from this PEB being unmapped and erased. The
744 	 * 'ubi_wl_put_peb()' would wait for moving to be finished if the PEB
745 	 * which is being moved was unmapped.
746 	 */
747 
748 	err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0);
749 	if (err && err != UBI_IO_BITFLIPS) {
750 		if (err == UBI_IO_FF) {
751 			/*
752 			 * We are trying to move a PEB without a VID header. UBI
753 			 * always writes VID headers shortly after the PEB is
754 			 * given out, so the user has not yet had a chance to
755 			 * write one, because it was preempted. Add this PEB to
756 			 * the protection queue for now; presumably more data
757 			 * will be written to it (including the missing VID
758 			 * header), and then we will move it.
760 			 */
761 			dbg_wl("PEB %d has no VID header", e1->pnum);
762 			protect = 1;
763 			goto out_not_moved;
764 		} else if (err == UBI_IO_FF_BITFLIPS) {
765 			/*
766 			 * The same situation as %UBI_IO_FF, but bit-flips were
767 			 * detected. It is better to schedule this PEB for
768 			 * scrubbing.
769 			 */
770 			dbg_wl("PEB %d has no VID header but has bit-flips",
771 			       e1->pnum);
772 			scrubbing = 1;
773 			goto out_not_moved;
774 		}
775 
776 		ubi_err("error %d while reading VID header from PEB %d",
777 			err, e1->pnum);
778 		goto out_error;
779 	}
780 
781 	vol_id = be32_to_cpu(vid_hdr->vol_id);
782 	lnum = be32_to_cpu(vid_hdr->lnum);
783 
784 	err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
785 	if (err) {
786 		if (err == MOVE_CANCEL_RACE) {
787 			/*
788 			 * The LEB has not been moved because the volume is
789 			 * being deleted or the PEB has been put meanwhile. We
790 			 * should prevent this PEB from being selected for
791 			 * wear-leveling movement again, so put it to the
792 			 * protection queue.
793 			 */
794 			protect = 1;
795 			goto out_not_moved;
796 		}
797 		if (err == MOVE_RETRY) {
798 			scrubbing = 1;
799 			goto out_not_moved;
800 		}
801 		if (err == MOVE_TARGET_BITFLIPS || err == MOVE_TARGET_WR_ERR ||
802 		    err == MOVE_TARGET_RD_ERR) {
803 			/*
804 			 * Target PEB had bit-flips or write error - torture it.
805 			 */
806 			torture = 1;
807 			goto out_not_moved;
808 		}
809 
810 		if (err == MOVE_SOURCE_RD_ERR) {
811 			/*
812 			 * An error happened while reading the source PEB. Do
813 			 * not switch to R/O mode in this case, and give the
814 			 * upper layers a possibility to recover from this,
815 			 * e.g. by unmapping corresponding LEB. Instead, just
816 			 * put this PEB to the @ubi->erroneous list to prevent
817 			 * UBI from trying to move it over and over again.
818 			 */
819 			if (ubi->erroneous_peb_count > ubi->max_erroneous) {
820 				ubi_err("too many erroneous eraseblocks (%d)",
821 					ubi->erroneous_peb_count);
822 				goto out_error;
823 			}
824 			erroneous = 1;
825 			goto out_not_moved;
826 		}
827 
828 		if (err < 0)
829 			goto out_error;
830 
831 		ubi_assert(0);
832 	}
833 
834 	/* The PEB has been successfully moved */
835 	if (scrubbing)
836 		ubi_msg("scrubbed PEB %d (LEB %d:%d), data moved to PEB %d",
837 			e1->pnum, vol_id, lnum, e2->pnum);
838 	ubi_free_vid_hdr(ubi, vid_hdr);
839 
840 	spin_lock(&ubi->wl_lock);
841 	if (!ubi->move_to_put) {
842 		wl_tree_add(e2, &ubi->used);
843 		e2 = NULL;
844 	}
845 	ubi->move_from = ubi->move_to = NULL;
846 	ubi->move_to_put = ubi->wl_scheduled = 0;
847 	spin_unlock(&ubi->wl_lock);
848 
849 	err = schedule_erase(ubi, e1, 0);
850 	if (err) {
851 		kmem_cache_free(ubi_wl_entry_slab, e1);
852 		if (e2)
853 			kmem_cache_free(ubi_wl_entry_slab, e2);
854 		goto out_ro;
855 	}
856 
857 	if (e2) {
858 		/*
859 		 * Well, the target PEB was put meanwhile, schedule it for
860 		 * erasure.
861 		 */
862 		dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase",
863 		       e2->pnum, vol_id, lnum);
864 		err = schedule_erase(ubi, e2, 0);
865 		if (err) {
866 			kmem_cache_free(ubi_wl_entry_slab, e2);
867 			goto out_ro;
868 		}
869 	}
870 
871 	dbg_wl("done");
872 	mutex_unlock(&ubi->move_mutex);
873 	return 0;
874 
875 	/*
876 	 * For some reason the LEB was not moved: might be an error, might be
877 	 * something else. @e1 was not changed, so return it back. @e2 might
878 	 * have been changed, schedule it for erasure.
879 	 */
880 out_not_moved:
881 	if (vol_id != -1)
882 		dbg_wl("cancel moving PEB %d (LEB %d:%d) to PEB %d (%d)",
883 		       e1->pnum, vol_id, lnum, e2->pnum, err);
884 	else
885 		dbg_wl("cancel moving PEB %d to PEB %d (%d)",
886 		       e1->pnum, e2->pnum, err);
887 	spin_lock(&ubi->wl_lock);
888 	if (protect)
889 		prot_queue_add(ubi, e1);
890 	else if (erroneous) {
891 		wl_tree_add(e1, &ubi->erroneous);
892 		ubi->erroneous_peb_count += 1;
893 	} else if (scrubbing)
894 		wl_tree_add(e1, &ubi->scrub);
895 	else
896 		wl_tree_add(e1, &ubi->used);
897 	ubi_assert(!ubi->move_to_put);
898 	ubi->move_from = ubi->move_to = NULL;
899 	ubi->wl_scheduled = 0;
900 	spin_unlock(&ubi->wl_lock);
901 
902 	ubi_free_vid_hdr(ubi, vid_hdr);
903 	err = schedule_erase(ubi, e2, torture);
904 	if (err) {
905 		kmem_cache_free(ubi_wl_entry_slab, e2);
906 		goto out_ro;
907 	}
908 	mutex_unlock(&ubi->move_mutex);
909 	return 0;
910 
911 out_error:
912 	if (vol_id != -1)
913 		ubi_err("error %d while moving PEB %d (LEB %d:%d) to PEB %d",
914 			err, e1->pnum, vol_id, lnum, e2->pnum);
915 	else
916 		ubi_err("error %d while moving PEB %d to PEB %d",
917 			err, e1->pnum, e2->pnum);
918 	spin_lock(&ubi->wl_lock);
919 	ubi->move_from = ubi->move_to = NULL;
920 	ubi->move_to_put = ubi->wl_scheduled = 0;
921 	spin_unlock(&ubi->wl_lock);
922 
923 	ubi_free_vid_hdr(ubi, vid_hdr);
924 	kmem_cache_free(ubi_wl_entry_slab, e1);
925 	kmem_cache_free(ubi_wl_entry_slab, e2);
926 
927 out_ro:
928 	ubi_ro_mode(ubi);
929 	mutex_unlock(&ubi->move_mutex);
930 	ubi_assert(err != 0);
931 	return err < 0 ? err : -EIO;
932 
933 out_cancel:
934 	ubi->wl_scheduled = 0;
935 	spin_unlock(&ubi->wl_lock);
936 	mutex_unlock(&ubi->move_mutex);
937 	ubi_free_vid_hdr(ubi, vid_hdr);
938 	return 0;
939 }
940 
941 /**
942  * ensure_wear_leveling - schedule wear-leveling if it is needed.
943  * @ubi: UBI device description object
944  *
945  * This function checks if it is time to start wear-leveling and schedules it
946  * if so. This function returns zero in case of success and a negative error
947  * code in case of failure.
948  */
949 static int ensure_wear_leveling(struct ubi_device *ubi)
950 {
951 	int err = 0;
952 	struct ubi_wl_entry *e1;
953 	struct ubi_wl_entry *e2;
954 	struct ubi_work *wrk;
955 
956 	spin_lock(&ubi->wl_lock);
957 	if (ubi->wl_scheduled)
958 		/* Wear-leveling is already in the work queue */
959 		goto out_unlock;
960 
961 	/*
962 	 * If the ubi->scrub tree is not empty, scrubbing is needed, and the
963 	 * WL worker has to be scheduled anyway.
964 	 */
965 	if (!ubi->scrub.rb_node) {
966 		if (!ubi->used.rb_node || !ubi->free.rb_node)
967 			/* No physical eraseblocks - no deal */
968 			goto out_unlock;
969 
970 		/*
971 		 * We schedule wear-leveling only if the difference between the
972 		 * lowest erase counter of used physical eraseblocks and a high
973 		 * erase counter of free physical eraseblocks is greater than
974 		 * %UBI_WL_THRESHOLD.
975 		 */
976 		e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
977 		e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
978 
979 		if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
980 			goto out_unlock;
981 		dbg_wl("schedule wear-leveling");
982 	} else
983 		dbg_wl("schedule scrubbing");
984 
985 	ubi->wl_scheduled = 1;
986 	spin_unlock(&ubi->wl_lock);
987 
988 	wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
989 	if (!wrk) {
990 		err = -ENOMEM;
991 		goto out_cancel;
992 	}
993 
994 	wrk->func = &wear_leveling_worker;
995 	schedule_ubi_work(ubi, wrk);
996 	return err;
997 
998 out_cancel:
999 	spin_lock(&ubi->wl_lock);
1000 	ubi->wl_scheduled = 0;
1001 out_unlock:
1002 	spin_unlock(&ubi->wl_lock);
1003 	return err;
1004 }
1005 
1006 /**
1007  * erase_worker - physical eraseblock erase worker function.
1008  * @ubi: UBI device description object
1009  * @wl_wrk: the work object
1010  * @cancel: non-zero if the worker has to free memory and exit
1011  *
1012  * This function erases a physical eraseblock and performs torture testing if
1013  * needed. It also takes care of marking the physical eraseblock bad if
1014  * needed. Returns zero in case of success and a negative error code in case of
1015  * failure.
1016  */
1017 static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
1018 			int cancel)
1019 {
1020 	struct ubi_wl_entry *e = wl_wrk->e;
1021 	int pnum = e->pnum, err, need;
1022 
1023 	if (cancel) {
1024 		dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
1025 		kfree(wl_wrk);
1026 		kmem_cache_free(ubi_wl_entry_slab, e);
1027 		return 0;
1028 	}
1029 
1030 	dbg_wl("erase PEB %d EC %d", pnum, e->ec);
1031 
1032 	err = sync_erase(ubi, e, wl_wrk->torture);
1033 	if (!err) {
1034 		/* Fine, we've erased it successfully */
1035 		kfree(wl_wrk);
1036 
1037 		spin_lock(&ubi->wl_lock);
1038 		wl_tree_add(e, &ubi->free);
1039 		spin_unlock(&ubi->wl_lock);
1040 
1041 		/*
1042 		 * One more erase operation has happened, take care of
1043 		 * protected physical eraseblocks.
1044 		 */
1045 		serve_prot_queue(ubi);
1046 
1047 		/* And take care about wear-leveling */
1048 		err = ensure_wear_leveling(ubi);
1049 		return err;
1050 	}
1051 
1052 	ubi_err("failed to erase PEB %d, error %d", pnum, err);
1053 	kfree(wl_wrk);
1054 
1055 	if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
1056 	    err == -EBUSY) {
1057 		int err1;
1058 
1059 		/* Re-schedule the PEB for erasure */
1060 		err1 = schedule_erase(ubi, e, 0);
1061 		if (err1) {
1062 			err = err1;
1063 			goto out_ro;
1064 		}
1065 		return err;
1066 	}
1067 
1068 	kmem_cache_free(ubi_wl_entry_slab, e);
1069 	if (err != -EIO)
1070 		/*
1071 		 * If this is not %-EIO, we have no idea what to do. Scheduling
1072 		 * this physical eraseblock for erasure again would cause
1073 		 * errors again and again. Well, let's switch to R/O mode.
1074 		 */
1075 		goto out_ro;
1076 
1077 	/* It is %-EIO, the PEB went bad */
1078 
1079 	if (!ubi->bad_allowed) {
1080 		ubi_err("bad physical eraseblock %d detected", pnum);
1081 		goto out_ro;
1082 	}
1083 
1084 	spin_lock(&ubi->volumes_lock);
1085 	need = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs + 1;
1086 	if (need > 0) {
1087 		need = ubi->avail_pebs >= need ? need : ubi->avail_pebs;
1088 		ubi->avail_pebs -= need;
1089 		ubi->rsvd_pebs += need;
1090 		ubi->beb_rsvd_pebs += need;
1091 		if (need > 0)
1092 			ubi_msg("reserved %d more PEBs", need);
1093 	}
1094 
1095 	if (ubi->beb_rsvd_pebs == 0) {
1096 		spin_unlock(&ubi->volumes_lock);
1097 		ubi_err("no reserved physical eraseblocks");
1098 		goto out_ro;
1099 	}
1100 	spin_unlock(&ubi->volumes_lock);
1101 
1102 	ubi_msg("mark PEB %d as bad", pnum);
1103 	err = ubi_io_mark_bad(ubi, pnum);
1104 	if (err)
1105 		goto out_ro;
1106 
1107 	spin_lock(&ubi->volumes_lock);
1108 	ubi->beb_rsvd_pebs -= 1;
1109 	ubi->bad_peb_count += 1;
1110 	ubi->good_peb_count -= 1;
1111 	ubi_calculate_reserved(ubi);
1112 	if (ubi->beb_rsvd_pebs)
1113 		ubi_msg("%d PEBs left in the reserve", ubi->beb_rsvd_pebs);
1114 	else
1115 		ubi_warn("last PEB from the reserved pool was used");
1116 	spin_unlock(&ubi->volumes_lock);
1117 
1118 	return err;
1119 
1120 out_ro:
1121 	ubi_ro_mode(ubi);
1122 	return err;
1123 }
1124 
1125 /**
1126  * ubi_wl_put_peb - return a PEB to the wear-leveling sub-system.
1127  * @ubi: UBI device description object
1128  * @pnum: physical eraseblock to return
1129  * @torture: if this physical eraseblock has to be tortured
1130  *
1131  * This function is called to return physical eraseblock @pnum to the pool of
1132  * free physical eraseblocks. The @torture flag has to be set if an I/O error
1133  * occurred to this @pnum and it has to be tested. This function returns zero
1134  * in case of success, and a negative error code in case of failure.
1135  */
1136 int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
1137 {
1138 	int err;
1139 	struct ubi_wl_entry *e;
1140 
1141 	dbg_wl("PEB %d", pnum);
1142 	ubi_assert(pnum >= 0);
1143 	ubi_assert(pnum < ubi->peb_count);
1144 
1145 retry:
1146 	spin_lock(&ubi->wl_lock);
1147 	e = ubi->lookuptbl[pnum];
1148 	if (e == ubi->move_from) {
1149 		/*
1150 		 * User is putting the physical eraseblock which was selected to
1151 		 * be moved. It will be scheduled for erasure in the
1152 		 * wear-leveling worker.
1153 		 */
1154 		dbg_wl("PEB %d is being moved, wait", pnum);
1155 		spin_unlock(&ubi->wl_lock);
1156 
1157 		/* Wait for the WL worker by taking the @ubi->move_mutex */
1158 		mutex_lock(&ubi->move_mutex);
1159 		mutex_unlock(&ubi->move_mutex);
1160 		goto retry;
1161 	} else if (e == ubi->move_to) {
1162 		/*
1163 		 * User is putting the physical eraseblock which was selected
1164 		 * as the target the data is moved to. It may happen if the EBA
1165 		 * sub-system already re-mapped the LEB in 'ubi_eba_copy_leb()'
1166 		 * but the WL sub-system has not put the PEB to the "used" tree
1167 		 * yet, but it is about to do this. So we just set a flag which
1168 		 * will tell the WL worker that the PEB is not needed anymore
1169 		 * and should be scheduled for erasure.
1170 		 */
1171 		dbg_wl("PEB %d is the target of data moving", pnum);
1172 		ubi_assert(!ubi->move_to_put);
1173 		ubi->move_to_put = 1;
1174 		spin_unlock(&ubi->wl_lock);
1175 		return 0;
1176 	} else {
1177 		if (in_wl_tree(e, &ubi->used)) {
1178 			paranoid_check_in_wl_tree(ubi, e, &ubi->used);
1179 			rb_erase(&e->u.rb, &ubi->used);
1180 		} else if (in_wl_tree(e, &ubi->scrub)) {
1181 			paranoid_check_in_wl_tree(ubi, e, &ubi->scrub);
1182 			rb_erase(&e->u.rb, &ubi->scrub);
1183 		} else if (in_wl_tree(e, &ubi->erroneous)) {
1184 			paranoid_check_in_wl_tree(ubi, e, &ubi->erroneous);
1185 			rb_erase(&e->u.rb, &ubi->erroneous);
1186 			ubi->erroneous_peb_count -= 1;
1187 			ubi_assert(ubi->erroneous_peb_count >= 0);
1188 			/* Erroneous PEBs should be tortured */
1189 			torture = 1;
1190 		} else {
1191 			err = prot_queue_del(ubi, e->pnum);
1192 			if (err) {
1193 				ubi_err("PEB %d not found", pnum);
1194 				ubi_ro_mode(ubi);
1195 				spin_unlock(&ubi->wl_lock);
1196 				return err;
1197 			}
1198 		}
1199 	}
1200 	spin_unlock(&ubi->wl_lock);
1201 
1202 	err = schedule_erase(ubi, e, torture);
1203 	if (err) {
1204 		spin_lock(&ubi->wl_lock);
1205 		wl_tree_add(e, &ubi->used);
1206 		spin_unlock(&ubi->wl_lock);
1207 	}
1208 
1209 	return err;
1210 }
1211 
1212 /**
1213  * ubi_wl_scrub_peb - schedule a physical eraseblock for scrubbing.
1214  * @ubi: UBI device description object
1215  * @pnum: the physical eraseblock to schedule
1216  *
1217  * If a bit-flip in a physical eraseblock is detected, this physical eraseblock
1218  * needs scrubbing. This function schedules a physical eraseblock for
1219  * scrubbing which is done in background. This function returns zero in case of
1220  * success and a negative error code in case of failure.
1221  */
1222 int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
1223 {
1224 	struct ubi_wl_entry *e;
1225 
1226 	dbg_msg("schedule PEB %d for scrubbing", pnum);
1227 
1228 retry:
1229 	spin_lock(&ubi->wl_lock);
1230 	e = ubi->lookuptbl[pnum];
1231 	if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub) ||
1232 				   in_wl_tree(e, &ubi->erroneous)) {
1233 		spin_unlock(&ubi->wl_lock);
1234 		return 0;
1235 	}
1236 
1237 	if (e == ubi->move_to) {
1238 		/*
1239 		 * This physical eraseblock was used to move data to. The data
1240 		 * was moved but the PEB was not yet inserted into the proper
1241 		 * tree. We should just wait a little and let the WL worker
1242 		 * proceed.
1243 		 */
1244 		spin_unlock(&ubi->wl_lock);
1245 		dbg_wl("the PEB %d is not in proper tree, retry", pnum);
1246 		yield();
1247 		goto retry;
1248 	}
1249 
1250 	if (in_wl_tree(e, &ubi->used)) {
1251 		paranoid_check_in_wl_tree(ubi, e, &ubi->used);
1252 		rb_erase(&e->u.rb, &ubi->used);
1253 	} else {
1254 		int err;
1255 
1256 		err = prot_queue_del(ubi, e->pnum);
1257 		if (err) {
1258 			ubi_err("PEB %d not found", pnum);
1259 			ubi_ro_mode(ubi);
1260 			spin_unlock(&ubi->wl_lock);
1261 			return err;
1262 		}
1263 	}
1264 
1265 	wl_tree_add(e, &ubi->scrub);
1266 	spin_unlock(&ubi->wl_lock);
1267 
1268 	/*
1269 	 * Technically scrubbing is the same as wear-leveling, so it is done
1270 	 * by the WL worker.
1271 	 */
1272 	return ensure_wear_leveling(ubi);
1273 }
1274 
1275 /**
1276  * ubi_wl_flush - flush all pending works.
1277  * @ubi: UBI device description object
1278  *
1279  * This function returns zero in case of success and a negative error code in
1280  * case of failure.
1281  */
1282 int ubi_wl_flush(struct ubi_device *ubi)
1283 {
1284 	int err;
1285 
1286 	/*
1287 	 * Erase while the pending works queue is not empty, but not more than
1288 	 * the number of currently pending works.
1289 	 */
1290 	dbg_wl("flush (%d pending works)", ubi->works_count);
1291 	while (ubi->works_count) {
1292 		err = do_work(ubi);
1293 		if (err)
1294 			return err;
1295 	}
1296 
1297 	/*
1298 	 * Make sure all the works which have been done in parallel are
1299 	 * finished.
1300 	 */
1301 	down_write(&ubi->work_sem);
1302 	up_write(&ubi->work_sem);
1303 
1304 	/*
1305 	 * And in case the last work was the WL worker which canceled the LEB
1306 	 * movement, flush again.
1307 	 */
1308 	while (ubi->works_count) {
1309 		dbg_wl("flush more (%d pending works)", ubi->works_count);
1310 		err = do_work(ubi);
1311 		if (err)
1312 			return err;
1313 	}
1314 
1315 	return 0;
1316 }
1317 
1318 /**
1319  * tree_destroy - destroy an RB-tree.
1320  * @root: the root of the tree to destroy
1321  */
1322 static void tree_destroy(struct rb_root *root)
1323 {
1324 	struct rb_node *rb;
1325 	struct ubi_wl_entry *e;
1326 
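	/*
	 * Iterative post-order traversal: descend to a leaf, free it, and
	 * detach it from its parent so that the parent eventually becomes a
	 * leaf itself.
	 */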
1327 	rb = root->rb_node;
1328 	while (rb) {
1329 		if (rb->rb_left)
1330 			rb = rb->rb_left;
1331 		else if (rb->rb_right)
1332 			rb = rb->rb_right;
1333 		else {
1334 			e = rb_entry(rb, struct ubi_wl_entry, u.rb);
1335 
1336 			rb = rb_parent(rb);
1337 			if (rb) {
1338 				if (rb->rb_left == &e->u.rb)
1339 					rb->rb_left = NULL;
1340 				else
1341 					rb->rb_right = NULL;
1342 			}
1343 
1344 			kmem_cache_free(ubi_wl_entry_slab, e);
1345 		}
1346 	}
1347 }
1348 
1349 /**
1350  * ubi_thread - UBI background thread.
1351  * @u: the UBI device description object pointer
1352  */
1353 int ubi_thread(void *u)
1354 {
1355 	int failures = 0;
1356 	struct ubi_device *ubi = u;
1357 
1358 	ubi_msg("background thread \"%s\" started, PID %d",
1359 		ubi->bgt_name, task_pid_nr(current));
1360 
1361 	set_freezable();
1362 	for (;;) {
1363 		int err;
1364 
1365 		if (kthread_should_stop())
1366 			break;
1367 
1368 		if (try_to_freeze())
1369 			continue;
1370 
1371 		spin_lock(&ubi->wl_lock);
1372 		if (list_empty(&ubi->works) || ubi->ro_mode ||
1373 		    !ubi->thread_enabled || ubi_dbg_is_bgt_disabled(ubi)) {
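			/*
			 * Go to sleep. The task state is set before dropping
			 * @wl_lock, so a wake-up from 'schedule_ubi_work()'
			 * (which also takes @wl_lock) cannot be missed.
			 */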
1374 			set_current_state(TASK_INTERRUPTIBLE);
1375 			spin_unlock(&ubi->wl_lock);
1376 			schedule();
1377 			continue;
1378 		}
1379 		spin_unlock(&ubi->wl_lock);
1380 
1381 		err = do_work(ubi);
1382 		if (err) {
1383 			ubi_err("%s: work failed with error code %d",
1384 				ubi->bgt_name, err);
1385 			if (failures++ > WL_MAX_FAILURES) {
1386 				/*
1387 				 * Too many failures, disable the thread and
1388 				 * switch to read-only mode.
1389 				 */
1390 				ubi_msg("%s: %d consecutive failures",
1391 					ubi->bgt_name, WL_MAX_FAILURES);
1392 				ubi_ro_mode(ubi);
1393 				ubi->thread_enabled = 0;
1394 				continue;
1395 			}
1396 		} else
1397 			failures = 0;
1398 
1399 		cond_resched();
1400 	}
1401 
1402 	dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
1403 	return 0;
1404 }
1405 
1406 /**
1407  * cancel_pending - cancel all pending works.
1408  * @ubi: UBI device description object
1409  */
1410 static void cancel_pending(struct ubi_device *ubi)
1411 {
1412 	while (!list_empty(&ubi->works)) {
1413 		struct ubi_work *wrk;
1414 
1415 		wrk = list_entry(ubi->works.next, struct ubi_work, list);
1416 		list_del(&wrk->list);
1417 		wrk->func(ubi, wrk, 1);
1418 		ubi->works_count -= 1;
1419 		ubi_assert(ubi->works_count >= 0);
1420 	}
1421 }
1422 
1423 /**
1424  * ubi_wl_init_scan - initialize the WL sub-system using scanning information.
1425  * @ubi: UBI device description object
1426  * @si: scanning information
1427  *
1428  * This function returns zero in case of success, and a negative error code in
1429  * case of failure.
1430  */
1431 int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
1432 {
1433 	int err, i;
1434 	struct rb_node *rb1, *rb2;
1435 	struct ubi_scan_volume *sv;
1436 	struct ubi_scan_leb *seb, *tmp;
1437 	struct ubi_wl_entry *e;
1438 
1439 	ubi->used = ubi->erroneous = ubi->free = ubi->scrub = RB_ROOT;
1440 	spin_lock_init(&ubi->wl_lock);
1441 	mutex_init(&ubi->move_mutex);
1442 	init_rwsem(&ubi->work_sem);
1443 	ubi->max_ec = si->max_ec;
1444 	INIT_LIST_HEAD(&ubi->works);
1445 
1446 	sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);
1447 
1448 	err = -ENOMEM;
1449 	ubi->lookuptbl = kzalloc(ubi->peb_count * sizeof(void *), GFP_KERNEL);
1450 	if (!ubi->lookuptbl)
1451 		return err;
1452 
1453 	for (i = 0; i < UBI_PROT_QUEUE_LEN; i++)
1454 		INIT_LIST_HEAD(&ubi->pq[i]);
1455 	ubi->pq_head = 0;
1456 
1457 	list_for_each_entry_safe(seb, tmp, &si->erase, u.list) {
1458 		cond_resched();
1459 
1460 		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1461 		if (!e)
1462 			goto out_free;
1463 
1464 		e->pnum = seb->pnum;
1465 		e->ec = seb->ec;
1466 		ubi->lookuptbl[e->pnum] = e;
1467 		if (schedule_erase(ubi, e, 0)) {
1468 			kmem_cache_free(ubi_wl_entry_slab, e);
1469 			goto out_free;
1470 		}
1471 	}
1472 
1473 	list_for_each_entry(seb, &si->free, u.list) {
1474 		cond_resched();
1475 
1476 		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1477 		if (!e)
1478 			goto out_free;
1479 
1480 		e->pnum = seb->pnum;
1481 		e->ec = seb->ec;
1482 		ubi_assert(e->ec >= 0);
1483 		wl_tree_add(e, &ubi->free);
1484 		ubi->lookuptbl[e->pnum] = e;
1485 	}
1486 
1487 	ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) {
1488 		ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) {
1489 			cond_resched();
1490 
1491 			e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1492 			if (!e)
1493 				goto out_free;
1494 
1495 			e->pnum = seb->pnum;
1496 			e->ec = seb->ec;
1497 			ubi->lookuptbl[e->pnum] = e;
1498 			if (!seb->scrub) {
1499 				dbg_wl("add PEB %d EC %d to the used tree",
1500 				       e->pnum, e->ec);
1501 				wl_tree_add(e, &ubi->used);
1502 			} else {
1503 				dbg_wl("add PEB %d EC %d to the scrub tree",
1504 				       e->pnum, e->ec);
1505 				wl_tree_add(e, &ubi->scrub);
1506 			}
1507 		}
1508 	}
1509 
1510 	if (ubi->avail_pebs < WL_RESERVED_PEBS) {
1511 		ubi_err("not enough physical eraseblocks (%d, need %d)",
1512 			ubi->avail_pebs, WL_RESERVED_PEBS);
1513 		if (ubi->corr_peb_count)
1514 			ubi_err("%d PEBs are corrupted and not used",
1515 				ubi->corr_peb_count);
1516 		goto out_free;
1517 	}
1518 	ubi->avail_pebs -= WL_RESERVED_PEBS;
1519 	ubi->rsvd_pebs += WL_RESERVED_PEBS;
1520 
1521 	/* Schedule wear-leveling if needed */
1522 	err = ensure_wear_leveling(ubi);
1523 	if (err)
1524 		goto out_free;
1525 
1526 	return 0;
1527 
1528 out_free:
1529 	cancel_pending(ubi);
1530 	tree_destroy(&ubi->used);
1531 	tree_destroy(&ubi->free);
1532 	tree_destroy(&ubi->scrub);
1533 	kfree(ubi->lookuptbl);
1534 	return err;
1535 }
1536 
1537 /**
1538  * protection_queue_destroy - destroy the protection queue.
1539  * @ubi: UBI device description object
1540  */
1541 static void protection_queue_destroy(struct ubi_device *ubi)
1542 {
1543 	int i;
1544 	struct ubi_wl_entry *e, *tmp;
1545 
1546 	for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) {
1547 		list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) {
1548 			list_del(&e->u.list);
1549 			kmem_cache_free(ubi_wl_entry_slab, e);
1550 		}
1551 	}
1552 }
1553 
1554 /**
1555  * ubi_wl_close - close the wear-leveling sub-system.
1556  * @ubi: UBI device description object
1557  */
1558 void ubi_wl_close(struct ubi_device *ubi)
1559 {
1560 	dbg_wl("close the WL sub-system");
1561 	cancel_pending(ubi);
1562 	protection_queue_destroy(ubi);
1563 	tree_destroy(&ubi->used);
1564 	tree_destroy(&ubi->erroneous);
1565 	tree_destroy(&ubi->free);
1566 	tree_destroy(&ubi->scrub);
1567 	kfree(ubi->lookuptbl);
1568 }
1569 
1570 #ifdef CONFIG_MTD_UBI_DEBUG
1571 
1572 /**
1573  * paranoid_check_ec - make sure that the erase counter of a PEB is correct.
1574  * @ubi: UBI device description object
1575  * @pnum: the physical eraseblock number to check
1576  * @ec: the erase counter to check
1577  *
1578  * This function returns zero if the erase counter of physical eraseblock @pnum
1579  * is equal to @ec, and non-zero if not or if an error occurred.
1581  */
1582 static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec)
1583 {
1584 	int err;
1585 	long long read_ec;
1586 	struct ubi_ec_hdr *ec_hdr;
1587 
1588 	if (!ubi->dbg->chk_gen)
1589 		return 0;
1590 
1591 	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
1592 	if (!ec_hdr)
1593 		return -ENOMEM;
1594 
1595 	err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
1596 	if (err && err != UBI_IO_BITFLIPS) {
1597 		/* The header does not have to exist */
1598 		err = 0;
1599 		goto out_free;
1600 	}
1601 
1602 	read_ec = be64_to_cpu(ec_hdr->ec);
1603 	if (ec != read_ec) {
1604 		ubi_err("paranoid check failed for PEB %d", pnum);
1605 		ubi_err("read EC is %lld, should be %d", read_ec, ec);
1606 		ubi_dbg_dump_stack();
1607 		err = 1;
1608 	} else
1609 		err = 0;
1610 
1611 out_free:
1612 	kfree(ec_hdr);
1613 	return err;
1614 }
1615 
1616 /**
1617  * paranoid_check_in_wl_tree - check that wear-leveling entry is in WL RB-tree.
1618  * @ubi: UBI device description object
1619  * @e: the wear-leveling entry to check
1620  * @root: the root of the tree
1621  *
1622  * This function returns zero if @e is in the @root RB-tree and %-EINVAL if it
1623  * is not.
1624  */
1625 static int paranoid_check_in_wl_tree(const struct ubi_device *ubi,
1626 				     struct ubi_wl_entry *e,
1627 				     struct rb_root *root)
1628 {
1629 	if (!ubi->dbg->chk_gen)
1630 		return 0;
1631 
1632 	if (in_wl_tree(e, root))
1633 		return 0;
1634 
1635 	ubi_err("paranoid check failed for PEB %d, EC %d, RB-tree %p ",
1636 		e->pnum, e->ec, root);
1637 	ubi_dbg_dump_stack();
1638 	return -EINVAL;
1639 }
1640 
1641 /**
1642  * paranoid_check_in_pq - check if wear-leveling entry is in the protection
1643  *                        queue.
1644  * @ubi: UBI device description object
1645  * @e: the wear-leveling entry to check
1646  *
1647  * This function returns zero if @e is in @ubi->pq and %-EINVAL if it is not.
1648  */
1649 static int paranoid_check_in_pq(const struct ubi_device *ubi,
1650 				struct ubi_wl_entry *e)
1651 {
1652 	struct ubi_wl_entry *p;
1653 	int i;
1654 
1655 	if (!ubi->dbg->chk_gen)
1656 		return 0;
1657 
1658 	for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
1659 		list_for_each_entry(p, &ubi->pq[i], u.list)
1660 			if (p == e)
1661 				return 0;
1662 
1663 	ubi_err("paranoid check failed for PEB %d, EC %d, Protect queue",
1664 		e->pnum, e->ec);
1665 	ubi_dbg_dump_stack();
1666 	return -EINVAL;
1667 }
1668 
1669 #endif /* CONFIG_MTD_UBI_DEBUG */
1670