/*
 * Copyright (c) International Business Machines Corp., 2006
 *
 * SPDX-License-Identifier:     GPL-2.0+
 *
 * Authors: Artem Bityutskiy (Битюцкий Артём), Thomas Gleixner
 */

/*
 * UBI wear-leveling sub-system.
 *
 * This sub-system is responsible for wear-leveling. It works in terms of
 * physical eraseblocks and erase counters and knows nothing about logical
 * eraseblocks, volumes, etc. From this sub-system's perspective all physical
 * eraseblocks are of two types - used and free. Used physical eraseblocks are
 * those that were "get" by the 'ubi_wl_get_peb()' function, and free physical
 * eraseblocks are those that were put by the 'ubi_wl_put_peb()' function.
 *
 * Physical eraseblocks returned by 'ubi_wl_get_peb()' have only the erase
 * counter header. The rest of the physical eraseblock contains only %0xFF
 * bytes.
 *
 * When physical eraseblocks are returned to the WL sub-system by means of the
 * 'ubi_wl_put_peb()' function, they are scheduled for erasure. The erasure is
 * done asynchronously in context of the per-UBI device background thread,
 * which is also managed by the WL sub-system.
 *
 * The wear-leveling is ensured by means of moving the contents of used
 * physical eraseblocks with low erase counter to free physical eraseblocks
 * with high erase counter.
 *
 * If the WL sub-system fails to erase a physical eraseblock, it marks it as
 * bad.
 *
 * This sub-system is also responsible for scrubbing. If a bit-flip is detected
 * in a physical eraseblock, it has to be moved. Technically this is the same
 * as moving it for wear-leveling reasons.
 *
 * As was said, for the UBI sub-system all physical eraseblocks are either
 * "free" or "used". Free eraseblocks are kept in the @wl->free RB-tree, while
 * used eraseblocks are kept in @wl->used, @wl->erroneous, or @wl->scrub
 * RB-trees, as well as (temporarily) in the @wl->pq queue.
 *
 * When the WL sub-system returns a physical eraseblock, the physical
 * eraseblock is protected from being moved for some "time". For this reason,
 * the physical eraseblock is not directly moved from the @wl->free tree to the
 * @wl->used tree. There is a protection queue in between where this
 * physical eraseblock is temporarily stored (@wl->pq).
 *
 * All this protection stuff is needed because:
 *  o we don't want to move physical eraseblocks just after we have given them
 *    to the user; instead, we first want to let users fill them up with data;
 *
 *  o there is a chance that the user will put the physical eraseblock very
 *    soon, so it makes sense not to move it for some time, but wait.
 *
 * Physical eraseblocks stay protected only for limited time. But the "time" is
 * measured in erase cycles in this case. This is implemented with help of the
 * protection queue. Eraseblocks are put to the tail of this queue when they
 * are returned by the 'ubi_wl_get_peb()', and eraseblocks are removed from the
 * head of the queue on each erase operation (for any eraseblock). So the
 * length of the queue defines how many (global) erase cycles PEBs are protected.
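 *
 * For example, if %UBI_PROT_QUEUE_LEN is, say, 10, a PEB put to the tail of
 * the queue when it is handed out leaves the head only after 10 further
 * erase operations have completed, whichever PEBs those erasures hit.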
 *
 * To put it differently, each physical eraseblock has 2 main states: free and
 * used. The former state corresponds to the @wl->free tree. The latter state
 * is split up into several sub-states:
 * o the WL movement is allowed (@wl->used tree);
 * o the WL movement is disallowed (@wl->erroneous) because the PEB is
 *   erroneous - e.g., there was a read error;
 * o the WL movement is temporarily prohibited (@wl->pq queue);
 * o scrubbing is needed (@wl->scrub tree).
 *
 * Depending on the sub-state, wear-leveling entries of the used physical
 * eraseblocks may be kept in one of those structures.
 *
 * Note, in this implementation, we keep a small in-RAM object for each physical
 * eraseblock. This is surely not a scalable solution. But it appears to be good
 * enough for moderately large flashes and it is simple. In future, one may
 * re-work this sub-system and make it more scalable.
 *
 * At the moment this sub-system does not utilize the sequence number, which
 * was introduced relatively recently. But it would be wise to do this because
 * the sequence number of a logical eraseblock characterizes how old it is. For
 * example, when we move a PEB with low erase counter, and we need to pick the
 * target PEB, we pick a PEB with the highest EC if our PEB is "old" and we
 * pick target PEB with an average EC if our PEB is not very "old". This leaves
 * room for future re-work of the WL sub-system.
 */

#ifndef __UBOOT__
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#else
#include <ubi_uboot.h>
#endif

#include "ubi.h"
#include "wl.h"

/* Number of physical eraseblocks reserved for wear-leveling purposes */
#define WL_RESERVED_PEBS 1

/*
 * Maximum difference between two erase counters. If this threshold is
 * exceeded, the WL sub-system starts moving data from used physical
 * eraseblocks with low erase counter to free physical eraseblocks with high
 * erase counter.
 */
#define UBI_WL_THRESHOLD CONFIG_MTD_UBI_WL_THRESHOLD

/*
 * When a physical eraseblock is moved, the WL sub-system has to pick the target
 * physical eraseblock to move to. The simplest way would be just to pick the
 * one with the highest erase counter. But in certain workloads this could lead
 * to an unlimited wear of one or a few physical eraseblocks. Indeed, imagine a
 * situation when the picked physical eraseblock is constantly erased after the
 * data is written to it. So, we have a constant which limits the highest erase
 * counter of the free physical eraseblock to pick. Namely, the WL sub-system
 * does not pick eraseblocks with erase counter greater than the lowest erase
 * counter plus %WL_FREE_MAX_DIFF.
 */
#define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD)
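
/*
 * For example, with %UBI_WL_THRESHOLD at its common default of 4096,
 * %WL_FREE_MAX_DIFF is 8192: a free PEB whose erase counter exceeds the
 * lowest free erase counter by 8192 or more is never picked as a
 * wear-leveling target.
 */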

/*
 * Maximum number of consecutive background thread failures which is enough to
 * switch to read-only mode.
 */
#define WL_MAX_FAILURES 32

static int self_check_ec(struct ubi_device *ubi, int pnum, int ec);
static int self_check_in_wl_tree(const struct ubi_device *ubi,
                                 struct ubi_wl_entry *e, struct rb_root *root);
static int self_check_in_pq(const struct ubi_device *ubi,
                            struct ubi_wl_entry *e);

/**
 * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
 * @e: the wear-leveling entry to add
 * @root: the root of the tree
 *
 * Note, we use (erase counter, physical eraseblock number) pairs as keys in
 * the @ubi->used and @ubi->free RB-trees.
 */
static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
{
        struct rb_node **p, *parent = NULL;

        p = &root->rb_node;
        while (*p) {
                struct ubi_wl_entry *e1;

                parent = *p;
                e1 = rb_entry(parent, struct ubi_wl_entry, u.rb);

                if (e->ec < e1->ec)
                        p = &(*p)->rb_left;
                else if (e->ec > e1->ec)
                        p = &(*p)->rb_right;
                else {
                        ubi_assert(e->pnum != e1->pnum);
                        if (e->pnum < e1->pnum)
                                p = &(*p)->rb_left;
                        else
                                p = &(*p)->rb_right;
                }
        }

        rb_link_node(&e->u.rb, parent, p);
        rb_insert_color(&e->u.rb, root);
}

/**
 * wl_entry_destroy - destroy a wear-leveling entry.
 * @ubi: UBI device description object
 * @e: the wear-leveling entry to destroy
 *
 * This function destroys a wear leveling entry and removes
 * the reference from the lookup table.
 */
static void wl_entry_destroy(struct ubi_device *ubi, struct ubi_wl_entry *e)
{
        ubi->lookuptbl[e->pnum] = NULL;
        kmem_cache_free(ubi_wl_entry_slab, e);
}

/**
 * do_work - do one pending work.
 * @ubi: UBI device description object
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int do_work(struct ubi_device *ubi)
{
        int err;
        struct ubi_work *wrk;

        cond_resched();

        /*
         * @ubi->work_sem is used to synchronize with the workers. Workers take
         * it in read mode, so many of them may be doing works at a time. But
         * the queue flush code has to be sure the whole queue of works is
         * done, and it takes the semaphore in write mode.
         */
        down_read(&ubi->work_sem);
        spin_lock(&ubi->wl_lock);
        if (list_empty(&ubi->works)) {
                spin_unlock(&ubi->wl_lock);
                up_read(&ubi->work_sem);
                return 0;
        }

        wrk = list_entry(ubi->works.next, struct ubi_work, list);
        list_del(&wrk->list);
        ubi->works_count -= 1;
        ubi_assert(ubi->works_count >= 0);
        spin_unlock(&ubi->wl_lock);

        /*
         * Call the worker function. Do not touch the work structure
         * after this call as it will have been freed or reused by that
         * time by the worker function.
         */
        err = wrk->func(ubi, wrk, 0);
        if (err)
                ubi_err(ubi, "work failed with error code %d", err);
        up_read(&ubi->work_sem);

        return err;
}

/**
 * in_wl_tree - check if wear-leveling entry is present in a WL RB-tree.
 * @e: the wear-leveling entry to check
 * @root: the root of the tree
 *
 * This function returns non-zero if @e is in the @root RB-tree and zero if it
 * is not.
 */
static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
{
        struct rb_node *p;

        p = root->rb_node;
        while (p) {
                struct ubi_wl_entry *e1;

                e1 = rb_entry(p, struct ubi_wl_entry, u.rb);

                if (e->pnum == e1->pnum) {
                        ubi_assert(e == e1);
                        return 1;
                }

                if (e->ec < e1->ec)
                        p = p->rb_left;
                else if (e->ec > e1->ec)
                        p = p->rb_right;
                else {
                        ubi_assert(e->pnum != e1->pnum);
                        if (e->pnum < e1->pnum)
                                p = p->rb_left;
                        else
                                p = p->rb_right;
                }
        }

        return 0;
}

/**
 * prot_queue_add - add physical eraseblock to the protection queue.
 * @ubi: UBI device description object
 * @e: the physical eraseblock to add
 *
 * This function adds @e to the tail of the protection queue @ubi->pq, where
 * @e will stay for %UBI_PROT_QUEUE_LEN erase operations and will be
 * temporarily protected from the wear-leveling worker. Note, @wl->lock has to
 * be locked.
 */
static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
{
        int pq_tail = ubi->pq_head - 1;

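        /*
         * @ubi->pq is used as a circular buffer of list heads: the tail sits
         * one slot "behind" @ubi->pq_head and wraps around at
         * %UBI_PROT_QUEUE_LEN.
         */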
        if (pq_tail < 0)
                pq_tail = UBI_PROT_QUEUE_LEN - 1;
        ubi_assert(pq_tail >= 0 && pq_tail < UBI_PROT_QUEUE_LEN);
        list_add_tail(&e->u.list, &ubi->pq[pq_tail]);
        dbg_wl("added PEB %d EC %d to the protection queue", e->pnum, e->ec);
}

/**
 * find_wl_entry - find wear-leveling entry closest to certain erase counter.
 * @ubi: UBI device description object
 * @root: the RB-tree where to look for
 * @diff: maximum possible difference from the smallest erase counter
 *
 * This function looks for a wear leveling entry with erase counter closest to
 * min + @diff, where min is the smallest erase counter.
 */
static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi,
                                          struct rb_root *root, int diff)
{
        struct rb_node *p;
        struct ubi_wl_entry *e, *prev_e = NULL;
        int max;

        e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
        max = e->ec + diff;

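        /*
         * Standard BST predecessor walk: find the entry with the greatest
         * erase counter that is still strictly below @max. @prev_e trails one
         * step behind @e, so a second-best candidate is available for the
         * fastmap anchor check below.
         */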
        p = root->rb_node;
        while (p) {
                struct ubi_wl_entry *e1;

                e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
                if (e1->ec >= max)
                        p = p->rb_left;
                else {
                        p = p->rb_right;
                        prev_e = e;
                        e = e1;
                }
        }

        /* If no fastmap has been written and this WL entry can be used
         * as anchor PEB, hold it back and return the second best WL entry
         * such that fastmap can use the anchor PEB later. */
        if (prev_e && !ubi->fm_disabled &&
            !ubi->fm && e->pnum < UBI_FM_MAX_START)
                return prev_e;

        return e;
}

/**
 * find_mean_wl_entry - find wear-leveling entry with medium erase counter.
 * @ubi: UBI device description object
 * @root: the RB-tree where to look for
 *
 * This function looks for a wear leveling entry with medium erase counter,
 * no greater than the lowest erase counter plus %WL_FREE_MAX_DIFF/2.
 */
static struct ubi_wl_entry *find_mean_wl_entry(struct ubi_device *ubi,
                                               struct rb_root *root)
{
        struct ubi_wl_entry *e, *first, *last;

        first = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
        last = rb_entry(rb_last(root), struct ubi_wl_entry, u.rb);

        if (last->ec - first->ec < WL_FREE_MAX_DIFF) {
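                /*
                 * All free PEBs are within the allowed spread, so the root
                 * of the RB-tree is a cheap stand-in for a medium-EC entry.
                 */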
                e = rb_entry(root->rb_node, struct ubi_wl_entry, u.rb);

                /* If no fastmap has been written and this WL entry can be used
                 * as anchor PEB, hold it back and return the second best
                 * WL entry such that fastmap can use the anchor PEB later. */
                e = may_reserve_for_fm(ubi, e, root);
        } else
                e = find_wl_entry(ubi, root, WL_FREE_MAX_DIFF/2);

        return e;
}

/**
 * wl_get_wle - get a mean wl entry to be used by ubi_wl_get_peb() or
 * refill_wl_user_pool().
 * @ubi: UBI device description object
 *
 * This function returns a wear leveling entry in case of success and
 * NULL in case of failure.
 */
static struct ubi_wl_entry *wl_get_wle(struct ubi_device *ubi)
{
        struct ubi_wl_entry *e;

        e = find_mean_wl_entry(ubi, &ubi->free);
        if (!e) {
                ubi_err(ubi, "no free eraseblocks");
                return NULL;
        }

        self_check_in_wl_tree(ubi, e, &ubi->free);

        /*
         * Move the physical eraseblock to the protection queue where it will
         * be protected from being moved for some time.
         */
        rb_erase(&e->u.rb, &ubi->free);
        ubi->free_count--;
        dbg_wl("PEB %d EC %d", e->pnum, e->ec);

        return e;
}

/**
 * prot_queue_del - remove a physical eraseblock from the protection queue.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to remove
 *
 * This function deletes PEB @pnum from the protection queue and returns zero
 * in case of success and %-ENODEV if the PEB was not found.
 */
static int prot_queue_del(struct ubi_device *ubi, int pnum)
{
        struct ubi_wl_entry *e;

        e = ubi->lookuptbl[pnum];
        if (!e)
                return -ENODEV;

        if (self_check_in_pq(ubi, e))
                return -ENODEV;

        list_del(&e->u.list);
        dbg_wl("deleted PEB %d from the protection queue", e->pnum);
        return 0;
}

/**
 * sync_erase - synchronously erase a physical eraseblock.
 * @ubi: UBI device description object
 * @e: the physical eraseblock to erase
 * @torture: if the physical eraseblock has to be tortured
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
                      int torture)
{
        int err;
        struct ubi_ec_hdr *ec_hdr;
        unsigned long long ec = e->ec;

        dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec);

        err = self_check_ec(ubi, e->pnum, e->ec);
        if (err)
                return -EINVAL;

        ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
        if (!ec_hdr)
                return -ENOMEM;

        err = ubi_io_sync_erase(ubi, e->pnum, torture);
        if (err < 0)
                goto out_free;

        ec += err;
        if (ec > UBI_MAX_ERASECOUNTER) {
                /*
                 * Erase counter overflow. Upgrade UBI and use 64-bit
                 * erase counters internally.
                 */
                ubi_err(ubi, "erase counter overflow at PEB %d, EC %llu",
                        e->pnum, ec);
                err = -EINVAL;
                goto out_free;
        }

        dbg_wl("erased PEB %d, new EC %llu", e->pnum, ec);

        ec_hdr->ec = cpu_to_be64(ec);

        err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr);
        if (err)
                goto out_free;

        e->ec = ec;
        spin_lock(&ubi->wl_lock);
        if (e->ec > ubi->max_ec)
                ubi->max_ec = e->ec;
        spin_unlock(&ubi->wl_lock);

out_free:
        kfree(ec_hdr);
        return err;
}

/**
 * serve_prot_queue - check if it is time to stop protecting PEBs.
 * @ubi: UBI device description object
 *
 * This function is called after each erase operation and removes PEBs from the
 * tail of the protection queue. These PEBs have been protected for long enough
 * and should be moved to the used tree.
 */
static void serve_prot_queue(struct ubi_device *ubi)
{
        struct ubi_wl_entry *e, *tmp;
        int count;

        /*
         * There may be several protected physical eraseblocks to remove,
         * process them all.
         */
repeat:
        count = 0;
        spin_lock(&ubi->wl_lock);
        list_for_each_entry_safe(e, tmp, &ubi->pq[ubi->pq_head], u.list) {
                dbg_wl("PEB %d EC %d protection over, move to used tree",
                        e->pnum, e->ec);

                list_del(&e->u.list);
                wl_tree_add(e, &ubi->used);
                if (count++ > 32) {
                        /*
                         * Let's be nice and avoid holding the spinlock for
                         * too long.
                         */
                        spin_unlock(&ubi->wl_lock);
                        cond_resched();
                        goto repeat;
                }
        }

        ubi->pq_head += 1;
        if (ubi->pq_head == UBI_PROT_QUEUE_LEN)
                ubi->pq_head = 0;
        ubi_assert(ubi->pq_head >= 0 && ubi->pq_head < UBI_PROT_QUEUE_LEN);
        spin_unlock(&ubi->wl_lock);
}

#ifdef __UBOOT__
void ubi_do_worker(struct ubi_device *ubi)
{
        int err;

        if (list_empty(&ubi->works) || ubi->ro_mode ||
            !ubi->thread_enabled || ubi_dbg_is_bgt_disabled(ubi))
                return;

        spin_lock(&ubi->wl_lock);
        while (!list_empty(&ubi->works)) {
                /*
                 * Call do_work, which executes exactly one work from the
                 * queue, including removing it from the work queue.
                 */
                spin_unlock(&ubi->wl_lock);
                err = do_work(ubi);
                spin_lock(&ubi->wl_lock);
                if (err) {
                        ubi_err(ubi, "%s: work failed with error code %d",
                                ubi->bgt_name, err);
                }
        }
        spin_unlock(&ubi->wl_lock);
}
#endif

/**
 * __schedule_ubi_work - schedule a work.
 * @ubi: UBI device description object
 * @wrk: the work to schedule
 *
 * This function adds a work defined by @wrk to the tail of the pending works
 * list. Can only be used if ubi->work_sem is already held in read mode!
 */
static void __schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
{
        spin_lock(&ubi->wl_lock);
        list_add_tail(&wrk->list, &ubi->works);
        ubi_assert(ubi->works_count >= 0);
        ubi->works_count += 1;
#ifndef __UBOOT__
        if (ubi->thread_enabled && !ubi_dbg_is_bgt_disabled(ubi))
                wake_up_process(ubi->bgt_thread);
#endif
        spin_unlock(&ubi->wl_lock);
}

/**
 * schedule_ubi_work - schedule a work.
 * @ubi: UBI device description object
 * @wrk: the work to schedule
 *
 * This function adds a work defined by @wrk to the tail of the pending works
 * list.
 */
static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
{
        down_read(&ubi->work_sem);
        __schedule_ubi_work(ubi, wrk);
        up_read(&ubi->work_sem);
}

static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
                        int shutdown);

/**
 * schedule_erase - schedule an erase work.
 * @ubi: UBI device description object
 * @e: the WL entry of the physical eraseblock to erase
 * @vol_id: the volume ID that last used this PEB
 * @lnum: the last used logical eraseblock number for the PEB
 * @torture: if the physical eraseblock has to be tortured
 *
 * This function returns zero in case of success and %-ENOMEM in case of
 * failure.
 */
static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
                          int vol_id, int lnum, int torture)
{
        struct ubi_work *wl_wrk;

        ubi_assert(e);

        dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
               e->pnum, e->ec, torture);

        wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
        if (!wl_wrk)
                return -ENOMEM;

        wl_wrk->func = &erase_worker;
        wl_wrk->e = e;
        wl_wrk->vol_id = vol_id;
        wl_wrk->lnum = lnum;
        wl_wrk->torture = torture;

        schedule_ubi_work(ubi, wl_wrk);

#ifdef __UBOOT__
        ubi_do_worker(ubi);
#endif
        return 0;
}

/**
 * do_sync_erase - run the erase worker synchronously.
 * @ubi: UBI device description object
 * @e: the WL entry of the physical eraseblock to erase
 * @vol_id: the volume ID that last used this PEB
 * @lnum: the last used logical eraseblock number for the PEB
 * @torture: if the physical eraseblock has to be tortured
 */
static int do_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
                         int vol_id, int lnum, int torture)
{
        struct ubi_work *wl_wrk;

        dbg_wl("sync erase of PEB %i", e->pnum);

        wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
        if (!wl_wrk)
                return -ENOMEM;

        wl_wrk->e = e;
        wl_wrk->vol_id = vol_id;
        wl_wrk->lnum = lnum;
        wl_wrk->torture = torture;

        return erase_worker(ubi, wl_wrk, 0);
}

/**
 * wear_leveling_worker - wear-leveling worker function.
 * @ubi: UBI device description object
 * @wrk: the work object
 * @shutdown: non-zero if the worker has to free memory and exit
 * because the WL-subsystem is shutting down
 *
 * This function copies a more worn out physical eraseblock to a less worn out
 * one. Returns zero in case of success and a negative error code in case of
 * failure.
 */
static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
                                int shutdown)
{
        int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
        int vol_id = -1, lnum = -1;
#ifdef CONFIG_MTD_UBI_FASTMAP
        int anchor = wrk->anchor;
#endif
        struct ubi_wl_entry *e1, *e2;
        struct ubi_vid_hdr *vid_hdr;

        kfree(wrk);
        if (shutdown)
                return 0;

        vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
        if (!vid_hdr)
                return -ENOMEM;

        mutex_lock(&ubi->move_mutex);
        spin_lock(&ubi->wl_lock);
        ubi_assert(!ubi->move_from && !ubi->move_to);
        ubi_assert(!ubi->move_to_put);

        if (!ubi->free.rb_node ||
            (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
                /*
                 * No free physical eraseblocks? Well, they must be waiting in
                 * the queue to be erased. Cancel movement - it will be
                 * triggered again when a free physical eraseblock appears.
                 *
                 * No used physical eraseblocks? They must be temporarily
                 * protected from being moved. They will be moved to the
                 * @ubi->used tree later and the wear-leveling will be
                 * triggered again.
                 */
                dbg_wl("cancel WL, a list is empty: free %d, used %d",
                       !ubi->free.rb_node, !ubi->used.rb_node);
                goto out_cancel;
        }

#ifdef CONFIG_MTD_UBI_FASTMAP
        /* Check whether we need to produce an anchor PEB */
        if (!anchor)
                anchor = !anchor_pebs_avalible(&ubi->free);

        if (anchor) {
                e1 = find_anchor_wl_entry(&ubi->used);
                if (!e1)
                        goto out_cancel;
                e2 = get_peb_for_wl(ubi);
                if (!e2)
                        goto out_cancel;

                self_check_in_wl_tree(ubi, e1, &ubi->used);
                rb_erase(&e1->u.rb, &ubi->used);
                dbg_wl("anchor-move PEB %d to PEB %d", e1->pnum, e2->pnum);
        } else if (!ubi->scrub.rb_node) {
#else
        if (!ubi->scrub.rb_node) {
#endif
                /*
                 * Now pick the least worn-out used physical eraseblock and a
                 * highly worn-out free physical eraseblock. If the erase
                 * counters differ by enough, start wear-leveling.
                 */
                e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
                e2 = get_peb_for_wl(ubi);
                if (!e2)
                        goto out_cancel;

                if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
                        dbg_wl("no WL needed: min used EC %d, max free EC %d",
                               e1->ec, e2->ec);

                        /* Give the unused PEB back */
                        wl_tree_add(e2, &ubi->free);
                        ubi->free_count++;
                        goto out_cancel;
                }
                self_check_in_wl_tree(ubi, e1, &ubi->used);
                rb_erase(&e1->u.rb, &ubi->used);
                dbg_wl("move PEB %d EC %d to PEB %d EC %d",
                       e1->pnum, e1->ec, e2->pnum, e2->ec);
        } else {
                /* Perform scrubbing */
                scrubbing = 1;
                e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
                e2 = get_peb_for_wl(ubi);
                if (!e2)
                        goto out_cancel;

                self_check_in_wl_tree(ubi, e1, &ubi->scrub);
                rb_erase(&e1->u.rb, &ubi->scrub);
                dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
        }

        ubi->move_from = e1;
        ubi->move_to = e2;
        spin_unlock(&ubi->wl_lock);

        /*
         * Now we are going to copy physical eraseblock @e1->pnum to @e2->pnum.
         * We so far do not know which logical eraseblock our physical
         * eraseblock (@e1) belongs to. We have to read the volume identifier
         * header first.
         *
         * Note, we are protected from this PEB being unmapped and erased. The
         * 'ubi_wl_put_peb()' would wait for moving to be finished if the PEB
         * which is being moved was unmapped.
         */

        err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0);
        if (err && err != UBI_IO_BITFLIPS) {
                if (err == UBI_IO_FF) {
                        /*
                         * We are trying to move PEB without a VID header. UBI
                         * always writes VID headers shortly after the PEB was
                         * given, so we have a situation when it has not yet
                         * had a chance to write it, because it was preempted.
                         * So put this PEB to the protection queue for now,
                         * because presumably more data will be written there
                         * (including the missing VID header), and then we'll
                         * move it.
                         */
                        dbg_wl("PEB %d has no VID header", e1->pnum);
                        protect = 1;
                        goto out_not_moved;
                } else if (err == UBI_IO_FF_BITFLIPS) {
                        /*
                         * The same situation as %UBI_IO_FF, but bit-flips were
                         * detected. It is better to schedule this PEB for
                         * scrubbing.
                         */
                        dbg_wl("PEB %d has no VID header but has bit-flips",
                               e1->pnum);
                        scrubbing = 1;
                        goto out_not_moved;
                }

                ubi_err(ubi, "error %d while reading VID header from PEB %d",
                        err, e1->pnum);
                goto out_error;
        }

        vol_id = be32_to_cpu(vid_hdr->vol_id);
        lnum = be32_to_cpu(vid_hdr->lnum);

        err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
        if (err) {
                if (err == MOVE_CANCEL_RACE) {
                        /*
                         * The LEB has not been moved because the volume is
                         * being deleted or the PEB has been put meanwhile. We
                         * should prevent this PEB from being selected for
                         * wear-leveling movement again, so put it to the
                         * protection queue.
                         */
                        protect = 1;
                        goto out_not_moved;
                }
                if (err == MOVE_RETRY) {
                        scrubbing = 1;
                        goto out_not_moved;
                }
                if (err == MOVE_TARGET_BITFLIPS || err == MOVE_TARGET_WR_ERR ||
                    err == MOVE_TARGET_RD_ERR) {
                        /*
                         * Target PEB had bit-flips or write error - torture it.
                         */
                        torture = 1;
                        goto out_not_moved;
                }

                if (err == MOVE_SOURCE_RD_ERR) {
                        /*
                         * An error happened while reading the source PEB. Do
                         * not switch to R/O mode in this case, and give the
                         * upper layers a possibility to recover from this,
                         * e.g. by unmapping corresponding LEB. Instead, just
                         * put this PEB to the @ubi->erroneous list to prevent
                         * UBI from trying to move it over and over again.
                         */
                        if (ubi->erroneous_peb_count > ubi->max_erroneous) {
                                ubi_err(ubi, "too many erroneous eraseblocks (%d)",
                                        ubi->erroneous_peb_count);
                                goto out_error;
                        }
                        erroneous = 1;
                        goto out_not_moved;
                }

                if (err < 0)
                        goto out_error;

                ubi_assert(0);
        }

        /* The PEB has been successfully moved */
        if (scrubbing)
                ubi_msg(ubi, "scrubbed PEB %d (LEB %d:%d), data moved to PEB %d",
                        e1->pnum, vol_id, lnum, e2->pnum);
        ubi_free_vid_hdr(ubi, vid_hdr);

        spin_lock(&ubi->wl_lock);
        if (!ubi->move_to_put) {
                wl_tree_add(e2, &ubi->used);
                e2 = NULL;
        }
        ubi->move_from = ubi->move_to = NULL;
        ubi->move_to_put = ubi->wl_scheduled = 0;
        spin_unlock(&ubi->wl_lock);

        err = do_sync_erase(ubi, e1, vol_id, lnum, 0);
        if (err) {
                if (e2)
                        wl_entry_destroy(ubi, e2);
                goto out_ro;
        }

        if (e2) {
                /*
                 * Well, the target PEB was put meanwhile, schedule it for
                 * erasure.
                 */
                dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase",
                       e2->pnum, vol_id, lnum);
                err = do_sync_erase(ubi, e2, vol_id, lnum, 0);
                if (err)
                        goto out_ro;
        }

        dbg_wl("done");
        mutex_unlock(&ubi->move_mutex);
        return 0;

        /*
         * For some reason the LEB was not moved: it might be an error, or it
         * might be something else. @e1 was not changed, so return it back.
         * @e2 might have been changed, schedule it for erasure.
         */
out_not_moved:
        if (vol_id != -1)
                dbg_wl("cancel moving PEB %d (LEB %d:%d) to PEB %d (%d)",
                       e1->pnum, vol_id, lnum, e2->pnum, err);
        else
                dbg_wl("cancel moving PEB %d to PEB %d (%d)",
                       e1->pnum, e2->pnum, err);
        spin_lock(&ubi->wl_lock);
        if (protect)
                prot_queue_add(ubi, e1);
        else if (erroneous) {
                wl_tree_add(e1, &ubi->erroneous);
                ubi->erroneous_peb_count += 1;
        } else if (scrubbing)
                wl_tree_add(e1, &ubi->scrub);
        else
                wl_tree_add(e1, &ubi->used);
        ubi_assert(!ubi->move_to_put);
        ubi->move_from = ubi->move_to = NULL;
        ubi->wl_scheduled = 0;
        spin_unlock(&ubi->wl_lock);

        ubi_free_vid_hdr(ubi, vid_hdr);
        err = do_sync_erase(ubi, e2, vol_id, lnum, torture);
        if (err)
                goto out_ro;

        mutex_unlock(&ubi->move_mutex);
        return 0;

out_error:
        if (vol_id != -1)
                ubi_err(ubi, "error %d while moving PEB %d (LEB %d:%d) to PEB %d",
                        err, e1->pnum, vol_id, lnum, e2->pnum);
        else
                ubi_err(ubi, "error %d while moving PEB %d to PEB %d",
                        err, e1->pnum, e2->pnum);
        spin_lock(&ubi->wl_lock);
        ubi->move_from = ubi->move_to = NULL;
        ubi->move_to_put = ubi->wl_scheduled = 0;
        spin_unlock(&ubi->wl_lock);

        ubi_free_vid_hdr(ubi, vid_hdr);
        wl_entry_destroy(ubi, e1);
        wl_entry_destroy(ubi, e2);

out_ro:
        ubi_ro_mode(ubi);
        mutex_unlock(&ubi->move_mutex);
        ubi_assert(err != 0);
        return err < 0 ? err : -EIO;

out_cancel:
        ubi->wl_scheduled = 0;
        spin_unlock(&ubi->wl_lock);
        mutex_unlock(&ubi->move_mutex);
        ubi_free_vid_hdr(ubi, vid_hdr);
        return 0;
}

/**
 * ensure_wear_leveling - schedule wear-leveling if it is needed.
 * @ubi: UBI device description object
 * @nested: set to non-zero if this function is called from UBI worker
 *
 * This function checks if it is time to start wear-leveling and schedules it
 * if yes. This function returns zero in case of success and a negative error
 * code in case of failure.
 */
static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
{
        int err = 0;
        struct ubi_wl_entry *e1;
        struct ubi_wl_entry *e2;
        struct ubi_work *wrk;

        spin_lock(&ubi->wl_lock);
        if (ubi->wl_scheduled)
                /* Wear-leveling is already in the work queue */
                goto out_unlock;

        /*
         * If the ubi->scrub tree is not empty, scrubbing is needed, and the
         * WL worker has to be scheduled anyway.
         */
        if (!ubi->scrub.rb_node) {
                if (!ubi->used.rb_node || !ubi->free.rb_node)
                        /* No physical eraseblocks - no deal */
                        goto out_unlock;

                /*
                 * We schedule wear-leveling only if the difference between the
                 * lowest erase counter of used physical eraseblocks and a high
                 * erase counter of free physical eraseblocks is greater than
                 * %UBI_WL_THRESHOLD.
                 */
                e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
                e2 = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);

                if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
                        goto out_unlock;
                dbg_wl("schedule wear-leveling");
        } else
                dbg_wl("schedule scrubbing");

        ubi->wl_scheduled = 1;
        spin_unlock(&ubi->wl_lock);

        wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
        if (!wrk) {
                err = -ENOMEM;
                goto out_cancel;
        }

        wrk->anchor = 0;
        wrk->func = &wear_leveling_worker;
        if (nested)
                __schedule_ubi_work(ubi, wrk);
#ifndef __UBOOT__
        else
                schedule_ubi_work(ubi, wrk);
#else
        else {
                schedule_ubi_work(ubi, wrk);
                ubi_do_worker(ubi);
        }
#endif
        return err;

out_cancel:
        spin_lock(&ubi->wl_lock);
        ubi->wl_scheduled = 0;
out_unlock:
        spin_unlock(&ubi->wl_lock);
        return err;
}

/**
 * erase_worker - physical eraseblock erase worker function.
 * @ubi: UBI device description object
 * @wl_wrk: the work object
 * @shutdown: non-zero if the worker has to free memory and exit
 * because the WL sub-system is shutting down
 *
 * This function erases a physical eraseblock and performs torture testing if
 * needed. It also takes care of marking the physical eraseblock bad if
 * needed. Returns zero in case of success and a negative error code in case of
 * failure.
 */
static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
                        int shutdown)
{
        struct ubi_wl_entry *e = wl_wrk->e;
        int pnum = e->pnum;
        int vol_id = wl_wrk->vol_id;
        int lnum = wl_wrk->lnum;
        int err, available_consumed = 0;

        if (shutdown) {
                dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
                kfree(wl_wrk);
                wl_entry_destroy(ubi, e);
                return 0;
        }

        dbg_wl("erase PEB %d EC %d LEB %d:%d",
               pnum, e->ec, wl_wrk->vol_id, wl_wrk->lnum);

        err = sync_erase(ubi, e, wl_wrk->torture);
        if (!err) {
                /* Fine, we've erased it successfully */
                kfree(wl_wrk);

                spin_lock(&ubi->wl_lock);
                wl_tree_add(e, &ubi->free);
                ubi->free_count++;
                spin_unlock(&ubi->wl_lock);

                /*
                 * One more erase operation has happened, take care of the
                 * protected physical eraseblocks.
                 */
                serve_prot_queue(ubi);

                /* And take care of wear-leveling */
                err = ensure_wear_leveling(ubi, 1);
                return err;
        }

        ubi_err(ubi, "failed to erase PEB %d, error %d", pnum, err);
        kfree(wl_wrk);

        if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
            err == -EBUSY) {
                int err1;

                /* Re-schedule the PEB for erasure */
                err1 = schedule_erase(ubi, e, vol_id, lnum, 0);
                if (err1) {
                        err = err1;
                        goto out_ro;
                }
                return err;
        }

        wl_entry_destroy(ubi, e);
        if (err != -EIO)
                /*
                 * If this is not %-EIO, we have no idea what to do. Scheduling
                 * this physical eraseblock for erasure again would cause
                 * errors again and again. Well, let's switch to R/O mode.
                 */
                goto out_ro;

        /* It is %-EIO, the PEB went bad */

        if (!ubi->bad_allowed) {
                ubi_err(ubi, "bad physical eraseblock %d detected", pnum);
                goto out_ro;
        }

        spin_lock(&ubi->volumes_lock);
        if (ubi->beb_rsvd_pebs == 0) {
                if (ubi->avail_pebs == 0) {
                        spin_unlock(&ubi->volumes_lock);
                        ubi_err(ubi, "no reserved/available physical eraseblocks");
                        goto out_ro;
                }
                ubi->avail_pebs -= 1;
                available_consumed = 1;
        }
        spin_unlock(&ubi->volumes_lock);

        ubi_msg(ubi, "mark PEB %d as bad", pnum);
        err = ubi_io_mark_bad(ubi, pnum);
        if (err)
                goto out_ro;

        spin_lock(&ubi->volumes_lock);
        if (ubi->beb_rsvd_pebs > 0) {
                if (available_consumed) {
                        /*
                         * The amount of reserved PEBs increased since we last
                         * checked.
                         */
                        ubi->avail_pebs += 1;
                        available_consumed = 0;
                }
                ubi->beb_rsvd_pebs -= 1;
        }
        ubi->bad_peb_count += 1;
        ubi->good_peb_count -= 1;
        ubi_calculate_reserved(ubi);
        if (available_consumed)
                ubi_warn(ubi, "no PEBs in the reserved pool, used an available PEB");
        else if (ubi->beb_rsvd_pebs)
                ubi_msg(ubi, "%d PEBs left in the reserve",
                        ubi->beb_rsvd_pebs);
        else
                ubi_warn(ubi, "last PEB from the reserve was used");
        spin_unlock(&ubi->volumes_lock);

        return err;

out_ro:
        if (available_consumed) {
                spin_lock(&ubi->volumes_lock);
                ubi->avail_pebs += 1;
                spin_unlock(&ubi->volumes_lock);
        }
        ubi_ro_mode(ubi);
        return err;
}

/**
 * ubi_wl_put_peb - return a PEB to the wear-leveling sub-system.
 * @ubi: UBI device description object
 * @vol_id: the volume ID that last used this PEB
 * @lnum: the last used logical eraseblock number for the PEB
 * @pnum: physical eraseblock to return
 * @torture: if this physical eraseblock has to be tortured
 *
 * This function is called to return physical eraseblock @pnum to the pool of
 * free physical eraseblocks. The @torture flag has to be set if an I/O error
 * occurred to this @pnum and it has to be tested. This function returns zero
 * in case of success, and a negative error code in case of failure.
 */
int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum,
                   int pnum, int torture)
{
        int err;
        struct ubi_wl_entry *e;

        dbg_wl("PEB %d", pnum);
        ubi_assert(pnum >= 0);
        ubi_assert(pnum < ubi->peb_count);

        down_read(&ubi->fm_protect);

retry:
        spin_lock(&ubi->wl_lock);
        e = ubi->lookuptbl[pnum];
        if (e == ubi->move_from) {
                /*
                 * User is putting the physical eraseblock which was selected to
                 * be moved. It will be scheduled for erasure in the
                 * wear-leveling worker.
                 */
                dbg_wl("PEB %d is being moved, wait", pnum);
                spin_unlock(&ubi->wl_lock);

                /* Wait for the WL worker by taking the @ubi->move_mutex */
                mutex_lock(&ubi->move_mutex);
                mutex_unlock(&ubi->move_mutex);
                goto retry;
        } else if (e == ubi->move_to) {
                /*
                 * User is putting the physical eraseblock which was selected
                 * as the target the data is moved to. It may happen if the EBA
                 * sub-system already re-mapped the LEB in 'ubi_eba_copy_leb()'
                 * but the WL sub-system has not put the PEB to the "used" tree
                 * yet, but it is about to do this. So we just set a flag which
                 * will tell the WL worker that the PEB is not needed anymore
                 * and should be scheduled for erasure.
                 */
                dbg_wl("PEB %d is the target of data moving", pnum);
                ubi_assert(!ubi->move_to_put);
                ubi->move_to_put = 1;
                spin_unlock(&ubi->wl_lock);
                up_read(&ubi->fm_protect);
                return 0;
        } else {
                if (in_wl_tree(e, &ubi->used)) {
                        self_check_in_wl_tree(ubi, e, &ubi->used);
                        rb_erase(&e->u.rb, &ubi->used);
                } else if (in_wl_tree(e, &ubi->scrub)) {
                        self_check_in_wl_tree(ubi, e, &ubi->scrub);
                        rb_erase(&e->u.rb, &ubi->scrub);
                } else if (in_wl_tree(e, &ubi->erroneous)) {
                        self_check_in_wl_tree(ubi, e, &ubi->erroneous);
                        rb_erase(&e->u.rb, &ubi->erroneous);
                        ubi->erroneous_peb_count -= 1;
                        ubi_assert(ubi->erroneous_peb_count >= 0);
                        /* Erroneous PEBs should be tortured */
                        torture = 1;
                } else {
                        err = prot_queue_del(ubi, e->pnum);
                        if (err) {
                                ubi_err(ubi, "PEB %d not found", pnum);
                                ubi_ro_mode(ubi);
                                spin_unlock(&ubi->wl_lock);
                                up_read(&ubi->fm_protect);
                                return err;
                        }
                }
        }
        spin_unlock(&ubi->wl_lock);

        err = schedule_erase(ubi, e, vol_id, lnum, torture);
        if (err) {
                spin_lock(&ubi->wl_lock);
                wl_tree_add(e, &ubi->used);
                spin_unlock(&ubi->wl_lock);
        }

        up_read(&ubi->fm_protect);
        return err;
}

/**
 * ubi_wl_scrub_peb - schedule a physical eraseblock for scrubbing.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to schedule
 *
 * If a bit-flip in a physical eraseblock is detected, this physical eraseblock
 * needs scrubbing. This function schedules a physical eraseblock for
 * scrubbing which is done in background. This function returns zero in case of
 * success and a negative error code in case of failure.
 */
int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
{
        struct ubi_wl_entry *e;

        ubi_msg(ubi, "schedule PEB %d for scrubbing", pnum);

retry:
        spin_lock(&ubi->wl_lock);
        e = ubi->lookuptbl[pnum];
        if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub) ||
                                   in_wl_tree(e, &ubi->erroneous)) {
                spin_unlock(&ubi->wl_lock);
                return 0;
        }

        if (e == ubi->move_to) {
                /*
                 * This physical eraseblock was used to move data to. The data
                 * was moved but the PEB was not yet inserted to the proper
                 * tree. We should just wait a little and let the WL worker
                 * proceed.
                 */
                spin_unlock(&ubi->wl_lock);
                dbg_wl("the PEB %d is not in proper tree, retry", pnum);
                yield();
                goto retry;
        }

        if (in_wl_tree(e, &ubi->used)) {
                self_check_in_wl_tree(ubi, e, &ubi->used);
                rb_erase(&e->u.rb, &ubi->used);
        } else {
                int err;

                err = prot_queue_del(ubi, e->pnum);
                if (err) {
                        ubi_err(ubi, "PEB %d not found", pnum);
                        ubi_ro_mode(ubi);
                        spin_unlock(&ubi->wl_lock);
                        return err;
                }
        }

        wl_tree_add(e, &ubi->scrub);
        spin_unlock(&ubi->wl_lock);

        /*
         * Technically scrubbing is the same as wear-leveling, so it is done
         * by the WL worker.
         */
        return ensure_wear_leveling(ubi, 0);
}

/**
 * ubi_wl_flush - flush all pending works.
 * @ubi: UBI device description object
 * @vol_id: the volume id to flush for
 * @lnum: the logical eraseblock number to flush for
 *
 * This function executes all pending works for a particular volume id /
 * logical eraseblock number pair. If either value is set to %UBI_ALL, then it
 * acts as a wildcard for all of the corresponding volume numbers or logical
 * eraseblock numbers. It returns zero in case of success and a negative error
 * code in case of failure.
 */
int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum)
{
        int err = 0;
        int found = 1;

        /*
         * Erase while the pending works queue is not empty, but not more than
         * the number of currently pending works.
         */
        dbg_wl("flush pending work for LEB %d:%d (%d pending works)",
               vol_id, lnum, ubi->works_count);

        while (found) {
                struct ubi_work *wrk, *tmp;
                found = 0;

                down_read(&ubi->work_sem);
                spin_lock(&ubi->wl_lock);
                list_for_each_entry_safe(wrk, tmp, &ubi->works, list) {
                        if ((vol_id == UBI_ALL || wrk->vol_id == vol_id) &&
                            (lnum == UBI_ALL || wrk->lnum == lnum)) {
                                list_del(&wrk->list);
                                ubi->works_count -= 1;
                                ubi_assert(ubi->works_count >= 0);
                                spin_unlock(&ubi->wl_lock);

                                err = wrk->func(ubi, wrk, 0);
                                if (err) {
                                        up_read(&ubi->work_sem);
                                        return err;
                                }

                                spin_lock(&ubi->wl_lock);
                                found = 1;
                                break;
                        }
                }
                spin_unlock(&ubi->wl_lock);
                up_read(&ubi->work_sem);
        }

        /*
         * Make sure all the works which have been done in parallel are
         * finished.
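         * Taking @ubi->work_sem in write mode blocks until every worker
         * currently holding it in read mode has finished; the empty critical
         * section below is used purely as a barrier.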
         */
        down_write(&ubi->work_sem);
        up_write(&ubi->work_sem);

        return err;
}

/**
 * tree_destroy - destroy an RB-tree.
 * @ubi: UBI device description object
 * @root: the root of the tree to destroy
 */
static void tree_destroy(struct ubi_device *ubi, struct rb_root *root)
{
        struct rb_node *rb;
        struct ubi_wl_entry *e;

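        /*
         * Iterative post-order destruction: descend to a leaf, free it, and
         * clear the parent's child pointer so that the parent itself becomes
         * a leaf on a later iteration. This avoids recursion and needs no
         * extra memory.
         */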
1417         rb = root->rb_node;
1418         while (rb) {
1419                 if (rb->rb_left)
1420                         rb = rb->rb_left;
1421                 else if (rb->rb_right)
1422                         rb = rb->rb_right;
1423                 else {
1424                         e = rb_entry(rb, struct ubi_wl_entry, u.rb);
1425
1426                         rb = rb_parent(rb);
1427                         if (rb) {
1428                                 if (rb->rb_left == &e->u.rb)
1429                                         rb->rb_left = NULL;
1430                                 else
1431                                         rb->rb_right = NULL;
1432                         }
1433
1434                         wl_entry_destroy(ubi, e);
1435                 }
1436         }
1437 }
1438
1439 /**
1440  * ubi_thread - UBI background thread.
1441  * @u: the UBI device description object pointer
1442  */
1443 int ubi_thread(void *u)
1444 {
1445         int failures = 0;
1446         struct ubi_device *ubi = u;
1447
1448         ubi_msg(ubi, "background thread \"%s\" started, PID %d",
1449                 ubi->bgt_name, task_pid_nr(current));
1450
1451         set_freezable();
1452         for (;;) {
1453                 int err;
1454
1455                 if (kthread_should_stop())
1456                         break;
1457
1458                 if (try_to_freeze())
1459                         continue;
1460
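                /*
                 * Sleep if there is nothing to do or the thread is not
                 * allowed to run (read-only mode, disabled, or debugging).
                 */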
1461                 spin_lock(&ubi->wl_lock);
1462                 if (list_empty(&ubi->works) || ubi->ro_mode ||
1463                     !ubi->thread_enabled || ubi_dbg_is_bgt_disabled(ubi)) {
1464                         set_current_state(TASK_INTERRUPTIBLE);
1465                         spin_unlock(&ubi->wl_lock);
1466                         schedule();
1467                         continue;
1468                 }
1469                 spin_unlock(&ubi->wl_lock);
1470
1471                 err = do_work(ubi);
1472                 if (err) {
1473                         ubi_err(ubi, "%s: work failed with error code %d",
1474                                 ubi->bgt_name, err);
1475                         if (failures++ > WL_MAX_FAILURES) {
1476                                 /*
1477                                  * Too many failures, disable the thread and
1478                                  * switch to read-only mode.
1479                                  */
1480                                 ubi_msg(ubi, "%s: %d consecutive failures",
1481                                         ubi->bgt_name, WL_MAX_FAILURES);
1482                                 ubi_ro_mode(ubi);
1483                                 ubi->thread_enabled = 0;
1484                                 continue;
1485                         }
1486                 } else
1487                         failures = 0;
1488
1489                 cond_resched();
1490         }
1491
1492         dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
1493         return 0;
1494 }
1495
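/*
 * Illustrative sketch (an assumption about the caller, not code from this
 * file): the UBI core typically starts this thread at attach time along
 * these lines and stops it with kthread_stop() on detach:
 */
#if 0	/* illustrative only */
        ubi->bgt_thread = kthread_create(ubi_thread, ubi, "%s", ubi->bgt_name);
        if (IS_ERR(ubi->bgt_thread))
                return PTR_ERR(ubi->bgt_thread);
        wake_up_process(ubi->bgt_thread);
#endif
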
1496 /**
1497  * shutdown_work - shut down all pending works.
1498  * @ubi: UBI device description object
1499  */
1500 static void shutdown_work(struct ubi_device *ubi)
1501 {
1502 #ifdef CONFIG_MTD_UBI_FASTMAP
1503 #ifndef __UBOOT__
1504         flush_work(&ubi->fm_work);
1505 #else
1506         /* In U-Boot all work is executed synchronously, so there is nothing to flush */
1507 #endif
1508 #endif
1509         while (!list_empty(&ubi->works)) {
1510                 struct ubi_work *wrk;
1511
1512                 wrk = list_entry(ubi->works.next, struct ubi_work, list);
1513                 list_del(&wrk->list);
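                /*
                 * The last argument, %1, tells the work function to cancel:
                 * it releases its resources without doing the actual work.
                 */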
1514                 wrk->func(ubi, wrk, 1);
1515                 ubi->works_count -= 1;
1516                 ubi_assert(ubi->works_count >= 0);
1517         }
1518 }
1519
1520 /**
1521  * ubi_wl_init - initialize the WL sub-system using attaching information.
1522  * @ubi: UBI device description object
1523  * @ai: attaching information
1524  *
1525  * This function returns zero in case of success, and a negative error code in
1526  * case of failure.
1527  */
1528 int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
1529 {
1530         int err, i, reserved_pebs, found_pebs = 0;
1531         struct rb_node *rb1, *rb2;
1532         struct ubi_ainf_volume *av;
1533         struct ubi_ainf_peb *aeb, *tmp;
1534         struct ubi_wl_entry *e;
1535
1536         ubi->used = ubi->erroneous = ubi->free = ubi->scrub = RB_ROOT;
1537         spin_lock_init(&ubi->wl_lock);
1538         mutex_init(&ubi->move_mutex);
1539         init_rwsem(&ubi->work_sem);
1540         ubi->max_ec = ai->max_ec;
1541         INIT_LIST_HEAD(&ubi->works);
1542
1543         sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);
1544
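        /*
         * The lookup table has one slot per PEB: @lookuptbl[pnum] points to
         * the wear-leveling entry of physical eraseblock @pnum.
         */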
1545         err = -ENOMEM;
1546         ubi->lookuptbl = kzalloc(ubi->peb_count * sizeof(void *), GFP_KERNEL);
1547         if (!ubi->lookuptbl)
1548                 return err;
1549
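        /*
         * Initialize the protection queue: %UBI_PROT_QUEUE_LEN list heads,
         * with @pq_head marking the current head position.
         */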
1550         for (i = 0; i < UBI_PROT_QUEUE_LEN; i++)
1551                 INIT_LIST_HEAD(&ubi->pq[i]);
1552         ubi->pq_head = 0;
1553
1554         ubi->free_count = 0;
1555         list_for_each_entry_safe(aeb, tmp, &ai->erase, u.list) {
1556                 cond_resched();
1557
1558                 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1559                 if (!e)
1560                         goto out_free;
1561
1562                 e->pnum = aeb->pnum;
1563                 e->ec = aeb->ec;
1564                 ubi->lookuptbl[e->pnum] = e;
1565                 if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0)) {
1566                         wl_entry_destroy(ubi, e);
1567                         goto out_free;
1568                 }
1569
1570                 found_pebs++;
1571         }
1572
1573         list_for_each_entry(aeb, &ai->free, u.list) {
1574                 cond_resched();
1575
1576                 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1577                 if (!e)
1578                         goto out_free;
1579
1580                 e->pnum = aeb->pnum;
1581                 e->ec = aeb->ec;
1582                 ubi_assert(e->ec >= 0);
1583
1584                 wl_tree_add(e, &ubi->free);
1585                 ubi->free_count++;
1586
1587                 ubi->lookuptbl[e->pnum] = e;
1588
1589                 found_pebs++;
1590         }
1591
1592         ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
1593                 ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
1594                         cond_resched();
1595
1596                         e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1597                         if (!e)
1598                                 goto out_free;
1599
1600                         e->pnum = aeb->pnum;
1601                         e->ec = aeb->ec;
1602                         ubi->lookuptbl[e->pnum] = e;
1603
1604                         if (!aeb->scrub) {
1605                                 dbg_wl("add PEB %d EC %d to the used tree",
1606                                        e->pnum, e->ec);
1607                                 wl_tree_add(e, &ubi->used);
1608                         } else {
1609                                 dbg_wl("add PEB %d EC %d to the scrub tree",
1610                                        e->pnum, e->ec);
1611                                 wl_tree_add(e, &ubi->scrub);
1612                         }
1613
1614                         found_pebs++;
1615                 }
1616         }
1617
1618         dbg_wl("found %i PEBs", found_pebs);
1619
1620         if (ubi->fm) {
1621                 ubi_assert(ubi->good_peb_count ==
1622                            found_pebs + ubi->fm->used_blocks);
1623
1624                 for (i = 0; i < ubi->fm->used_blocks; i++) {
1625                         e = ubi->fm->e[i];
1626                         ubi->lookuptbl[e->pnum] = e;
1627                 }
1628         } else
1630                 ubi_assert(ubi->good_peb_count == found_pebs);
1631
1632         reserved_pebs = WL_RESERVED_PEBS;
1633         ubi_fastmap_init(ubi, &reserved_pebs);
1634
1635         if (ubi->avail_pebs < reserved_pebs) {
1636                 ubi_err(ubi, "not enough physical eraseblocks (%d, need %d)",
1637                         ubi->avail_pebs, reserved_pebs);
1638                 if (ubi->corr_peb_count)
1639                         ubi_err(ubi, "%d PEBs are corrupted and not used",
1640                                 ubi->corr_peb_count);
1641                 goto out_free;
1642         }
1643         ubi->avail_pebs -= reserved_pebs;
1644         ubi->rsvd_pebs += reserved_pebs;
1645
1646         /* Schedule wear-leveling if needed */
1647         err = ensure_wear_leveling(ubi, 0);
1648         if (err)
1649                 goto out_free;
1650
1651         return 0;
1652
1653 out_free:
1654         shutdown_work(ubi);
1655         tree_destroy(ubi, &ubi->used);
1656         tree_destroy(ubi, &ubi->free);
1657         tree_destroy(ubi, &ubi->scrub);
1658         kfree(ubi->lookuptbl);
1659         return err;
1660 }
1661
1662 /**
1663  * protection_queue_destroy - destroy the protection queue.
1664  * @ubi: UBI device description object
1665  */
1666 static void protection_queue_destroy(struct ubi_device *ubi)
1667 {
1668         int i;
1669         struct ubi_wl_entry *e, *tmp;
1670
1671         for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) {
1672                 list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) {
1673                         list_del(&e->u.list);
1674                         wl_entry_destroy(ubi, e);
1675                 }
1676         }
1677 }
1678
1679 /**
1680  * ubi_wl_close - close the wear-leveling sub-system.
1681  * @ubi: UBI device description object
1682  */
1683 void ubi_wl_close(struct ubi_device *ubi)
1684 {
1685         dbg_wl("close the WL sub-system");
1686         ubi_fastmap_close(ubi);
1687         shutdown_work(ubi);
1688         protection_queue_destroy(ubi);
1689         tree_destroy(ubi, &ubi->used);
1690         tree_destroy(ubi, &ubi->erroneous);
1691         tree_destroy(ubi, &ubi->free);
1692         tree_destroy(ubi, &ubi->scrub);
1693         kfree(ubi->lookuptbl);
1694 }
1695
1696 /**
1697  * self_check_ec - make sure that the erase counter of a PEB is correct.
1698  * @ubi: UBI device description object
1699  * @pnum: the physical eraseblock number to check
1700  * @ec: the erase counter to check
1701  *
1702  * This function returns zero if the erase counter of physical eraseblock @pnum
1703  * matches @ec (up to a tolerance of one erase cycle), %1 if it does not, and
1704  * a negative error code if an error occurred.
1705  */
1706 static int self_check_ec(struct ubi_device *ubi, int pnum, int ec)
1707 {
1708         int err;
1709         long long read_ec;
1710         struct ubi_ec_hdr *ec_hdr;
1711
1712         if (!ubi_dbg_chk_gen(ubi))
1713                 return 0;
1714
1715         ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
1716         if (!ec_hdr)
1717                 return -ENOMEM;
1718
1719         err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
1720         if (err && err != UBI_IO_BITFLIPS) {
1721                 /* The header does not have to exist */
1722                 err = 0;
1723                 goto out_free;
1724         }
1725
1726         read_ec = be64_to_cpu(ec_hdr->ec);
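        /*
         * A read EC that is ahead by exactly one cycle is tolerated; this can
         * happen when the EC header was already updated by an erasure which
         * the caller's in-memory value predates.
         */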
1727         if (ec != read_ec && read_ec - ec > 1) {
1728                 ubi_err(ubi, "self-check failed for PEB %d", pnum);
1729                 ubi_err(ubi, "read EC is %lld, should be %d", read_ec, ec);
1730                 dump_stack();
1731                 err = 1;
1732         } else
1733                 err = 0;
1734
1735 out_free:
1736         kfree(ec_hdr);
1737         return err;
1738 }
1739
1740 /**
1741  * self_check_in_wl_tree - check that a wear-leveling entry is in the WL RB-tree.
1742  * @ubi: UBI device description object
1743  * @e: the wear-leveling entry to check
1744  * @root: the root of the tree
1745  *
1746  * This function returns zero if @e is in the @root RB-tree and %-EINVAL if it
1747  * is not.
1748  */
1749 static int self_check_in_wl_tree(const struct ubi_device *ubi,
1750                                  struct ubi_wl_entry *e, struct rb_root *root)
1751 {
1752         if (!ubi_dbg_chk_gen(ubi))
1753                 return 0;
1754
1755         if (in_wl_tree(e, root))
1756                 return 0;
1757
1758         ubi_err(ubi, "self-check failed for PEB %d, EC %d, RB-tree %p",
1759                 e->pnum, e->ec, root);
1760         dump_stack();
1761         return -EINVAL;
1762 }
1763
1764 /**
1765  * self_check_in_pq - check if a wear-leveling entry is in the protection queue.
1767  * @ubi: UBI device description object
1768  * @e: the wear-leveling entry to check
1769  *
1770  * This function returns zero if @e is in @ubi->pq and %-EINVAL if it is not.
1771  */
1772 static int self_check_in_pq(const struct ubi_device *ubi,
1773                             struct ubi_wl_entry *e)
1774 {
1775         struct ubi_wl_entry *p;
1776         int i;
1777
1778         if (!ubi_dbg_chk_gen(ubi))
1779                 return 0;
1780
1781         for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
1782                 list_for_each_entry(p, &ubi->pq[i], u.list)
1783                         if (p == e)
1784                                 return 0;
1785
1786         ubi_err(ubi, "self-check failed for PEB %d, EC %d, Protect queue",
1787                 e->pnum, e->ec);
1788         dump_stack();
1789         return -EINVAL;
1790 }
1791 #ifndef CONFIG_MTD_UBI_FASTMAP
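/**
 * get_peb_for_wl - pick a free PEB for the wear-leveling worker.
 * @ubi: UBI device description object
 *
 * Returns the free wear-leveling entry with the highest erase counter that
 * is still within %WL_FREE_MAX_DIFF of the least worn-out free PEB, and
 * removes it from the @ubi->free tree. Has to be called under @ubi->wl_lock.
 */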
1792 static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
1793 {
1794         struct ubi_wl_entry *e;
1795
1796         e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
1797         self_check_in_wl_tree(ubi, e, &ubi->free);
1798         ubi->free_count--;
1799         ubi_assert(ubi->free_count >= 0);
1800         rb_erase(&e->u.rb, &ubi->free);
1801
1802         return e;
1803 }
1804
1805 /**
1806  * produce_free_peb - produce a free physical eraseblock.
1807  * @ubi: UBI device description object
1808  *
1809  * This function tries to make a free PEB by means of synchronous execution of
1810  * pending works. This may be needed if, for example, the background thread is
1811  * disabled. Has to be called under @ubi->wl_lock, which it drops while a work
1812  * runs. Returns zero in case of success and a negative error code on failure.
1813  */
1814 static int produce_free_peb(struct ubi_device *ubi)
1815 {
1816         int err;
1817
1818         while (!ubi->free.rb_node && ubi->works_count) {
1819                 spin_unlock(&ubi->wl_lock);
1820
1821                 dbg_wl("do one work synchronously");
1822                 err = do_work(ubi);
1823
1824                 spin_lock(&ubi->wl_lock);
1825                 if (err)
1826                         return err;
1827         }
1828
1829         return 0;
1830 }
1831
1832 /**
1833  * ubi_wl_get_peb - get a physical eraseblock.
1834  * @ubi: UBI device description object
1835  *
1836  * This function returns a physical eraseblock in case of success and a
1837  * negative error code in case of failure.
1838  * Returns with ubi->fm_eba_sem held in read mode!
1839  */
1840 int ubi_wl_get_peb(struct ubi_device *ubi)
1841 {
1842         int err;
1843         struct ubi_wl_entry *e;
1844
1845 retry:
1846         down_read(&ubi->fm_eba_sem);
1847         spin_lock(&ubi->wl_lock);
1848         if (!ubi->free.rb_node) {
1849                 if (ubi->works_count == 0) {
1850                         ubi_err(ubi, "no free eraseblocks");
1851                         ubi_assert(list_empty(&ubi->works));
1852                         spin_unlock(&ubi->wl_lock);
1853                         return -ENOSPC;
1854                 }
1855
1856                 err = produce_free_peb(ubi);
1857                 if (err < 0) {
1858                         spin_unlock(&ubi->wl_lock);
1859                         return err;
1860                 }
1861                 spin_unlock(&ubi->wl_lock);
1862                 up_read(&ubi->fm_eba_sem);
1863                 goto retry;
1865         }
1866         e = wl_get_wle(ubi);
1867         prot_queue_add(ubi, e);
1868         spin_unlock(&ubi->wl_lock);
1869
1870         err = ubi_self_check_all_ff(ubi, e->pnum, ubi->vid_hdr_aloffset,
1871                                     ubi->peb_size - ubi->vid_hdr_aloffset);
1872         if (err) {
1873                 ubi_err(ubi, "new PEB %d does not contain all 0xFF bytes", e->pnum);
1874                 return err;
1875         }
1876
1877         return e->pnum;
1878 }
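
/*
 * A minimal caller sketch (hypothetical, not part of this driver): since
 * ubi_wl_get_peb() returns with @ubi->fm_eba_sem held for reading on both
 * the success and the error path, the caller has to drop it in either case.
 */
#if 0	/* illustrative only */
static int example_get_peb(struct ubi_device *ubi)
{
        int pnum = ubi_wl_get_peb(ubi);

        if (pnum < 0) {
                up_read(&ubi->fm_eba_sem);
                return pnum;
        }
        /* ... write data to PEB @pnum here ... */
        up_read(&ubi->fm_eba_sem);
        return pnum;
}
#endif
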
1879 #else
1880 #include "fastmap-wl.c"
1881 #endif