[u-boot] / drivers / mtd / ubi / wl.c
1 /*
2  * Copyright (c) International Business Machines Corp., 2006
3  *
4  * SPDX-License-Identifier:     GPL-2.0+
5  *
6  * Authors: Artem Bityutskiy (Битюцкий Артём), Thomas Gleixner
7  */
8
9 /*
10  * UBI wear-leveling sub-system.
11  *
12  * This sub-system is responsible for wear-leveling. It works in terms of
13  * physical eraseblocks and erase counters and knows nothing about logical
14  * eraseblocks, volumes, etc. From this sub-system's perspective all physical
15  * eraseblocks are of two types - used and free. Used physical eraseblocks are
16  * those that were "get" by the 'ubi_wl_get_peb()' function, and free physical
17  * eraseblocks are those that were put by the 'ubi_wl_put_peb()' function.
18  *
19  * Physical eraseblocks returned by 'ubi_wl_get_peb()' have only the erase
20  * counter header. The rest of the physical eraseblock contains only %0xFF bytes.
21  *
22  * When physical eraseblocks are returned to the WL sub-system by means of the
23  * 'ubi_wl_put_peb()' function, they are scheduled for erasure. The erasure is
24  * done asynchronously in context of the per-UBI device background thread,
25  * which is also managed by the WL sub-system.
26  *
27  * The wear-leveling is ensured by means of moving the contents of used
28  * physical eraseblocks with low erase counter to free physical eraseblocks
29  * with high erase counter.
30  *
31  * If the WL sub-system fails to erase a physical eraseblock, it marks it as
32  * bad.
33  *
34  * This sub-system is also responsible for scrubbing. If a bit-flip is detected
35  * in a physical eraseblock, it has to be moved. Technically this is the same
36  * as moving it for wear-leveling reasons.
37  *
38  * As it was said, for the UBI sub-system all physical eraseblocks are either
39  * "free" or "used". Free eraseblocks are kept in the @wl->free RB-tree, while
40  * used eraseblocks are kept in @wl->used, @wl->erroneous, or @wl->scrub
41  * RB-trees, as well as (temporarily) in the @wl->pq queue.
42  *
43  * When the WL sub-system returns a physical eraseblock, the physical
44  * eraseblock is protected from being moved for some "time". For this reason,
45  * the physical eraseblock is not directly moved from the @wl->free tree to the
46  * @wl->used tree. There is a protection queue in between where this
47  * physical eraseblock is temporarily stored (@wl->pq).
48  *
49  * All this protection stuff is needed because:
50  *  o we don't want to move physical eraseblocks just after we have given them
51  *    to the user; instead, we first want to let users fill them up with data;
52  *
53  *  o there is a chance that the user will put the physical eraseblock very
54  *    soon, so it makes sense not to move it for some time, but wait.
55  *
56  * Physical eraseblocks stay protected only for a limited time. But the "time" is
57  * measured in erase cycles in this case. This is implemented with help of the
58  * protection queue. Eraseblocks are put to the tail of this queue when they
59  * are returned by the 'ubi_wl_get_peb()', and eraseblocks are removed from the
60  * head of the queue on each erase operation (for any eraseblock). So the
61  * length of the queue defines how many (global) erase cycles PEBs are protected.
62  *
63  * To put it differently, each physical eraseblock has 2 main states: free and
64  * used. The former state corresponds to the @wl->free tree. The latter state
65  * is split up into several sub-states:
66  * o the WL movement is allowed (@wl->used tree);
67  * o the WL movement is disallowed (@wl->erroneous) because the PEB is
68  *   erroneous - e.g., there was a read error;
69  * o the WL movement is temporarily prohibited (@wl->pq queue);
70  * o scrubbing is needed (@wl->scrub tree).
71  *
72  * Depending on the sub-state, wear-leveling entries of the used physical
73  * eraseblocks may be kept in one of those structures.
74  *
75  * Note, in this implementation, we keep a small in-RAM object for each physical
76  * eraseblock. This is surely not a scalable solution. But it appears to be good
77  * enough for moderately large flashes and it is simple. In future, one may
78  * re-work this sub-system and make it more scalable.
79  *
80  * At the moment this sub-system does not utilize the sequence number, which
81  * was introduced relatively recently. But it would be wise to do this because
82  * the sequence number of a logical eraseblock characterizes how old it is. For
83  * example, when we move a PEB with low erase counter, and we need to pick the
84  * target PEB, we pick a PEB with the highest EC if our PEB is "old" and we
85  * pick a target PEB with an average EC if our PEB is not very "old". This
86  * leaves room for future re-works of the WL sub-system.
87  */
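
/*
 * Illustrative sketch (not part of the driver): the PEB life cycle described
 * above, written out as the calls a caller would make. The vol_id and lnum
 * values are placeholders for this example.
 *
 *	pnum = ubi_wl_get_peb(ubi);
 *		- PEB leaves @wl->free and enters the protection queue @wl->pq
 *	... caller writes the VID header and data to PEB pnum ...
 *	err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 0);
 *		- PEB is scheduled for asynchronous erasure, after which it
 *		  re-enters @wl->free
 */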
88
89 #define __UBOOT__
90 #ifndef __UBOOT__
91 #include <linux/slab.h>
92 #include <linux/crc32.h>
93 #include <linux/freezer.h>
94 #include <linux/kthread.h>
95 #else
96 #include <ubi_uboot.h>
97 #endif
98
99 #include "ubi.h"
100
101 /* Number of physical eraseblocks reserved for wear-leveling purposes */
102 #define WL_RESERVED_PEBS 1
103
104 /*
105  * Maximum difference between two erase counters. If this threshold is
106  * exceeded, the WL sub-system starts moving data from used physical
107  * eraseblocks with low erase counter to free physical eraseblocks with high
108  * erase counter.
109  */
110 #define UBI_WL_THRESHOLD CONFIG_MTD_UBI_WL_THRESHOLD
111
112 /*
113  * When a physical eraseblock is moved, the WL sub-system has to pick the target
114  * physical eraseblock to move to. The simplest way would be just to pick the
115  * one with the highest erase counter. But in certain workloads this could lead
116  * to an unlimited wear of one or a few physical eraseblocks. Indeed, imagine a
117  * situation when the picked physical eraseblock is constantly erased after the
118  * data is written to it. So, we have a constant which limits the highest erase
119  * counter of the free physical eraseblock to pick. Namely, the WL sub-system
120  * does not pick eraseblocks with erase counter greater than the lowest erase
121  * counter plus %WL_FREE_MAX_DIFF.
122  */
123 #define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD)
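
/*
 * Worked example (assuming the common default CONFIG_MTD_UBI_WL_THRESHOLD
 * of 4096): WL_FREE_MAX_DIFF is then 8192, so if the smallest erase counter
 * in @wl->free is 100, no free PEB with an erase counter of 8292 or more is
 * picked as a wear-leveling target.
 */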
124
125 /*
126  * Maximum number of consecutive background thread failures which is enough to
127  * switch to read-only mode.
128  */
129 #define WL_MAX_FAILURES 32
130
131 static int self_check_ec(struct ubi_device *ubi, int pnum, int ec);
132 static int self_check_in_wl_tree(const struct ubi_device *ubi,
133                                  struct ubi_wl_entry *e, struct rb_root *root);
134 static int self_check_in_pq(const struct ubi_device *ubi,
135                             struct ubi_wl_entry *e);
136
137 #ifdef CONFIG_MTD_UBI_FASTMAP
138 #ifndef __UBOOT__
139 /**
140  * update_fastmap_work_fn - calls ubi_update_fastmap from a work queue
141  * @wrk: the work description object
142  */
143 static void update_fastmap_work_fn(struct work_struct *wrk)
144 {
145         struct ubi_device *ubi = container_of(wrk, struct ubi_device, fm_work);
146         ubi_update_fastmap(ubi);
147 }
148 #endif
149
150 /**
151  * ubi_is_fm_block - returns 1 if a PEB is currently used in a fastmap.
152  * @ubi: UBI device description object
153  * @pnum: the PEB to be checked
154  */
155 static int ubi_is_fm_block(struct ubi_device *ubi, int pnum)
156 {
157         int i;
158
159         if (!ubi->fm)
160                 return 0;
161
162         for (i = 0; i < ubi->fm->used_blocks; i++)
163                 if (ubi->fm->e[i]->pnum == pnum)
164                         return 1;
165
166         return 0;
167 }
168 #else
169 static int ubi_is_fm_block(struct ubi_device *ubi, int pnum)
170 {
171         return 0;
172 }
173 #endif
174
175 /**
176  * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
177  * @e: the wear-leveling entry to add
178  * @root: the root of the tree
179  *
180  * Note, we use (erase counter, physical eraseblock number) pairs as keys in
181  * the @ubi->used and @ubi->free RB-trees.
182  */
183 static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
184 {
185         struct rb_node **p, *parent = NULL;
186
187         p = &root->rb_node;
188         while (*p) {
189                 struct ubi_wl_entry *e1;
190
191                 parent = *p;
192                 e1 = rb_entry(parent, struct ubi_wl_entry, u.rb);
193
194                 if (e->ec < e1->ec)
195                         p = &(*p)->rb_left;
196                 else if (e->ec > e1->ec)
197                         p = &(*p)->rb_right;
198                 else {
199                         ubi_assert(e->pnum != e1->pnum);
200                         if (e->pnum < e1->pnum)
201                                 p = &(*p)->rb_left;
202                         else
203                                 p = &(*p)->rb_right;
204                 }
205         }
206
207         rb_link_node(&e->u.rb, parent, p);
208         rb_insert_color(&e->u.rb, root);
209 }
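
/*
 * Example of the (erase counter, PEB number) ordering implemented above,
 * with illustrative values: entries {EC 3, PEB 7}, {EC 3, PEB 9} and
 * {EC 4, PEB 1} sort in exactly that order, so rb_first() always yields the
 * entry with the smallest erase counter, ties broken by the smaller PEB
 * number.
 */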
210
211 /**
212  * do_work - do one pending work.
213  * @ubi: UBI device description object
214  *
215  * This function returns zero in case of success and a negative error code in
216  * case of failure.
217  */
218 static int do_work(struct ubi_device *ubi)
219 {
220         int err;
221         struct ubi_work *wrk;
222
223         cond_resched();
224
225         /*
226          * @ubi->work_sem is used to synchronize with the workers. Workers take
227          * it in read mode, so many of them may be doing works at a time. But
228          * the queue flush code has to be sure the whole queue of works is
229          * done, and it takes the mutex in write mode.
230          */
231         down_read(&ubi->work_sem);
232         spin_lock(&ubi->wl_lock);
233         if (list_empty(&ubi->works)) {
234                 spin_unlock(&ubi->wl_lock);
235                 up_read(&ubi->work_sem);
236                 return 0;
237         }
238
239         wrk = list_entry(ubi->works.next, struct ubi_work, list);
240         list_del(&wrk->list);
241         ubi->works_count -= 1;
242         ubi_assert(ubi->works_count >= 0);
243         spin_unlock(&ubi->wl_lock);
244
245         /*
246          * Call the worker function. Do not touch the work structure
247          * after this call as it will have been freed or reused by that
248          * time by the worker function.
249          */
250         err = wrk->func(ubi, wrk, 0);
251         if (err)
252                 ubi_err("work failed with error code %d", err);
253         up_read(&ubi->work_sem);
254
255         return err;
256 }
257
258 /**
259  * produce_free_peb - produce a free physical eraseblock.
260  * @ubi: UBI device description object
261  *
262  * This function tries to make a free PEB by means of synchronous execution of
263  * pending works. This may be needed if, for example, the background thread is
264  * disabled. Returns zero in case of success and a negative error code in case
265  * of failure.
266  */
267 static int produce_free_peb(struct ubi_device *ubi)
268 {
269         int err;
270
271         while (!ubi->free.rb_node) {
272                 spin_unlock(&ubi->wl_lock);
273
274                 dbg_wl("do one work synchronously");
275                 err = do_work(ubi);
276
277                 spin_lock(&ubi->wl_lock);
278                 if (err)
279                         return err;
280         }
281
282         return 0;
283 }
284
285 /**
286  * in_wl_tree - check if wear-leveling entry is present in a WL RB-tree.
287  * @e: the wear-leveling entry to check
288  * @root: the root of the tree
289  *
290  * This function returns non-zero if @e is in the @root RB-tree and zero if it
291  * is not.
292  */
293 static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
294 {
295         struct rb_node *p;
296
297         p = root->rb_node;
298         while (p) {
299                 struct ubi_wl_entry *e1;
300
301                 e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
302
303                 if (e->pnum == e1->pnum) {
304                         ubi_assert(e == e1);
305                         return 1;
306                 }
307
308                 if (e->ec < e1->ec)
309                         p = p->rb_left;
310                 else if (e->ec > e1->ec)
311                         p = p->rb_right;
312                 else {
313                         ubi_assert(e->pnum != e1->pnum);
314                         if (e->pnum < e1->pnum)
315                                 p = p->rb_left;
316                         else
317                                 p = p->rb_right;
318                 }
319         }
320
321         return 0;
322 }
323
324 /**
325  * prot_queue_add - add physical eraseblock to the protection queue.
326  * @ubi: UBI device description object
327  * @e: the physical eraseblock to add
328  *
329  * This function adds @e to the tail of the protection queue @ubi->pq, where
330  * @e will stay for %UBI_PROT_QUEUE_LEN erase operations and will be
331  * temporarily protected from the wear-leveling worker. Note, @wl->lock has to
332  * be locked.
333  */
334 static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
335 {
336         int pq_tail = ubi->pq_head - 1;
337
338         if (pq_tail < 0)
339                 pq_tail = UBI_PROT_QUEUE_LEN - 1;
340         ubi_assert(pq_tail >= 0 && pq_tail < UBI_PROT_QUEUE_LEN);
341         list_add_tail(&e->u.list, &ubi->pq[pq_tail]);
342         dbg_wl("added PEB %d EC %d to the protection queue", e->pnum, e->ec);
343 }
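
/*
 * Illustrative example: with %UBI_PROT_QUEUE_LEN of, say, 10 and
 * @ubi->pq_head == 0, pq_tail computes to -1 and wraps to 9, so the new PEB
 * lands in the list slot that serve_prot_queue() will reach last, i.e. it
 * stays protected for a full pass over the queue.
 */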
344
345 /**
346  * find_wl_entry - find wear-leveling entry closest to certain erase counter.
347  * @ubi: UBI device description object
348  * @root: the RB-tree where to look for
349  * @diff: maximum possible difference from the smallest erase counter
350  *
351  * This function looks for a wear leveling entry with erase counter closest to
352  * min + @diff, where min is the smallest erase counter.
353  */
354 static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi,
355                                           struct rb_root *root, int diff)
356 {
357         struct rb_node *p;
358         struct ubi_wl_entry *e, *prev_e = NULL;
359         int max;
360
361         e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
362         max = e->ec + diff;
363
364         p = root->rb_node;
365         while (p) {
366                 struct ubi_wl_entry *e1;
367
368                 e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
369                 if (e1->ec >= max)
370                         p = p->rb_left;
371                 else {
372                         p = p->rb_right;
373                         prev_e = e;
374                         e = e1;
375                 }
376         }
377
378         /* If no fastmap has been written and this WL entry can be used
379          * as anchor PEB, hold it back and return the second best WL entry
380          * such that fastmap can use the anchor PEB later. */
381         if (prev_e && !ubi->fm_disabled &&
382             !ubi->fm && e->pnum < UBI_FM_MAX_START)
383                 return prev_e;
384
385         return e;
386 }
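
/*
 * Illustrative example (ignoring the fastmap anchor special case above):
 * for free entries with erase counters {10, 12, 40} and @diff == 20, max is
 * 30, so the walk returns the EC 12 entry - the entry with the largest
 * erase counter still below min + @diff.
 */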
387
388 /**
389  * find_mean_wl_entry - find wear-leveling entry with medium erase counter.
390  * @ubi: UBI device description object
391  * @root: the RB-tree where to look for
392  *
393  * This function looks for a wear leveling entry with medium erase counter,
394  * but not greater than or equal to the lowest erase counter plus
395  * %WL_FREE_MAX_DIFF/2.
396  */
397 static struct ubi_wl_entry *find_mean_wl_entry(struct ubi_device *ubi,
398                                                struct rb_root *root)
399 {
400         struct ubi_wl_entry *e, *first, *last;
401
402         first = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
403         last = rb_entry(rb_last(root), struct ubi_wl_entry, u.rb);
404
405         if (last->ec - first->ec < WL_FREE_MAX_DIFF) {
406                 e = rb_entry(root->rb_node, struct ubi_wl_entry, u.rb);
407
408 #ifdef CONFIG_MTD_UBI_FASTMAP
409                 /* If no fastmap has been written and this WL entry can be used
410                  * as anchor PEB, hold it back and return the second best
411                  * WL entry such that fastmap can use the anchor PEB later. */
412                 if (e && !ubi->fm_disabled && !ubi->fm &&
413                     e->pnum < UBI_FM_MAX_START)
414                         e = rb_entry(rb_next(root->rb_node),
415                                      struct ubi_wl_entry, u.rb);
416 #endif
417         } else
418                 e = find_wl_entry(ubi, root, WL_FREE_MAX_DIFF/2);
419
420         return e;
421 }
422
423 #ifdef CONFIG_MTD_UBI_FASTMAP
424 /**
425  * find_anchor_wl_entry - find wear-leveling entry to be used as the anchor PEB.
426  * @root: the RB-tree where to look for
427  */
428 static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root)
429 {
430         struct rb_node *p;
431         struct ubi_wl_entry *e, *victim = NULL;
432         int max_ec = UBI_MAX_ERASECOUNTER;
433
434         ubi_rb_for_each_entry(p, e, root, u.rb) {
435                 if (e->pnum < UBI_FM_MAX_START && e->ec < max_ec) {
436                         victim = e;
437                         max_ec = e->ec;
438                 }
439         }
440
441         return victim;
442 }
443
444 static int anchor_pebs_available(struct rb_root *root)
445 {
446         struct rb_node *p;
447         struct ubi_wl_entry *e;
448
449         ubi_rb_for_each_entry(p, e, root, u.rb)
450                 if (e->pnum < UBI_FM_MAX_START)
451                         return 1;
452
453         return 0;
454 }
455
456 /**
457  * ubi_wl_get_fm_peb - find a physical erase block with a given maximal number.
458  * @ubi: UBI device description object
459  * @anchor: This PEB will be used as anchor PEB by fastmap
460  *
461  * The function returns a physical erase block with a given maximal number
462  * and removes it from the wl subsystem.
463  * Must be called with wl_lock held!
464  */
465 struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor)
466 {
467         struct ubi_wl_entry *e = NULL;
468
469         if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1))
470                 goto out;
471
472         if (anchor)
473                 e = find_anchor_wl_entry(&ubi->free);
474         else
475                 e = find_mean_wl_entry(ubi, &ubi->free);
476
477         if (!e)
478                 goto out;
479
480         self_check_in_wl_tree(ubi, e, &ubi->free);
481
482         /* Remove it from the free list; the wl subsystem
483          * no longer knows this eraseblock. */
484         rb_erase(&e->u.rb, &ubi->free);
485         ubi->free_count--;
486 out:
487         return e;
488 }
489 #endif
490
491 /**
492  * __wl_get_peb - get a physical eraseblock.
493  * @ubi: UBI device description object
494  *
495  * This function returns a physical eraseblock in case of success and a
496  * negative error code in case of failure.
497  */
498 static int __wl_get_peb(struct ubi_device *ubi)
499 {
500         int err;
501         struct ubi_wl_entry *e;
502
503 retry:
504         if (!ubi->free.rb_node) {
505                 if (ubi->works_count == 0) {
506                         ubi_err("no free eraseblocks");
507                         ubi_assert(list_empty(&ubi->works));
508                         return -ENOSPC;
509                 }
510
511                 err = produce_free_peb(ubi);
512                 if (err < 0)
513                         return err;
514                 goto retry;
515         }
516
517         e = find_mean_wl_entry(ubi, &ubi->free);
518         if (!e) {
519                 ubi_err("no free eraseblocks");
520                 return -ENOSPC;
521         }
522
523         self_check_in_wl_tree(ubi, e, &ubi->free);
524
525         /*
526          * Move the physical eraseblock to the protection queue where it will
527          * be protected from being moved for some time.
528          */
529         rb_erase(&e->u.rb, &ubi->free);
530         ubi->free_count--;
531         dbg_wl("PEB %d EC %d", e->pnum, e->ec);
532 #ifndef CONFIG_MTD_UBI_FASTMAP
533         /* We have to enqueue e only if fastmap is disabled;
534          * if fastmap is enabled, prot_queue_add() will be called by
535          * ubi_wl_get_peb() after removing e from the pool. */
536         prot_queue_add(ubi, e);
537 #endif
538         return e->pnum;
539 }
540
541 #ifdef CONFIG_MTD_UBI_FASTMAP
542 /**
543  * return_unused_pool_pebs - returns unused PEBs to the free tree.
544  * @ubi: UBI device description object
545  * @pool: fastmap pool description object
546  */
547 static void return_unused_pool_pebs(struct ubi_device *ubi,
548                                     struct ubi_fm_pool *pool)
549 {
550         int i;
551         struct ubi_wl_entry *e;
552
553         for (i = pool->used; i < pool->size; i++) {
554                 e = ubi->lookuptbl[pool->pebs[i]];
555                 wl_tree_add(e, &ubi->free);
556                 ubi->free_count++;
557         }
558 }
559
560 /**
561  * refill_wl_pool - refills the fastmap pool used by the
562  * WL sub-system.
563  * @ubi: UBI device description object
564  */
565 static void refill_wl_pool(struct ubi_device *ubi)
566 {
567         struct ubi_wl_entry *e;
568         struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
569
570         return_unused_pool_pebs(ubi, pool);
571
572         for (pool->size = 0; pool->size < pool->max_size; pool->size++) {
573                 if (!ubi->free.rb_node ||
574                    (ubi->free_count - ubi->beb_rsvd_pebs < 5))
575                         break;
576
577                 e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
578                 self_check_in_wl_tree(ubi, e, &ubi->free);
579                 rb_erase(&e->u.rb, &ubi->free);
580                 ubi->free_count--;
581
582                 pool->pebs[pool->size] = e->pnum;
583         }
584         pool->used = 0;
585 }
586
587 /**
588  * refill_wl_user_pool - refills the fastmap pool used by ubi_wl_get_peb.
589  * @ubi: UBI device description object
590  */
591 static void refill_wl_user_pool(struct ubi_device *ubi)
592 {
593         struct ubi_fm_pool *pool = &ubi->fm_pool;
594
595         return_unused_pool_pebs(ubi, pool);
596
597         for (pool->size = 0; pool->size < pool->max_size; pool->size++) {
598                 pool->pebs[pool->size] = __wl_get_peb(ubi);
599                 if (pool->pebs[pool->size] < 0)
600                         break;
601         }
602         pool->used = 0;
603 }
604
605 /**
606  * ubi_refill_pools - refills all fastmap PEB pools.
607  * @ubi: UBI device description object
608  */
609 void ubi_refill_pools(struct ubi_device *ubi)
610 {
611         spin_lock(&ubi->wl_lock);
612         refill_wl_pool(ubi);
613         refill_wl_user_pool(ubi);
614         spin_unlock(&ubi->wl_lock);
615 }
616
617 /* ubi_wl_get_peb - works exactly like __wl_get_peb but keeps track of
618  * the fastmap pool.
619  */
620 int ubi_wl_get_peb(struct ubi_device *ubi)
621 {
622         int ret;
623         struct ubi_fm_pool *pool = &ubi->fm_pool;
624         struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
625
626         if (!pool->size || !wl_pool->size || pool->used == pool->size ||
627             wl_pool->used == wl_pool->size)
628                 ubi_update_fastmap(ubi);
629
630         /* we did not get a single free PEB */
631         if (!pool->size)
632                 ret = -ENOSPC;
633         else {
634                 spin_lock(&ubi->wl_lock);
635                 ret = pool->pebs[pool->used++];
636                 prot_queue_add(ubi, ubi->lookuptbl[ret]);
637                 spin_unlock(&ubi->wl_lock);
638         }
639
640         return ret;
641 }
642
643 /* get_peb_for_wl - returns a PEB to be used internally by the WL sub-system.
644  *
645  * @ubi: UBI device description object
646  */
647 static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
648 {
649         struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
650         int pnum;
651
652         if (pool->used == pool->size || !pool->size) {
653                 /* We cannot update the fastmap here because this
654                  * function is called in atomic context.
655                  * Let's fail here and refill/update it as soon as possible. */
656 #ifndef __UBOOT__
657                 schedule_work(&ubi->fm_work);
658 #else
659                 /* In U-Boot we must call this directly */
660                 ubi_update_fastmap(ubi);
661 #endif
662                 return NULL;
663         } else {
664                 pnum = pool->pebs[pool->used++];
665                 return ubi->lookuptbl[pnum];
666         }
667 }
668 #else
669 static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
670 {
671         struct ubi_wl_entry *e;
672
673         e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
674         self_check_in_wl_tree(ubi, e, &ubi->free);
675         rb_erase(&e->u.rb, &ubi->free);
676
677         return e;
678 }
679
680 int ubi_wl_get_peb(struct ubi_device *ubi)
681 {
682         int peb, err;
683
684         spin_lock(&ubi->wl_lock);
685         peb = __wl_get_peb(ubi);
686         spin_unlock(&ubi->wl_lock);
687
688         err = ubi_self_check_all_ff(ubi, peb, ubi->vid_hdr_aloffset,
689                                     ubi->peb_size - ubi->vid_hdr_aloffset);
690         if (err) {
691                 ubi_err("new PEB %d does not contain all 0xFF bytes", peb);
692                 return err;
693         }
694
695         return peb;
696 }
697 #endif
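
/*
 * Illustrative usage sketch (not part of the driver): how an upper layer
 * such as the EBA sub-system might consume a PEB from either variant of
 * ubi_wl_get_peb() above; error handling is shortened for the example.
 *
 *	pnum = ubi_wl_get_peb(ubi);
 *	if (pnum < 0)
 *		return pnum;	- e.g. %-ENOSPC when no free PEB is left
 *	... write the VID header and data to PEB pnum, map the LEB to it ...
 */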
698
699 /**
700  * prot_queue_del - remove a physical eraseblock from the protection queue.
701  * @ubi: UBI device description object
702  * @pnum: the physical eraseblock to remove
703  *
704  * This function deletes PEB @pnum from the protection queue and returns zero
705  * in case of success and %-ENODEV if the PEB was not found.
706  */
707 static int prot_queue_del(struct ubi_device *ubi, int pnum)
708 {
709         struct ubi_wl_entry *e;
710
711         e = ubi->lookuptbl[pnum];
712         if (!e)
713                 return -ENODEV;
714
715         if (self_check_in_pq(ubi, e))
716                 return -ENODEV;
717
718         list_del(&e->u.list);
719         dbg_wl("deleted PEB %d from the protection queue", e->pnum);
720         return 0;
721 }
722
723 /**
724  * sync_erase - synchronously erase a physical eraseblock.
725  * @ubi: UBI device description object
726  * @e: the physical eraseblock to erase
727  * @torture: if the physical eraseblock has to be tortured
728  *
729  * This function returns zero in case of success and a negative error code in
730  * case of failure.
731  */
732 static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
733                       int torture)
734 {
735         int err;
736         struct ubi_ec_hdr *ec_hdr;
737         unsigned long long ec = e->ec;
738
739         dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec);
740
741         err = self_check_ec(ubi, e->pnum, e->ec);
742         if (err)
743                 return -EINVAL;
744
745         ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
746         if (!ec_hdr)
747                 return -ENOMEM;
748
749         err = ubi_io_sync_erase(ubi, e->pnum, torture);
750         if (err < 0)
751                 goto out_free;
752
753         ec += err;
754         if (ec > UBI_MAX_ERASECOUNTER) {
755                 /*
756                  * Erase counter overflow. Upgrade UBI and use 64-bit
757                  * erase counters internally.
758                  */
759                 ubi_err("erase counter overflow at PEB %d, EC %llu",
760                         e->pnum, ec);
761                 err = -EINVAL;
762                 goto out_free;
763         }
764
765         dbg_wl("erased PEB %d, new EC %llu", e->pnum, ec);
766
767         ec_hdr->ec = cpu_to_be64(ec);
768
769         err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr);
770         if (err)
771                 goto out_free;
772
773         e->ec = ec;
774         spin_lock(&ubi->wl_lock);
775         if (e->ec > ubi->max_ec)
776                 ubi->max_ec = e->ec;
777         spin_unlock(&ubi->wl_lock);
778
779 out_free:
780         kfree(ec_hdr);
781         return err;
782 }
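
/*
 * Note on the 'ec += err' arithmetic in sync_erase(): 'ubi_io_sync_erase()'
 * returns the number of erase cycles it actually performed, which is more
 * than one when the PEB is tortured. E.g. an old EC of 100 plus a torture
 * run of 3 erasures yields a new EC of 103.
 */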
783
784 /**
785  * serve_prot_queue - check if it is time to stop protecting PEBs.
786  * @ubi: UBI device description object
787  *
788  * This function is called after each erase operation and removes PEBs from the
789  * tail of the protection queue. These PEBs have been protected for long enough
790  * and should be moved to the used tree.
791  */
792 static void serve_prot_queue(struct ubi_device *ubi)
793 {
794         struct ubi_wl_entry *e, *tmp;
795         int count;
796
797         /*
798          * There may be several protected physical eraseblocks to remove;
799          * process them all.
800          */
801 repeat:
802         count = 0;
803         spin_lock(&ubi->wl_lock);
804         list_for_each_entry_safe(e, tmp, &ubi->pq[ubi->pq_head], u.list) {
805                 dbg_wl("PEB %d EC %d protection over, move to used tree",
806                         e->pnum, e->ec);
807
808                 list_del(&e->u.list);
809                 wl_tree_add(e, &ubi->used);
810                 if (count++ > 32) {
811                         /*
812                          * Let's be nice and avoid holding the spinlock for
813                          * too long.
814                          */
815                         spin_unlock(&ubi->wl_lock);
816                         cond_resched();
817                         goto repeat;
818                 }
819         }
820
821         ubi->pq_head += 1;
822         if (ubi->pq_head == UBI_PROT_QUEUE_LEN)
823                 ubi->pq_head = 0;
824         ubi_assert(ubi->pq_head >= 0 && ubi->pq_head < UBI_PROT_QUEUE_LEN);
825         spin_unlock(&ubi->wl_lock);
826 }
827
828 /**
829  * __schedule_ubi_work - schedule a work.
830  * @ubi: UBI device description object
831  * @wrk: the work to schedule
832  *
833  * This function adds a work defined by @wrk to the tail of the pending works
834  * list. Can only be used if @ubi->work_sem is already held in read mode!
835  */
836 static void __schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
837 {
838         spin_lock(&ubi->wl_lock);
839         list_add_tail(&wrk->list, &ubi->works);
840         ubi_assert(ubi->works_count >= 0);
841         ubi->works_count += 1;
842 #ifndef __UBOOT__
843         if (ubi->thread_enabled && !ubi_dbg_is_bgt_disabled(ubi))
844                 wake_up_process(ubi->bgt_thread);
845 #else
846         /*
847          * U-Boot special: We have no bgt_thread in U-Boot!
848          * So just call do_work() here directly.
849          */
850         do_work(ubi);
851 #endif
852         spin_unlock(&ubi->wl_lock);
853 }
854
855 /**
856  * schedule_ubi_work - schedule a work.
857  * @ubi: UBI device description object
858  * @wrk: the work to schedule
859  *
860  * This function adds a work defined by @wrk to the tail of the pending works
861  * list.
862  */
863 static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
864 {
865         down_read(&ubi->work_sem);
866         __schedule_ubi_work(ubi, wrk);
867         up_read(&ubi->work_sem);
868 }
869
870 static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
871                         int cancel);
872
873 #ifdef CONFIG_MTD_UBI_FASTMAP
874 /**
875  * ubi_is_erase_work - checks whether a work is erase work.
876  * @wrk: The work object to be checked
877  */
878 int ubi_is_erase_work(struct ubi_work *wrk)
879 {
880         return wrk->func == erase_worker;
881 }
882 #endif
883
884 /**
885  * schedule_erase - schedule an erase work.
886  * @ubi: UBI device description object
887  * @e: the WL entry of the physical eraseblock to erase
888  * @vol_id: the volume ID that last used this PEB
889  * @lnum: the last used logical eraseblock number for the PEB
890  * @torture: if the physical eraseblock has to be tortured
891  *
892  * This function returns zero in case of success and %-ENOMEM in case of
893  * failure.
894  */
895 static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
896                           int vol_id, int lnum, int torture)
897 {
898         struct ubi_work *wl_wrk;
899
900         ubi_assert(e);
901         ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
902
903         dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
904                e->pnum, e->ec, torture);
905
906         wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
907         if (!wl_wrk)
908                 return -ENOMEM;
909
910         wl_wrk->func = &erase_worker;
911         wl_wrk->e = e;
912         wl_wrk->vol_id = vol_id;
913         wl_wrk->lnum = lnum;
914         wl_wrk->torture = torture;
915
916         schedule_ubi_work(ubi, wl_wrk);
917         return 0;
918 }
919
920 /**
921  * do_sync_erase - run the erase worker synchronously.
922  * @ubi: UBI device description object
923  * @e: the WL entry of the physical eraseblock to erase
924  * @vol_id: the volume ID that last used this PEB
925  * @lnum: the last used logical eraseblock number for the PEB
926  * @torture: if the physical eraseblock has to be tortured
927  * Returns zero in case of success and a negative error code otherwise.
928  */
929 static int do_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
930                          int vol_id, int lnum, int torture)
931 {
932         struct ubi_work *wl_wrk;
933
934         dbg_wl("sync erase of PEB %i", e->pnum);
935
936         wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
937         if (!wl_wrk)
938                 return -ENOMEM;
939
940         wl_wrk->e = e;
941         wl_wrk->vol_id = vol_id;
942         wl_wrk->lnum = lnum;
943         wl_wrk->torture = torture;
944
945         return erase_worker(ubi, wl_wrk, 0);
946 }
947
948 #ifdef CONFIG_MTD_UBI_FASTMAP
949 /**
950  * ubi_wl_put_fm_peb - returns a PEB used in a fastmap to the wear-leveling
951  * sub-system.
952  * see: ubi_wl_put_peb()
953  *
954  * @ubi: UBI device description object
955  * @fm_e: physical eraseblock to return
956  * @lnum: the last used logical eraseblock number for the PEB
957  * @torture: if this physical eraseblock has to be tortured
958  */
959 int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *fm_e,
960                       int lnum, int torture)
961 {
962         struct ubi_wl_entry *e;
963         int vol_id, pnum = fm_e->pnum;
964
965         dbg_wl("PEB %d", pnum);
966
967         ubi_assert(pnum >= 0);
968         ubi_assert(pnum < ubi->peb_count);
969
970         spin_lock(&ubi->wl_lock);
971         e = ubi->lookuptbl[pnum];
972
973         /* This can happen if we recovered from a fastmap the very
974          * first time and are now writing a new one. In this case the wl system
975          * has never seen any PEB used by the original fastmap.
976          */
977         if (!e) {
978                 e = fm_e;
979                 ubi_assert(e->ec >= 0);
980                 ubi->lookuptbl[pnum] = e;
981         } else {
982                 e->ec = fm_e->ec;
983                 kfree(fm_e);
984         }
985
986         spin_unlock(&ubi->wl_lock);
987
988         vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID;
989         return schedule_erase(ubi, e, vol_id, lnum, torture);
990 }
991 #endif
992
993 /**
994  * wear_leveling_worker - wear-leveling worker function.
995  * @ubi: UBI device description object
996  * @wrk: the work object
997  * @cancel: non-zero if the worker has to free memory and exit
998  *
999  * This function moves the contents of a less worn out physical eraseblock to
1000  * a more worn out one. Returns zero in case of success and a negative error
1001  * code in case of failure.
1002  */
1003 static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
1004                                 int cancel)
1005 {
1006         int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
1007         int vol_id = -1, uninitialized_var(lnum);
1008 #ifdef CONFIG_MTD_UBI_FASTMAP
1009         int anchor = wrk->anchor;
1010 #endif
1011         struct ubi_wl_entry *e1, *e2;
1012         struct ubi_vid_hdr *vid_hdr;
1013
1014         kfree(wrk);
1015         if (cancel)
1016                 return 0;
1017
1018         vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
1019         if (!vid_hdr)
1020                 return -ENOMEM;
1021
1022         mutex_lock(&ubi->move_mutex);
1023         spin_lock(&ubi->wl_lock);
1024         ubi_assert(!ubi->move_from && !ubi->move_to);
1025         ubi_assert(!ubi->move_to_put);
1026
1027         if (!ubi->free.rb_node ||
1028             (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
1029                 /*
1030                  * No free physical eraseblocks? Well, they must be waiting in
1031                  * the queue to be erased. Cancel movement - it will be
1032                  * triggered again when a free physical eraseblock appears.
1033                  *
1034                  * No used physical eraseblocks? They must be temporarily
1035                  * protected from being moved. They will be moved to the
1036                  * @ubi->used tree later and the wear-leveling will be
1037                  * triggered again.
1038                  */
1039                 dbg_wl("cancel WL, a list is empty: free %d, used %d",
1040                        !ubi->free.rb_node, !ubi->used.rb_node);
1041                 goto out_cancel;
1042         }
1043
1044 #ifdef CONFIG_MTD_UBI_FASTMAP
1045         /* Check whether we need to produce an anchor PEB */
1046         if (!anchor)
1047                 anchor = !anchor_pebs_available(&ubi->free);
1048
1049         if (anchor) {
1050                 e1 = find_anchor_wl_entry(&ubi->used);
1051                 if (!e1)
1052                         goto out_cancel;
1053                 e2 = get_peb_for_wl(ubi);
1054                 if (!e2)
1055                         goto out_cancel;
1056
1057                 self_check_in_wl_tree(ubi, e1, &ubi->used);
1058                 rb_erase(&e1->u.rb, &ubi->used);
1059                 dbg_wl("anchor-move PEB %d to PEB %d", e1->pnum, e2->pnum);
1060         } else if (!ubi->scrub.rb_node) {
1061 #else
1062         if (!ubi->scrub.rb_node) {
1063 #endif
1064                 /*
1065                  * Now pick the least worn-out used physical eraseblock and a
1066                  * highly worn-out free physical eraseblock. If the erase
1067                  * counters differ much enough, start wear-leveling.
1068                  */
1069                 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
1070                 e2 = get_peb_for_wl(ubi);
1071                 if (!e2)
1072                         goto out_cancel;
1073
1074                 if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
1075                         dbg_wl("no WL needed: min used EC %d, max free EC %d",
1076                                e1->ec, e2->ec);
1077
1078                         /* Give the unused PEB back */
1079                         wl_tree_add(e2, &ubi->free);
1080                         goto out_cancel;
1081                 }
1082                 self_check_in_wl_tree(ubi, e1, &ubi->used);
1083                 rb_erase(&e1->u.rb, &ubi->used);
1084                 dbg_wl("move PEB %d EC %d to PEB %d EC %d",
1085                        e1->pnum, e1->ec, e2->pnum, e2->ec);
1086         } else {
1087                 /* Perform scrubbing */
1088                 scrubbing = 1;
1089                 e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
1090                 e2 = get_peb_for_wl(ubi);
1091                 if (!e2)
1092                         goto out_cancel;
1093
1094                 self_check_in_wl_tree(ubi, e1, &ubi->scrub);
1095                 rb_erase(&e1->u.rb, &ubi->scrub);
1096                 dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
1097         }
1098
1099         ubi->move_from = e1;
1100         ubi->move_to = e2;
1101         spin_unlock(&ubi->wl_lock);
1102
1103         /*
1104          * Now we are going to copy physical eraseblock @e1->pnum to @e2->pnum.
1105          * We so far do not know which logical eraseblock our physical
1106          * eraseblock (@e1) belongs to. We have to read the volume identifier
1107          * header first.
1108          *
1109          * Note, we are protected from this PEB being unmapped and erased. The
1110          * 'ubi_wl_put_peb()' would wait for moving to be finished if the PEB
1111          * which is being moved was unmapped.
1112          */
1113
1114         err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0);
1115         if (err && err != UBI_IO_BITFLIPS) {
1116                 if (err == UBI_IO_FF) {
1117                         /*
1118                  * We are trying to move a PEB without a VID header. UBI
1119                  * always writes VID headers shortly after the PEB was
1120                          * given, so we have a situation when it has not yet
1121                          * had a chance to write it, because it was preempted.
1122                          * So add this PEB to the protection queue so far,
1123                          * because presumably more data will be written there
1124                          * (including the missing VID header), and then we'll
1125                          * move it.
1126                          */
1127                         dbg_wl("PEB %d has no VID header", e1->pnum);
1128                         protect = 1;
1129                         goto out_not_moved;
1130                 } else if (err == UBI_IO_FF_BITFLIPS) {
1131                         /*
1132                          * The same situation as %UBI_IO_FF, but bit-flips were
1133                          * detected. It is better to schedule this PEB for
1134                          * scrubbing.
1135                          */
1136                         dbg_wl("PEB %d has no VID header but has bit-flips",
1137                                e1->pnum);
1138                         scrubbing = 1;
1139                         goto out_not_moved;
1140                 }
1141
1142                 ubi_err("error %d while reading VID header from PEB %d",
1143                         err, e1->pnum);
1144                 goto out_error;
1145         }
1146
1147         vol_id = be32_to_cpu(vid_hdr->vol_id);
1148         lnum = be32_to_cpu(vid_hdr->lnum);
1149
1150         err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
1151         if (err) {
1152                 if (err == MOVE_CANCEL_RACE) {
1153                         /*
1154                          * The LEB has not been moved because the volume is
1155                          * being deleted or the PEB has been put meanwhile. We
1156                          * should prevent this PEB from being selected for
1157                          * wear-leveling movement again, so put it to the
1158                          * protection queue.
1159                          */
1160                         protect = 1;
1161                         goto out_not_moved;
1162                 }
1163                 if (err == MOVE_RETRY) {
1164                         scrubbing = 1;
1165                         goto out_not_moved;
1166                 }
1167                 if (err == MOVE_TARGET_BITFLIPS || err == MOVE_TARGET_WR_ERR ||
1168                     err == MOVE_TARGET_RD_ERR) {
1169                         /*
1170                          * Target PEB had bit-flips or write error - torture it.
1171                          */
1172                         torture = 1;
1173                         goto out_not_moved;
1174                 }
1175
1176                 if (err == MOVE_SOURCE_RD_ERR) {
1177                         /*
1178                          * An error happened while reading the source PEB. Do
1179                          * not switch to R/O mode in this case, and give the
1180                          * upper layers a possibility to recover from this,
1181                          * e.g. by unmapping corresponding LEB. Instead, just
1182                          * put this PEB to the @ubi->erroneous list to prevent
1183                          * UBI from trying to move it over and over again.
1184                          */
1185                         if (ubi->erroneous_peb_count > ubi->max_erroneous) {
1186                                 ubi_err("too many erroneous eraseblocks (%d)",
1187                                         ubi->erroneous_peb_count);
1188                                 goto out_error;
1189                         }
1190                         erroneous = 1;
1191                         goto out_not_moved;
1192                 }
1193
1194                 if (err < 0)
1195                         goto out_error;
1196
1197                 ubi_assert(0);
1198         }
1199
1200         /* The PEB has been successfully moved */
1201         if (scrubbing)
1202                 ubi_msg("scrubbed PEB %d (LEB %d:%d), data moved to PEB %d",
1203                         e1->pnum, vol_id, lnum, e2->pnum);
1204         ubi_free_vid_hdr(ubi, vid_hdr);
1205
1206         spin_lock(&ubi->wl_lock);
1207         if (!ubi->move_to_put) {
1208                 wl_tree_add(e2, &ubi->used);
1209                 e2 = NULL;
1210         }
1211         ubi->move_from = ubi->move_to = NULL;
1212         ubi->move_to_put = ubi->wl_scheduled = 0;
1213         spin_unlock(&ubi->wl_lock);
1214
1215         err = do_sync_erase(ubi, e1, vol_id, lnum, 0);
1216         if (err) {
1217                 kmem_cache_free(ubi_wl_entry_slab, e1);
1218                 if (e2)
1219                         kmem_cache_free(ubi_wl_entry_slab, e2);
1220                 goto out_ro;
1221         }
1222
1223         if (e2) {
1224                 /*
1225                  * Well, the target PEB was put meanwhile, schedule it for
1226                  * erasure.
1227                  */
1228                 dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase",
1229                        e2->pnum, vol_id, lnum);
1230                 err = do_sync_erase(ubi, e2, vol_id, lnum, 0);
1231                 if (err) {
1232                         kmem_cache_free(ubi_wl_entry_slab, e2);
1233                         goto out_ro;
1234                 }
1235         }
1236
1237         dbg_wl("done");
1238         mutex_unlock(&ubi->move_mutex);
1239         return 0;
1240
1241         /*
1242          * For some reason the LEB was not moved, might be an error, might be
1243          * something else. @e1 was not changed, so return it. @e2 might
1244          * have been changed, schedule it for erasure.
1245          */
1246 out_not_moved:
1247         if (vol_id != -1)
1248                 dbg_wl("cancel moving PEB %d (LEB %d:%d) to PEB %d (%d)",
1249                        e1->pnum, vol_id, lnum, e2->pnum, err);
1250         else
1251                 dbg_wl("cancel moving PEB %d to PEB %d (%d)",
1252                        e1->pnum, e2->pnum, err);
1253         spin_lock(&ubi->wl_lock);
1254         if (protect)
1255                 prot_queue_add(ubi, e1);
1256         else if (erroneous) {
1257                 wl_tree_add(e1, &ubi->erroneous);
1258                 ubi->erroneous_peb_count += 1;
1259         } else if (scrubbing)
1260                 wl_tree_add(e1, &ubi->scrub);
1261         else
1262                 wl_tree_add(e1, &ubi->used);
1263         ubi_assert(!ubi->move_to_put);
1264         ubi->move_from = ubi->move_to = NULL;
1265         ubi->wl_scheduled = 0;
1266         spin_unlock(&ubi->wl_lock);
1267
1268         ubi_free_vid_hdr(ubi, vid_hdr);
1269         err = do_sync_erase(ubi, e2, vol_id, lnum, torture);
1270         if (err) {
1271                 kmem_cache_free(ubi_wl_entry_slab, e2);
1272                 goto out_ro;
1273         }
1274         mutex_unlock(&ubi->move_mutex);
1275         return 0;
1276
1277 out_error:
1278         if (vol_id != -1)
1279                 ubi_err("error %d while moving PEB %d (LEB %d:%d) to PEB %d",
1280                         err, e1->pnum, vol_id, lnum, e2->pnum);
1281         else
1282                 ubi_err("error %d while moving PEB %d to PEB %d",
1283                         err, e1->pnum, e2->pnum);
1284         spin_lock(&ubi->wl_lock);
1285         ubi->move_from = ubi->move_to = NULL;
1286         ubi->move_to_put = ubi->wl_scheduled = 0;
1287         spin_unlock(&ubi->wl_lock);
1288
1289         ubi_free_vid_hdr(ubi, vid_hdr);
1290         kmem_cache_free(ubi_wl_entry_slab, e1);
1291         kmem_cache_free(ubi_wl_entry_slab, e2);
1292
1293 out_ro:
1294         ubi_ro_mode(ubi);
1295         mutex_unlock(&ubi->move_mutex);
1296         ubi_assert(err != 0);
1297         return err < 0 ? err : -EIO;
1298
1299 out_cancel:
1300         ubi->wl_scheduled = 0;
1301         spin_unlock(&ubi->wl_lock);
1302         mutex_unlock(&ubi->move_mutex);
1303         ubi_free_vid_hdr(ubi, vid_hdr);
1304         return 0;
1305 }
1306
1307 /**
1308  * ensure_wear_leveling - schedule wear-leveling if it is needed.
1309  * @ubi: UBI device description object
1310  * @nested: set to non-zero if this function is called from UBI worker
1311  *
1312  * This function checks if it is time to start wear-leveling and schedules it
1313  * if yes. This function returns zero in case of success and a negative error
1314  * code in case of failure.
1315  */
1316 static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
1317 {
1318         int err = 0;
1319         struct ubi_wl_entry *e1;
1320         struct ubi_wl_entry *e2;
1321         struct ubi_work *wrk;
1322
1323         spin_lock(&ubi->wl_lock);
1324         if (ubi->wl_scheduled)
1325                 /* Wear-leveling is already in the work queue */
1326                 goto out_unlock;
1327
1328         /*
1329          * If the @ubi->scrub tree is not empty, scrubbing is needed, and
1330          * the WL worker has to be scheduled anyway.
1331          */
1332         if (!ubi->scrub.rb_node) {
1333                 if (!ubi->used.rb_node || !ubi->free.rb_node)
1334                         /* No physical eraseblocks - no deal */
1335                         goto out_unlock;
1336
1337                 /*
1338                  * We schedule wear-leveling only if the difference between the
1339                  * lowest erase counter of used physical eraseblocks and a high
1340                  * erase counter of free physical eraseblocks is greater than
1341                  * %UBI_WL_THRESHOLD.
1342                  */
1343                 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
1344                 e2 = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
1345
1346                 if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
1347                         goto out_unlock;
1348                 dbg_wl("schedule wear-leveling");
1349         } else
1350                 dbg_wl("schedule scrubbing");
1351
1352         ubi->wl_scheduled = 1;
1353         spin_unlock(&ubi->wl_lock);
1354
1355         wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
1356         if (!wrk) {
1357                 err = -ENOMEM;
1358                 goto out_cancel;
1359         }
1360
1361         wrk->anchor = 0;
1362         wrk->func = &wear_leveling_worker;
1363         if (nested)
1364                 __schedule_ubi_work(ubi, wrk);
1365         else
1366                 schedule_ubi_work(ubi, wrk);
1367         return err;
1368
1369 out_cancel:
1370         spin_lock(&ubi->wl_lock);
1371         ubi->wl_scheduled = 0;
1372 out_unlock:
1373         spin_unlock(&ubi->wl_lock);
1374         return err;
1375 }
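
/*
 * Worked example for the threshold check above, with illustrative numbers:
 * with %UBI_WL_THRESHOLD of 4096, a lowest used EC of 1000 and a candidate
 * free EC of 4000, the difference (3000) is below the threshold and no work
 * is queued; at a candidate EC of 5100 the difference (4100) reaches the
 * threshold and the wear-leveling worker is scheduled.
 */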
1376
1377 #ifdef CONFIG_MTD_UBI_FASTMAP
1378 /**
1379  * ubi_ensure_anchor_pebs - schedule wear-leveling to produce an anchor PEB.
1380  * @ubi: UBI device description object
1381  */
1382 int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
1383 {
1384         struct ubi_work *wrk;
1385
1386         spin_lock(&ubi->wl_lock);
1387         if (ubi->wl_scheduled) {
1388                 spin_unlock(&ubi->wl_lock);
1389                 return 0;
1390         }
1391         ubi->wl_scheduled = 1;
1392         spin_unlock(&ubi->wl_lock);
1393
1394         wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
1395         if (!wrk) {
1396                 spin_lock(&ubi->wl_lock);
1397                 ubi->wl_scheduled = 0;
1398                 spin_unlock(&ubi->wl_lock);
1399                 return -ENOMEM;
1400         }
1401
1402         wrk->anchor = 1;
1403         wrk->func = &wear_leveling_worker;
1404         schedule_ubi_work(ubi, wrk);
1405         return 0;
1406 }
1407 #endif
1408
1409 /**
1410  * erase_worker - physical eraseblock erase worker function.
1411  * @ubi: UBI device description object
1412  * @wl_wrk: the work object
1413  * @cancel: non-zero if the worker has to free memory and exit
1414  *
1415  * This function erases a physical eraseblock and performs torture testing if
1416  * needed. It also takes care of marking the physical eraseblock bad if
1417  * needed. Returns zero in case of success and a negative error code in case of
1418  * failure.
1419  */
1420 static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
1421                         int cancel)
1422 {
1423         struct ubi_wl_entry *e = wl_wrk->e;
1424         int pnum = e->pnum;
1425         int vol_id = wl_wrk->vol_id;
1426         int lnum = wl_wrk->lnum;
1427         int err, available_consumed = 0;
1428
1429         if (cancel) {
1430                 dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
1431                 kfree(wl_wrk);
1432                 kmem_cache_free(ubi_wl_entry_slab, e);
1433                 return 0;
1434         }
1435
1436         dbg_wl("erase PEB %d EC %d LEB %d:%d",
1437                pnum, e->ec, wl_wrk->vol_id, wl_wrk->lnum);
1438
1439         ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
1440
1441         err = sync_erase(ubi, e, wl_wrk->torture);
1442         if (!err) {
1443                 /* Fine, we've erased it successfully */
1444                 kfree(wl_wrk);
1445
1446                 spin_lock(&ubi->wl_lock);
1447                 wl_tree_add(e, &ubi->free);
1448                 ubi->free_count++;
1449                 spin_unlock(&ubi->wl_lock);
1450
1451                 /*
1452                  * One more erase operation has happened, take care of
1453                  * protected physical eraseblocks.
1454                  */
1455                 serve_prot_queue(ubi);
1456
1457                 /* And take care about wear-leveling */
1458                 err = ensure_wear_leveling(ubi, 1);
1459                 return err;
1460         }
1461
1462         ubi_err("failed to erase PEB %d, error %d", pnum, err);
1463         kfree(wl_wrk);
1464
1465         if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
1466             err == -EBUSY) {
1467                 int err1;
1468
1469                 /* Re-schedule the PEB for erasure */
1470                 err1 = schedule_erase(ubi, e, vol_id, lnum, 0);
1471                 if (err1) {
1472                         err = err1;
1473                         goto out_ro;
1474                 }
1475                 return err;
1476         }
1477
1478         kmem_cache_free(ubi_wl_entry_slab, e);
1479         if (err != -EIO)
1480                 /*
1481                  * If this is not %-EIO, we have no idea what to do. Scheduling
1482                  * this physical eraseblock for erasure again would cause
1483                  * errors again and again. Well, let's switch to R/O mode.
1484                  */
1485                 goto out_ro;
1486
1487         /* It is %-EIO, the PEB went bad */
1488
1489         if (!ubi->bad_allowed) {
1490                 ubi_err("bad physical eraseblock %d detected", pnum);
1491                 goto out_ro;
1492         }
1493
1494         spin_lock(&ubi->volumes_lock);
1495         if (ubi->beb_rsvd_pebs == 0) {
1496                 if (ubi->avail_pebs == 0) {
1497                         spin_unlock(&ubi->volumes_lock);
1498                         ubi_err("no reserved/available physical eraseblocks");
1499                         goto out_ro;
1500                 }
1501                 ubi->avail_pebs -= 1;
1502                 available_consumed = 1;
1503         }
1504         spin_unlock(&ubi->volumes_lock);
1505
1506         ubi_msg("mark PEB %d as bad", pnum);
1507         err = ubi_io_mark_bad(ubi, pnum);
1508         if (err)
1509                 goto out_ro;
1510
1511         spin_lock(&ubi->volumes_lock);
1512         if (ubi->beb_rsvd_pebs > 0) {
1513                 if (available_consumed) {
1514                         /*
1515                          * The number of reserved PEBs increased since we last
1516                          * checked.
1517                          */
1518                         ubi->avail_pebs += 1;
1519                         available_consumed = 0;
1520                 }
1521                 ubi->beb_rsvd_pebs -= 1;
1522         }
1523         ubi->bad_peb_count += 1;
1524         ubi->good_peb_count -= 1;
1525         ubi_calculate_reserved(ubi);
1526         if (available_consumed)
1527                 ubi_warn("no PEBs in the reserved pool, used an available PEB");
1528         else if (ubi->beb_rsvd_pebs)
1529                 ubi_msg("%d PEBs left in the reserve", ubi->beb_rsvd_pebs);
1530         else
1531                 ubi_warn("last PEB from the reserve was used");
1532         spin_unlock(&ubi->volumes_lock);
1533
1534         return err;
1535
1536 out_ro:
1537         if (available_consumed) {
1538                 spin_lock(&ubi->volumes_lock);
1539                 ubi->avail_pebs += 1;
1540                 spin_unlock(&ubi->volumes_lock);
1541         }
1542         ubi_ro_mode(ubi);
1543         return err;
1544 }
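
/*
 * Illustration only, not part of the driver: how an erase request is
 * typically created. 'schedule_erase()' (defined earlier in this file)
 * queues a struct ubi_work whose ->func is 'erase_worker'; the
 * background thread later invokes it via 'do_work()'. The helper name
 * below is hypothetical; %UBI_UNKNOWN is the usual "no particular
 * volume/LEB" wildcard.
 */
static int __maybe_unused sketch_retire_peb(struct ubi_device *ubi,
                                            struct ubi_wl_entry *e)
{
        /* Last argument 0: no torture testing requested */
        return schedule_erase(ubi, e, UBI_UNKNOWN, UBI_UNKNOWN, 0);
}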
1545
1546 /**
1547  * ubi_wl_put_peb - return a PEB to the wear-leveling sub-system.
1548  * @ubi: UBI device description object
1549  * @vol_id: the volume ID that last used this PEB
1550  * @lnum: the last used logical eraseblock number for the PEB
1551  * @pnum: physical eraseblock to return
1552  * @torture: if this physical eraseblock has to be tortured
1553  *
1554  * This function is called to return physical eraseblock @pnum to the pool of
1555  * free physical eraseblocks. The @torture flag has to be set if an I/O error
1556  * occurred on @pnum and it has to be tested. This function returns zero
1557  * in case of success, and a negative error code in case of failure.
1558  */
1559 int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum,
1560                    int pnum, int torture)
1561 {
1562         int err;
1563         struct ubi_wl_entry *e;
1564
1565         dbg_wl("PEB %d", pnum);
1566         ubi_assert(pnum >= 0);
1567         ubi_assert(pnum < ubi->peb_count);
1568
1569 retry:
1570         spin_lock(&ubi->wl_lock);
1571         e = ubi->lookuptbl[pnum];
1572         if (e == ubi->move_from) {
1573                 /*
1574                  * User is putting the physical eraseblock which was selected to
1575                  * be moved. It will be scheduled for erasure in the
1576                  * wear-leveling worker.
1577                  */
1578                 dbg_wl("PEB %d is being moved, wait", pnum);
1579                 spin_unlock(&ubi->wl_lock);
1580
1581                 /* Wait for the WL worker by taking the @ubi->move_mutex */
1582                 mutex_lock(&ubi->move_mutex);
1583                 mutex_unlock(&ubi->move_mutex);
1584                 goto retry;
1585         } else if (e == ubi->move_to) {
1586                 /*
1587                  * User is putting the physical eraseblock which was selected
1588                  * as the target of data moving. This may happen if the EBA
1589                  * sub-system already re-mapped the LEB in 'ubi_eba_copy_leb()'
1590                  * while the WL sub-system has not yet put the PEB to the "used"
1591                  * tree, although it is about to do so. So we just set a flag which
1592                  * will tell the WL worker that the PEB is not needed anymore
1593                  * and should be scheduled for erasure.
1594                  */
1595                 dbg_wl("PEB %d is the target of data moving", pnum);
1596                 ubi_assert(!ubi->move_to_put);
1597                 ubi->move_to_put = 1;
1598                 spin_unlock(&ubi->wl_lock);
1599                 return 0;
1600         } else {
1601                 if (in_wl_tree(e, &ubi->used)) {
1602                         self_check_in_wl_tree(ubi, e, &ubi->used);
1603                         rb_erase(&e->u.rb, &ubi->used);
1604                 } else if (in_wl_tree(e, &ubi->scrub)) {
1605                         self_check_in_wl_tree(ubi, e, &ubi->scrub);
1606                         rb_erase(&e->u.rb, &ubi->scrub);
1607                 } else if (in_wl_tree(e, &ubi->erroneous)) {
1608                         self_check_in_wl_tree(ubi, e, &ubi->erroneous);
1609                         rb_erase(&e->u.rb, &ubi->erroneous);
1610                         ubi->erroneous_peb_count -= 1;
1611                         ubi_assert(ubi->erroneous_peb_count >= 0);
1612                         /* Erroneous PEBs should be tortured */
1613                         torture = 1;
1614                 } else {
1615                         err = prot_queue_del(ubi, e->pnum);
1616                         if (err) {
1617                                 ubi_err("PEB %d not found", pnum);
1618                                 ubi_ro_mode(ubi);
1619                                 spin_unlock(&ubi->wl_lock);
1620                                 return err;
1621                         }
1622                 }
1623         }
1624         spin_unlock(&ubi->wl_lock);
1625
1626         err = schedule_erase(ubi, e, vol_id, lnum, torture);
1627         if (err) {
1628                 spin_lock(&ubi->wl_lock);
1629                 wl_tree_add(e, &ubi->used);
1630                 spin_unlock(&ubi->wl_lock);
1631         }
1632
1633         return err;
1634 }
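
/*
 * Illustration only: how an upper layer returns a PEB after an I/O
 * error, which is roughly what the EBA sub-system does on a write
 * failure. The helper name and parameter values are hypothetical;
 * passing @torture = 1 makes the erase path torture-test the PEB
 * before it may be reused.
 */
static int __maybe_unused sketch_put_after_write_error(struct ubi_device *ubi,
                                                       int vol_id, int lnum,
                                                       int pnum)
{
        return ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
}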
1635
1636 /**
1637  * ubi_wl_scrub_peb - schedule a physical eraseblock for scrubbing.
1638  * @ubi: UBI device description object
1639  * @pnum: the physical eraseblock to schedule
1640  *
1641  * If a bit-flip in a physical eraseblock is detected, this physical eraseblock
1642  * needs scrubbing. This function schedules a physical eraseblock for
1643  * scrubbing, which is done in the background. It returns zero in case of
1644  * success and a negative error code in case of failure.
1645  */
1646 int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
1647 {
1648         struct ubi_wl_entry *e;
1649
1650         ubi_msg("schedule PEB %d for scrubbing", pnum);
1651
1652 retry:
1653         spin_lock(&ubi->wl_lock);
1654         e = ubi->lookuptbl[pnum];
1655         if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub) ||
1656                                    in_wl_tree(e, &ubi->erroneous)) {
1657                 spin_unlock(&ubi->wl_lock);
1658                 return 0;
1659         }
1660
1661         if (e == ubi->move_to) {
1662                 /*
1663                  * This physical eraseblock was used to move data to. The data
1664                  * was moved but the PEB was not yet inserted into the proper
1665                  * tree. We should just wait a little and let the WL worker
1666                  * proceed.
1667                  */
1668                 spin_unlock(&ubi->wl_lock);
1669                 dbg_wl("the PEB %d is not in proper tree, retry", pnum);
1670                 yield();
1671                 goto retry;
1672         }
1673
1674         if (in_wl_tree(e, &ubi->used)) {
1675                 self_check_in_wl_tree(ubi, e, &ubi->used);
1676                 rb_erase(&e->u.rb, &ubi->used);
1677         } else {
1678                 int err;
1679
1680                 err = prot_queue_del(ubi, e->pnum);
1681                 if (err) {
1682                         ubi_err("PEB %d not found", pnum);
1683                         ubi_ro_mode(ubi);
1684                         spin_unlock(&ubi->wl_lock);
1685                         return err;
1686                 }
1687         }
1688
1689         wl_tree_add(e, &ubi->scrub);
1690         spin_unlock(&ubi->wl_lock);
1691
1692         /*
1693          * Technically scrubbing is the same as wear-leveling, so it is done
1694          * by the WL worker.
1695          */
1696         return ensure_wear_leveling(ubi, 0);
1697 }
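
/*
 * Illustration only: a typical trigger for scrubbing. When the I/O
 * layer reports %UBI_IO_BITFLIPS (the data was recovered by ECC), the
 * caller hands the PEB to 'ubi_wl_scrub_peb()'. A minimal sketch with
 * a hypothetical helper name; error handling is trimmed.
 */
static int __maybe_unused sketch_read_and_scrub(struct ubi_device *ubi,
                                                int pnum, void *buf,
                                                int offset, int len)
{
        int err = ubi_io_read(ubi, buf, pnum, offset, len);

        if (err == UBI_IO_BITFLIPS)
                /* The data is valid, but the PEB should be moved soon */
                err = ubi_wl_scrub_peb(ubi, pnum);
        return err;
}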
1698
1699 /**
1700  * ubi_wl_flush - flush all pending works.
1701  * @ubi: UBI device description object
1702  * @vol_id: the volume id to flush for
1703  * @lnum: the logical eraseblock number to flush for
1704  *
1705  * This function executes all pending works for a particular volume id /
1706  * logical eraseblock number pair. If either value is set to %UBI_ALL, then it
1707  * acts as a wildcard for all of the corresponding volume numbers or logical
1708  * eraseblock numbers. It returns zero in case of success and a negative error
1709  * code in case of failure.
1710  */
1711 int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum)
1712 {
1713         int err = 0;
1714         int found = 1;
1715
1716         /*
1717          * Keep executing pending works that match the given volume ID and
1718          * LEB number until no matching work is left in the queue.
1719          */
1720         dbg_wl("flush pending work for LEB %d:%d (%d pending works)",
1721                vol_id, lnum, ubi->works_count);
1722
1723         while (found) {
1724                 struct ubi_work *wrk;
1725                 found = 0;
1726
1727                 down_read(&ubi->work_sem);
1728                 spin_lock(&ubi->wl_lock);
1729                 list_for_each_entry(wrk, &ubi->works, list) {
1730                         if ((vol_id == UBI_ALL || wrk->vol_id == vol_id) &&
1731                             (lnum == UBI_ALL || wrk->lnum == lnum)) {
1732                                 list_del(&wrk->list);
1733                                 ubi->works_count -= 1;
1734                                 ubi_assert(ubi->works_count >= 0);
1735                                 spin_unlock(&ubi->wl_lock);
1736
1737                                 err = wrk->func(ubi, wrk, 0);
1738                                 if (err) {
1739                                         up_read(&ubi->work_sem);
1740                                         return err;
1741                                 }
1742
1743                                 spin_lock(&ubi->wl_lock);
1744                                 found = 1;
1745                                 break;
1746                         }
1747                 }
1748                 spin_unlock(&ubi->wl_lock);
1749                 up_read(&ubi->work_sem);
1750         }
1751
1752         /*
1753          * Make sure all the works which have been done in parallel are
1754          * finished.
1755          */
1756         down_write(&ubi->work_sem);
1757         up_write(&ubi->work_sem);
1758
1759         return err;
1760 }
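
/*
 * Illustration only: %UBI_ALL acts as a wildcard, so the call below
 * drains every pending work item, e.g. before detaching the device.
 * The helper name is hypothetical.
 */
static int __maybe_unused sketch_flush_everything(struct ubi_device *ubi)
{
        return ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
}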
1761
1762 /**
1763  * tree_destroy - destroy an RB-tree.
1764  * @root: the root of the tree to destroy
1765  */
1766 static void tree_destroy(struct rb_root *root)
1767 {
1768         struct rb_node *rb;
1769         struct ubi_wl_entry *e;
1770
1771         rb = root->rb_node;
1772         while (rb) {
1773                 if (rb->rb_left)
1774                         rb = rb->rb_left;
1775                 else if (rb->rb_right)
1776                         rb = rb->rb_right;
1777                 else {
1778                         e = rb_entry(rb, struct ubi_wl_entry, u.rb);
1779
1780                         rb = rb_parent(rb);
1781                         if (rb) {
1782                                 if (rb->rb_left == &e->u.rb)
1783                                         rb->rb_left = NULL;
1784                                 else
1785                                         rb->rb_right = NULL;
1786                         }
1787
1788                         kmem_cache_free(ubi_wl_entry_slab, e);
1789                 }
1790         }
1791 }
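
/*
 * Note on the walk above: rather than recursing, 'tree_destroy()'
 * repeatedly descends to a leaf, frees it, and clears the parent's
 * child pointer. Stack usage stays constant and no rebalancing is
 * needed, since the whole tree is being torn down anyway.
 */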
1792
1793 /**
1794  * ubi_thread - UBI background thread.
1795  * @u: the UBI device description object pointer
1796  */
1797 int ubi_thread(void *u)
1798 {
1799         int failures = 0;
1800         struct ubi_device *ubi = u;
1801
1802         ubi_msg("background thread \"%s\" started, PID %d",
1803                 ubi->bgt_name, task_pid_nr(current));
1804
1805         set_freezable();
1806         for (;;) {
1807                 int err;
1808
1809                 if (kthread_should_stop())
1810                         break;
1811
1812                 if (try_to_freeze())
1813                         continue;
1814
1815                 spin_lock(&ubi->wl_lock);
1816                 if (list_empty(&ubi->works) || ubi->ro_mode ||
1817                     !ubi->thread_enabled || ubi_dbg_is_bgt_disabled(ubi)) {
1818                         set_current_state(TASK_INTERRUPTIBLE);
1819                         spin_unlock(&ubi->wl_lock);
1820                         schedule();
1821                         continue;
1822                 }
1823                 spin_unlock(&ubi->wl_lock);
1824
1825                 err = do_work(ubi);
1826                 if (err) {
1827                         ubi_err("%s: work failed with error code %d",
1828                                 ubi->bgt_name, err);
1829                         if (failures++ > WL_MAX_FAILURES) {
1830                                 /*
1831                                  * Too many failures, disable the thread and
1832                                  * switch to read-only mode.
1833                                  */
1834                                 ubi_msg("%s: %d consecutive failures",
1835                                         ubi->bgt_name, WL_MAX_FAILURES);
1836                                 ubi_ro_mode(ubi);
1837                                 ubi->thread_enabled = 0;
1838                                 continue;
1839                         }
1840                 } else
1841                         failures = 0;
1842
1843                 cond_resched();
1844         }
1845
1846         dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
1847         return 0;
1848 }
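
/*
 * Illustration only: the thread above is started elsewhere in UBI
 * (see build.c in this directory) roughly as sketched below, assuming
 * the task pointer lives in @ubi->bgt_thread. The helper name is
 * hypothetical.
 */
static int __maybe_unused sketch_start_bgt(struct ubi_device *ubi)
{
        ubi->bgt_thread = kthread_create(ubi_thread, ubi, "%s", ubi->bgt_name);
        if (IS_ERR(ubi->bgt_thread))
                return PTR_ERR(ubi->bgt_thread);
        wake_up_process(ubi->bgt_thread);
        return 0;
}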
1849
1850 /**
1851  * cancel_pending - cancel all pending works.
1852  * @ubi: UBI device description object
1853  */
1854 static void cancel_pending(struct ubi_device *ubi)
1855 {
1856         while (!list_empty(&ubi->works)) {
1857                 struct ubi_work *wrk;
1858
1859                 wrk = list_entry(ubi->works.next, struct ubi_work, list);
1860                 list_del(&wrk->list);
1861                 wrk->func(ubi, wrk, 1);
1862                 ubi->works_count -= 1;
1863                 ubi_assert(ubi->works_count >= 0);
1864         }
1865 }
1866
1867 /**
1868  * ubi_wl_init - initialize the WL sub-system using attaching information.
1869  * @ubi: UBI device description object
1870  * @ai: attaching information
1871  *
1872  * This function returns zero in case of success, and a negative error code in
1873  * case of failure.
1874  */
1875 int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
1876 {
1877         int err, i, reserved_pebs, found_pebs = 0;
1878         struct rb_node *rb1, *rb2;
1879         struct ubi_ainf_volume *av;
1880         struct ubi_ainf_peb *aeb, *tmp;
1881         struct ubi_wl_entry *e;
1882
1883         ubi->used = ubi->erroneous = ubi->free = ubi->scrub = RB_ROOT;
1884         spin_lock_init(&ubi->wl_lock);
1885         mutex_init(&ubi->move_mutex);
1886         init_rwsem(&ubi->work_sem);
1887         ubi->max_ec = ai->max_ec;
1888         INIT_LIST_HEAD(&ubi->works);
1889 #ifndef __UBOOT__
1890 #ifdef CONFIG_MTD_UBI_FASTMAP
1891         INIT_WORK(&ubi->fm_work, update_fastmap_work_fn);
1892 #endif
1893 #endif
1894
1895         sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);
1896
1897         err = -ENOMEM;
1898         ubi->lookuptbl = kzalloc(ubi->peb_count * sizeof(void *), GFP_KERNEL);
1899         if (!ubi->lookuptbl)
1900                 return err;
1901
1902         for (i = 0; i < UBI_PROT_QUEUE_LEN; i++)
1903                 INIT_LIST_HEAD(&ubi->pq[i]);
1904         ubi->pq_head = 0;
1905
1906         list_for_each_entry_safe(aeb, tmp, &ai->erase, u.list) {
1907                 cond_resched();
1908
1909                 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1910                 if (!e)
1911                         goto out_free;
1912
1913                 e->pnum = aeb->pnum;
1914                 e->ec = aeb->ec;
1915                 ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
1916                 ubi->lookuptbl[e->pnum] = e;
1917                 if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0)) {
1918                         kmem_cache_free(ubi_wl_entry_slab, e);
1919                         goto out_free;
1920                 }
1921
1922                 found_pebs++;
1923         }
1924
1925         ubi->free_count = 0;
1926         list_for_each_entry(aeb, &ai->free, u.list) {
1927                 cond_resched();
1928
1929                 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1930                 if (!e)
1931                         goto out_free;
1932
1933                 e->pnum = aeb->pnum;
1934                 e->ec = aeb->ec;
1935                 ubi_assert(e->ec >= 0);
1936                 ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
1937
1938                 wl_tree_add(e, &ubi->free);
1939                 ubi->free_count++;
1940
1941                 ubi->lookuptbl[e->pnum] = e;
1942
1943                 found_pebs++;
1944         }
1945
1946         ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
1947                 ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
1948                         cond_resched();
1949
1950                         e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1951                         if (!e)
1952                                 goto out_free;
1953
1954                         e->pnum = aeb->pnum;
1955                         e->ec = aeb->ec;
1956                         ubi->lookuptbl[e->pnum] = e;
1957
1958                         if (!aeb->scrub) {
1959                                 dbg_wl("add PEB %d EC %d to the used tree",
1960                                        e->pnum, e->ec);
1961                                 wl_tree_add(e, &ubi->used);
1962                         } else {
1963                                 dbg_wl("add PEB %d EC %d to the scrub tree",
1964                                        e->pnum, e->ec);
1965                                 wl_tree_add(e, &ubi->scrub);
1966                         }
1967
1968                         found_pebs++;
1969                 }
1970         }
1971
1972         dbg_wl("found %i PEBs", found_pebs);
1973
1974         if (ubi->fm)
1975                 ubi_assert(ubi->good_peb_count ==
1976                            found_pebs + ubi->fm->used_blocks);
1977         else
1978                 ubi_assert(ubi->good_peb_count == found_pebs);
1979
1980         reserved_pebs = WL_RESERVED_PEBS;
1981 #ifdef CONFIG_MTD_UBI_FASTMAP
1982         /* Reserve enough LEBs to store two fastmaps. */
1983         reserved_pebs += (ubi->fm_size / ubi->leb_size) * 2;
1984 #endif
1985
1986         if (ubi->avail_pebs < reserved_pebs) {
1987                 ubi_err("not enough physical eraseblocks (%d, need %d)",
1988                         ubi->avail_pebs, reserved_pebs);
1989                 if (ubi->corr_peb_count)
1990                         ubi_err("%d PEBs are corrupted and not used",
1991                                 ubi->corr_peb_count);
1992                 goto out_free;
1993         }
1994         ubi->avail_pebs -= reserved_pebs;
1995         ubi->rsvd_pebs += reserved_pebs;
1996
1997         /* Schedule wear-leveling if needed */
1998         err = ensure_wear_leveling(ubi, 0);
1999         if (err)
2000                 goto out_free;
2001
2002         return 0;
2003
2004 out_free:
2005         cancel_pending(ubi);
2006         tree_destroy(&ubi->used);
2007         tree_destroy(&ubi->free);
2008         tree_destroy(&ubi->scrub);
2009         kfree(ubi->lookuptbl);
2010         return err;
2011 }
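
/*
 * Worked example for the fastmap reservation above (hypothetical
 * numbers): with fm_size = 256 KiB and leb_size = 128 KiB, two fastmap
 * copies need (256 / 128) * 2 = 4 PEBs on top of %WL_RESERVED_PEBS.
 */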
2012
2013 /**
2014  * protection_queue_destroy - destroy the protection queue.
2015  * @ubi: UBI device description object
2016  */
2017 static void protection_queue_destroy(struct ubi_device *ubi)
2018 {
2019         int i;
2020         struct ubi_wl_entry *e, *tmp;
2021
2022         for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) {
2023                 list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) {
2024                         list_del(&e->u.list);
2025                         kmem_cache_free(ubi_wl_entry_slab, e);
2026                 }
2027         }
2028 }
2029
2030 /**
2031  * ubi_wl_close - close the wear-leveling sub-system.
2032  * @ubi: UBI device description object
2033  */
2034 void ubi_wl_close(struct ubi_device *ubi)
2035 {
2036         dbg_wl("close the WL sub-system");
2037         cancel_pending(ubi);
2038         protection_queue_destroy(ubi);
2039         tree_destroy(&ubi->used);
2040         tree_destroy(&ubi->erroneous);
2041         tree_destroy(&ubi->free);
2042         tree_destroy(&ubi->scrub);
2043         kfree(ubi->lookuptbl);
2044 }
2045
2046 /**
2047  * self_check_ec - make sure that the erase counter of a PEB is correct.
2048  * @ubi: UBI device description object
2049  * @pnum: the physical eraseblock number to check
2050  * @ec: the erase counter to check
2051  *
2052  * This function returns zero if the erase counter of physical eraseblock @pnum
2053  * is equivalent to @ec, and a negative error code if not or if an error
2054  * occurred.
2055  */
2056 static int self_check_ec(struct ubi_device *ubi, int pnum, int ec)
2057 {
2058         int err;
2059         long long read_ec;
2060         struct ubi_ec_hdr *ec_hdr;
2061
2062         if (!ubi_dbg_chk_gen(ubi))
2063                 return 0;
2064
2065         ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
2066         if (!ec_hdr)
2067                 return -ENOMEM;
2068
2069         err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
2070         if (err && err != UBI_IO_BITFLIPS) {
2071                 /* The header does not have to exist */
2072                 err = 0;
2073                 goto out_free;
2074         }
2075
2076         read_ec = be64_to_cpu(ec_hdr->ec);
2077         if (ec != read_ec && read_ec - ec > 1) {
2078                 ubi_err("self-check failed for PEB %d", pnum);
2079                 ubi_err("read EC is %lld, should be %d", read_ec, ec);
2080                 dump_stack();
2081                 err = 1;
2082         } else
2083                 err = 0;
2084
2085 out_free:
2086         kfree(ec_hdr);
2087         return err;
2088 }
2089
2090 /**
2091  * self_check_in_wl_tree - check that wear-leveling entry is in WL RB-tree.
2092  * @ubi: UBI device description object
2093  * @e: the wear-leveling entry to check
2094  * @root: the root of the tree
2095  *
2096  * This function returns zero if @e is in the @root RB-tree and %-EINVAL if it
2097  * is not.
2098  */
2099 static int self_check_in_wl_tree(const struct ubi_device *ubi,
2100                                  struct ubi_wl_entry *e, struct rb_root *root)
2101 {
2102         if (!ubi_dbg_chk_gen(ubi))
2103                 return 0;
2104
2105         if (in_wl_tree(e, root))
2106                 return 0;
2107
2108         ubi_err("self-check failed for PEB %d, EC %d, RB-tree %p",
2109                 e->pnum, e->ec, root);
2110         dump_stack();
2111         return -EINVAL;
2112 }
2113
2114 /**
2115  * self_check_in_pq - check if wear-leveling entry is in the protection
2116  *                        queue.
2117  * @ubi: UBI device description object
2118  * @e: the wear-leveling entry to check
2119  *
2120  * This function returns zero if @e is in @ubi->pq and %-EINVAL if it is not.
2121  */
2122 static int self_check_in_pq(const struct ubi_device *ubi,
2123                             struct ubi_wl_entry *e)
2124 {
2125         struct ubi_wl_entry *p;
2126         int i;
2127
2128         if (!ubi_dbg_chk_gen(ubi))
2129                 return 0;
2130
2131         for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
2132                 list_for_each_entry(p, &ubi->pq[i], u.list)
2133                         if (p == e)
2134                                 return 0;
2135
2136         ubi_err("self-check failed for PEB %d, EC %d, Protect queue",
2137                 e->pnum, e->ec);
2138         dump_stack();
2139         return -EINVAL;
2140 }