// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (c) 2012 Linutronix GmbH
 * Copyright (c) 2014 sigma star gmbh
 * Author: Richard Weinberger <richard@nod.at>
 *
 */

/**
 * update_fastmap_work_fn - calls ubi_update_fastmap from a work queue
 * @wrk: the work description object
 */
#ifndef __UBOOT__
static void update_fastmap_work_fn(struct work_struct *wrk)
#else
void update_fastmap_work_fn(struct ubi_device *ubi)
#endif
{
#ifndef __UBOOT__
        struct ubi_device *ubi = container_of(wrk, struct ubi_device, fm_work);
#endif

        ubi_update_fastmap(ubi);
        spin_lock(&ubi->wl_lock);
        ubi->fm_work_scheduled = 0;
        spin_unlock(&ubi->wl_lock);
}
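
/*
 * Note on how the function above is invoked: in the Linux build it is not
 * called directly but attached to ubi->fm_work and run from the generic
 * work queue. A minimal sketch of that wiring (it lives in wl.c and is
 * shown here only for illustration):
 *
 *      INIT_WORK(&ubi->fm_work, update_fastmap_work_fn);
 *      ...
 *      schedule_work(&ubi->fm_work);
 *
 * U-Boot has no work queues, so get_peb_for_wl() below calls
 * update_fastmap_work_fn() synchronously instead.
 */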

/**
 * find_anchor_wl_entry - find a wear-leveling entry to be used as anchor PEB.
 * @root: the RB-tree to look in
 */
static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root)
{
        struct rb_node *p;
        struct ubi_wl_entry *e, *victim = NULL;
        int max_ec = UBI_MAX_ERASECOUNTER;

        ubi_rb_for_each_entry(p, e, root, u.rb) {
                if (e->pnum < UBI_FM_MAX_START && e->ec < max_ec) {
                        victim = e;
                        max_ec = e->ec;
                }
        }

        return victim;
}

/**
 * return_unused_pool_pebs - returns unused PEBs to the free tree.
 * @ubi: UBI device description object
 * @pool: fastmap pool description object
 */
static void return_unused_pool_pebs(struct ubi_device *ubi,
                                    struct ubi_fm_pool *pool)
{
        int i;
        struct ubi_wl_entry *e;

        for (i = pool->used; i < pool->size; i++) {
                e = ubi->lookuptbl[pool->pebs[i]];
                wl_tree_add(e, &ubi->free);
                ubi->free_count++;
        }
}

/* returns non-zero if the free tree contains a PEB that can serve as a
 * fastmap anchor, i.e. one whose number is below UBI_FM_MAX_START */
static int anchor_pebs_avalible(struct rb_root *root)
{
        struct rb_node *p;
        struct ubi_wl_entry *e;

        ubi_rb_for_each_entry(p, e, root, u.rb)
                if (e->pnum < UBI_FM_MAX_START)
                        return 1;

        return 0;
}

/**
 * ubi_wl_get_fm_peb - find a physical erase block with a given maximal number.
 * @ubi: UBI device description object
 * @anchor: This PEB will be used as anchor PEB by fastmap
 *
 * The function returns a physical erase block with a given maximal number
 * and removes it from the wl subsystem.
 * Must be called with wl_lock held!
 */
struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor)
{
        struct ubi_wl_entry *e = NULL;

        if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1))
                goto out;

        if (anchor)
                e = find_anchor_wl_entry(&ubi->free);
        else
                e = find_mean_wl_entry(ubi, &ubi->free);

        if (!e)
                goto out;

        self_check_in_wl_tree(ubi, e, &ubi->free);

        /* remove it from the free list,
         * the wl subsystem no longer knows this erase block */
        rb_erase(&e->u.rb, &ubi->free);
        ubi->free_count--;
out:
        return e;
}
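
/*
 * Illustrative caller sketch (not part of this file; names and error
 * handling are assumptions for the example only): because the function
 * above must run under wl_lock, a fastmap writer would use it roughly
 * like this:
 *
 *      spin_lock(&ubi->wl_lock);
 *      anchor_peb = ubi_wl_get_fm_peb(ubi, 1);
 *      spin_unlock(&ubi->wl_lock);
 *      if (!anchor_peb)
 *              return -ENOSPC;
 */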

/**
 * ubi_refill_pools - refills all fastmap PEB pools.
 * @ubi: UBI device description object
 */
void ubi_refill_pools(struct ubi_device *ubi)
{
        struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
        struct ubi_fm_pool *pool = &ubi->fm_pool;
        struct ubi_wl_entry *e;
        int enough;

        spin_lock(&ubi->wl_lock);

        return_unused_pool_pebs(ubi, wl_pool);
        return_unused_pool_pebs(ubi, pool);

        wl_pool->size = 0;
        pool->size = 0;

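        /*
         * Fill both pools in lock-step: each iteration tries to add one PEB
         * to the user pool (fm_pool) and one to the WL pool (fm_wl_pool).
         * The loop stops once both pools are full (enough == 2) or the free
         * tree cannot supply more PEBs; the WL pool additionally leaves
         * beb_rsvd_pebs plus a small reserve untouched.
         */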
        for (;;) {
                enough = 0;
                if (pool->size < pool->max_size) {
                        if (!ubi->free.rb_node)
                                break;

                        e = wl_get_wle(ubi);
                        if (!e)
                                break;

                        pool->pebs[pool->size] = e->pnum;
                        pool->size++;
                } else
                        enough++;

                if (wl_pool->size < wl_pool->max_size) {
                        if (!ubi->free.rb_node ||
                           (ubi->free_count - ubi->beb_rsvd_pebs < 5))
                                break;

                        e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
                        self_check_in_wl_tree(ubi, e, &ubi->free);
                        rb_erase(&e->u.rb, &ubi->free);
                        ubi->free_count--;

                        wl_pool->pebs[wl_pool->size] = e->pnum;
                        wl_pool->size++;
                } else
                        enough++;

                if (enough == 2)
                        break;
        }

        wl_pool->used = 0;
        pool->used = 0;

        spin_unlock(&ubi->wl_lock);
}

/**
 * produce_free_peb - produce a free physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function tries to make a free PEB by means of synchronous execution of
 * pending works. This may be needed if, for example, the background thread is
 * disabled. Returns zero in case of success and a negative error code in case
 * of failure.
 */
static int produce_free_peb(struct ubi_device *ubi)
{
        int err;

        while (!ubi->free.rb_node && ubi->works_count) {
                dbg_wl("do one work synchronously");
                err = do_work(ubi);

                if (err)
                        return err;
        }

        return 0;
}

/**
 * ubi_wl_get_peb - get a physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function returns a physical eraseblock in case of success and a
 * negative error code in case of failure.
 * Returns with ubi->fm_eba_sem held in read mode!
 */
int ubi_wl_get_peb(struct ubi_device *ubi)
{
        int ret, retried = 0;
        struct ubi_fm_pool *pool = &ubi->fm_pool;
        struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;

again:
        down_read(&ubi->fm_eba_sem);
        spin_lock(&ubi->wl_lock);

        /* We check the WL pool here as well, because at this point we can
         * still refill the WL pool synchronously. */
        if (pool->used == pool->size || wl_pool->used == wl_pool->size) {
                spin_unlock(&ubi->wl_lock);
                up_read(&ubi->fm_eba_sem);
                ret = ubi_update_fastmap(ubi);
                if (ret) {
                        ubi_msg(ubi, "Unable to write a new fastmap: %i", ret);
                        down_read(&ubi->fm_eba_sem);
                        return -ENOSPC;
                }
                down_read(&ubi->fm_eba_sem);
                spin_lock(&ubi->wl_lock);
        }

        if (pool->used == pool->size) {
                spin_unlock(&ubi->wl_lock);
                if (retried) {
                        ubi_err(ubi, "Unable to get a free PEB from user WL pool");
                        ret = -ENOSPC;
                        goto out;
                }
                retried = 1;
                up_read(&ubi->fm_eba_sem);
                ret = produce_free_peb(ubi);
                if (ret < 0) {
                        down_read(&ubi->fm_eba_sem);
                        goto out;
                }
                goto again;
        }

        ubi_assert(pool->used < pool->size);
        ret = pool->pebs[pool->used++];
        prot_queue_add(ubi, ubi->lookuptbl[ret]);
        spin_unlock(&ubi->wl_lock);
out:
        return ret;
}
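
/*
 * Illustrative caller sketch (not part of this file): ubi_wl_get_peb()
 * returns with fm_eba_sem held in read mode even on failure, so the caller
 * is responsible for dropping it. A minimal, hypothetical usage pattern:
 *
 *      pnum = ubi_wl_get_peb(ubi);
 *      if (pnum < 0) {
 *              up_read(&ubi->fm_eba_sem);
 *              return pnum;
 *      }
 *      ... write to PEB pnum ...
 *      up_read(&ubi->fm_eba_sem);
 */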

/* get_peb_for_wl - returns a PEB to be used internally by the WL sub-system.
 *
 * @ubi: UBI device description object
 */
static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
{
        struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
        int pnum;

        if (pool->used == pool->size) {
#ifndef __UBOOT__
                /* We cannot update the fastmap here because this
                 * function is called in atomic context.
                 * Let's fail here and refill/update it as soon as possible. */
                if (!ubi->fm_work_scheduled) {
                        ubi->fm_work_scheduled = 1;
                        schedule_work(&ubi->fm_work);
                }
                return NULL;
#else
                /*
                 * No work queues in U-Boot, we must do this immediately
                 */
                update_fastmap_work_fn(ubi);
#endif
        }

        pnum = pool->pebs[pool->used++];
        return ubi->lookuptbl[pnum];
}
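
/*
 * Illustrative caller sketch (simplified; the real caller lives in wl.c):
 * the wear-leveling worker already holds wl_lock and runs in atomic
 * context, so it picks its destination PEB like this and bails out if the
 * pool is empty:
 *
 *      e2 = get_peb_for_wl(ubi);
 *      if (!e2)
 *              goto out_cancel;        (label is an assumption for the sketch)
 */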

/**
 * ubi_ensure_anchor_pebs - schedule wear-leveling to produce an anchor PEB.
 * @ubi: UBI device description object
 */
int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
{
        struct ubi_work *wrk;

        spin_lock(&ubi->wl_lock);
        if (ubi->wl_scheduled) {
                spin_unlock(&ubi->wl_lock);
                return 0;
        }
        ubi->wl_scheduled = 1;
        spin_unlock(&ubi->wl_lock);

        wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
        if (!wrk) {
                spin_lock(&ubi->wl_lock);
                ubi->wl_scheduled = 0;
                spin_unlock(&ubi->wl_lock);
                return -ENOMEM;
        }

        wrk->anchor = 1;
        wrk->func = &wear_leveling_worker;
        schedule_ubi_work(ubi, wrk);
        return 0;
}
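
/*
 * Illustrative caller sketch (an assumption for the example; the real
 * caller sits in the fastmap update path): before a fastmap is written,
 * the code can make sure a PEB below UBI_FM_MAX_START will become free:
 *
 *      err = ubi_ensure_anchor_pebs(ubi);
 *      if (err)
 *              return err;
 */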

/**
 * ubi_wl_put_fm_peb - returns a PEB used in a fastmap to the wear-leveling
 * sub-system.
 * see: ubi_wl_put_peb()
 *
 * @ubi: UBI device description object
 * @fm_e: physical eraseblock to return
 * @lnum: the last used logical eraseblock number for the PEB
 * @torture: if this physical eraseblock has to be tortured
 */
int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *fm_e,
                      int lnum, int torture)
{
        struct ubi_wl_entry *e;
        int vol_id, pnum = fm_e->pnum;

        dbg_wl("PEB %d", pnum);

        ubi_assert(pnum >= 0);
        ubi_assert(pnum < ubi->peb_count);

        spin_lock(&ubi->wl_lock);
        e = ubi->lookuptbl[pnum];

        /* This can happen if we recovered from a fastmap for the very
         * first time and are now writing a new one. In this case the wl
         * subsystem has never seen any PEB used by the original fastmap.
         */
        if (!e) {
                e = fm_e;
                ubi_assert(e->ec >= 0);
                ubi->lookuptbl[pnum] = e;
        }

        spin_unlock(&ubi->wl_lock);

        vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID;
        return schedule_erase(ubi, e, vol_id, lnum, torture);
}
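
/*
 * Illustrative caller sketch (hypothetical, for the example only): after a
 * new fastmap has been written, the PEBs of the previous fastmap can be
 * handed back one by one, e.g.:
 *
 *      for (i = 0; i < old_fm->used_blocks; i++)
 *              ubi_wl_put_fm_peb(ubi, old_fm->e[i], i, 0);
 */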

/**
 * ubi_is_erase_work - checks whether a work is erase work.
 * @wrk: The work object to be checked
 */
int ubi_is_erase_work(struct ubi_work *wrk)
{
        return wrk->func == erase_worker;
}

/**
 * ubi_fastmap_close - returns the pool PEBs to the free tree and frees the
 * in-memory fastmap.
 * @ubi: UBI device description object
 */
static void ubi_fastmap_close(struct ubi_device *ubi)
{
        int i;

        return_unused_pool_pebs(ubi, &ubi->fm_pool);
        return_unused_pool_pebs(ubi, &ubi->fm_wl_pool);

        if (ubi->fm) {
                for (i = 0; i < ubi->fm->used_blocks; i++)
                        kfree(ubi->fm->e[i]);
        }
        kfree(ubi->fm);
}

/**
 * may_reserve_for_fm - tests whether a PEB shall be reserved for fastmap.
 * See find_mean_wl_entry()
 *
 * @ubi: UBI device description object
 * @e: physical eraseblock to return
 * @root: RB tree to test against.
 */
static struct ubi_wl_entry *may_reserve_for_fm(struct ubi_device *ubi,
                                           struct ubi_wl_entry *e,
                                           struct rb_root *root)
{
        if (e && !ubi->fm_disabled && !ubi->fm &&
            e->pnum < UBI_FM_MAX_START)
                e = rb_entry(rb_next(root->rb_node),
                             struct ubi_wl_entry, u.rb);

        return e;
}