/*
 * Copyright (c) 2012 Linutronix GmbH
 * Copyright (c) 2014 sigma star gmbh
 * Author: Richard Weinberger <richard@nod.at>
 *
 * SPDX-License-Identifier:     GPL-2.0+
 *
 */

/**
 * update_fastmap_work_fn - calls ubi_update_fastmap from a work queue
 * @wrk: the work description object
 */
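/*
 * Under U-Boot there are no work queues, so the __UBOOT__ build provides a
 * non-static variant that takes the ubi device directly and is called
 * synchronously from get_peb_for_wl() instead of being scheduled as work.
 */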
#ifndef __UBOOT__
static void update_fastmap_work_fn(struct work_struct *wrk)
#else
void update_fastmap_work_fn(struct ubi_device *ubi)
#endif
{
#ifndef __UBOOT__
	struct ubi_device *ubi = container_of(wrk, struct ubi_device, fm_work);
#endif

	ubi_update_fastmap(ubi);
	spin_lock(&ubi->wl_lock);
	ubi->fm_work_scheduled = 0;
	spin_unlock(&ubi->wl_lock);
}

/**
 * find_anchor_wl_entry - find a wear-leveling entry to be used as anchor PEB.
 * @root: the RB-tree to search
 */
static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root)
{
	struct rb_node *p;
	struct ubi_wl_entry *e, *victim = NULL;
	int max_ec = UBI_MAX_ERASECOUNTER;

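	/*
	 * Pick the PEB with the lowest erase counter among those that can
	 * hold the fastmap super block (i.e. pnum < UBI_FM_MAX_START).
	 */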
	ubi_rb_for_each_entry(p, e, root, u.rb) {
		if (e->pnum < UBI_FM_MAX_START && e->ec < max_ec) {
			victim = e;
			max_ec = e->ec;
		}
	}

	return victim;
}

/**
 * return_unused_pool_pebs - returns unused PEBs to the free tree.
 * @ubi: UBI device description object
 * @pool: fastmap pool description object
 */
static void return_unused_pool_pebs(struct ubi_device *ubi,
				    struct ubi_fm_pool *pool)
{
	int i;
	struct ubi_wl_entry *e;

	for (i = pool->used; i < pool->size; i++) {
		e = ubi->lookuptbl[pool->pebs[i]];
		wl_tree_add(e, &ubi->free);
		ubi->free_count++;
	}
}

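/**
 * anchor_pebs_avalible - checks whether a free PEB is usable as anchor PEB.
 * @root: RB-tree of free wear-leveling entries to search
 *
 * Returns 1 if the free tree contains a PEB below UBI_FM_MAX_START which
 * could hold the fastmap super block, 0 otherwise.
 */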
static int anchor_pebs_avalible(struct rb_root *root)
{
	struct rb_node *p;
	struct ubi_wl_entry *e;

	ubi_rb_for_each_entry(p, e, root, u.rb)
		if (e->pnum < UBI_FM_MAX_START)
			return 1;

	return 0;
}

/**
 * ubi_wl_get_fm_peb - find a physical erase block with a given maximal number.
 * @ubi: UBI device description object
 * @anchor: This PEB will be used as anchor PEB by fastmap
 *
 * The function returns a physical erase block with a given maximal number
 * and removes it from the wl subsystem.
 * Must be called with wl_lock held!
 */
struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor)
{
	struct ubi_wl_entry *e = NULL;

	if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1))
		goto out;

	if (anchor)
		e = find_anchor_wl_entry(&ubi->free);
	else
		e = find_mean_wl_entry(ubi, &ubi->free);

	if (!e)
		goto out;

	self_check_in_wl_tree(ubi, e, &ubi->free);

	/*
	 * Remove it from the free list; the WL sub-system no longer knows
	 * about this erase block.
	 */
	rb_erase(&e->u.rb, &ubi->free);
	ubi->free_count--;
out:
	return e;
}

/**
 * ubi_refill_pools - refills all fastmap PEB pools.
 * @ubi: UBI device description object
 */
void ubi_refill_pools(struct ubi_device *ubi)
{
	struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
	struct ubi_fm_pool *pool = &ubi->fm_pool;
	struct ubi_wl_entry *e;
	int enough;

	spin_lock(&ubi->wl_lock);

	return_unused_pool_pebs(ubi, wl_pool);
	return_unused_pool_pebs(ubi, pool);

	wl_pool->size = 0;
	pool->size = 0;

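	/*
	 * Fill both pools in lock step: each iteration tries to move one PEB
	 * from the free tree into the user pool (fm_pool) and one into the
	 * WL pool (fm_wl_pool). "enough" counts the pools that are already
	 * full; once both are full, or the free tree cannot supply any more
	 * PEBs, the loop stops. The WL pool also leaves ubi->beb_rsvd_pebs
	 * plus a small margin of PEBs untouched for bad block handling.
	 */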
	for (;;) {
		enough = 0;
		if (pool->size < pool->max_size) {
			if (!ubi->free.rb_node)
				break;

			e = wl_get_wle(ubi);
			if (!e)
				break;

			pool->pebs[pool->size] = e->pnum;
			pool->size++;
		} else
			enough++;

		if (wl_pool->size < wl_pool->max_size) {
			if (!ubi->free.rb_node ||
			   (ubi->free_count - ubi->beb_rsvd_pebs < 5))
				break;

			e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
			self_check_in_wl_tree(ubi, e, &ubi->free);
			rb_erase(&e->u.rb, &ubi->free);
			ubi->free_count--;

			wl_pool->pebs[wl_pool->size] = e->pnum;
			wl_pool->size++;
		} else
			enough++;

		if (enough == 2)
			break;
	}

	wl_pool->used = 0;
	pool->used = 0;

	spin_unlock(&ubi->wl_lock);
}

/**
 * ubi_wl_get_peb - get a physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function returns a physical eraseblock in case of success and a
 * negative error code in case of failure.
 * Returns with ubi->fm_eba_sem held in read mode!
 */
int ubi_wl_get_peb(struct ubi_device *ubi)
{
	int ret, retried = 0;
	struct ubi_fm_pool *pool = &ubi->fm_pool;
	struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;

again:
	down_read(&ubi->fm_eba_sem);
	spin_lock(&ubi->wl_lock);

	/*
	 * We also check the WL pool here because at this point we can
	 * refill it synchronously.
	 */
	if (pool->used == pool->size || wl_pool->used == wl_pool->size) {
		spin_unlock(&ubi->wl_lock);
		up_read(&ubi->fm_eba_sem);
		ret = ubi_update_fastmap(ubi);
		if (ret) {
			ubi_msg(ubi, "Unable to write a new fastmap: %i", ret);
			down_read(&ubi->fm_eba_sem);
			return -ENOSPC;
		}
		down_read(&ubi->fm_eba_sem);
		spin_lock(&ubi->wl_lock);
	}

	if (pool->used == pool->size) {
		spin_unlock(&ubi->wl_lock);
		if (retried) {
			ubi_err(ubi, "Unable to get a free PEB from user WL pool");
			ret = -ENOSPC;
			goto out;
		}
		retried = 1;
		up_read(&ubi->fm_eba_sem);
		goto again;
	}

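	/*
	 * Hand out the next PEB from the user pool and put it on the
	 * protection queue so the WL sub-system will not move its contents
	 * while the caller is still writing to it.
	 */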
	ubi_assert(pool->used < pool->size);
	ret = pool->pebs[pool->used++];
	prot_queue_add(ubi, ubi->lookuptbl[ret]);
	spin_unlock(&ubi->wl_lock);
out:
	return ret;
}

/**
 * get_peb_for_wl - returns a PEB to be used internally by the WL sub-system.
 *
 * @ubi: UBI device description object
 */
static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
{
	struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
	int pnum;

	if (pool->used == pool->size) {
#ifndef __UBOOT__
		/*
		 * We cannot update the fastmap here because this function
		 * is called in atomic context.
		 * Let's fail here and refill/update it as soon as possible.
		 */
		if (!ubi->fm_work_scheduled) {
			ubi->fm_work_scheduled = 1;
			schedule_work(&ubi->fm_work);
		}
		return NULL;
#else
		/*
		 * No work queues in U-Boot, we must do this immediately
		 */
		update_fastmap_work_fn(ubi);
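		/*
		 * The synchronous update above also refilled the pools via
		 * ubi_refill_pools(), so we can fall through and take a PEB
		 * from the re-initialised WL pool below.
		 */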
#endif
	}

	pnum = pool->pebs[pool->used++];
	return ubi->lookuptbl[pnum];
}

/**
 * ubi_ensure_anchor_pebs - schedule wear-leveling to produce an anchor PEB.
 * @ubi: UBI device description object
 */
int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
{
	struct ubi_work *wrk;

	spin_lock(&ubi->wl_lock);
	if (ubi->wl_scheduled) {
		spin_unlock(&ubi->wl_lock);
		return 0;
	}
	ubi->wl_scheduled = 1;
	spin_unlock(&ubi->wl_lock);

	wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wrk) {
		spin_lock(&ubi->wl_lock);
		ubi->wl_scheduled = 0;
		spin_unlock(&ubi->wl_lock);
		return -ENOMEM;
	}

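	/*
	 * Ask the wear-leveling worker to produce a free PEB below
	 * UBI_FM_MAX_START that can serve as the fastmap anchor.
	 */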
	wrk->anchor = 1;
	wrk->func = &wear_leveling_worker;
	schedule_ubi_work(ubi, wrk);
	return 0;
}

/**
 * ubi_wl_put_fm_peb - returns a PEB used in a fastmap to the wear-leveling
 * sub-system.
 * see: ubi_wl_put_peb()
 *
 * @ubi: UBI device description object
 * @fm_e: physical eraseblock to return
 * @lnum: the last used logical eraseblock number for the PEB
 * @torture: if this physical eraseblock has to be tortured
 */
int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *fm_e,
		      int lnum, int torture)
{
	struct ubi_wl_entry *e;
	int vol_id, pnum = fm_e->pnum;

	dbg_wl("PEB %d", pnum);

	ubi_assert(pnum >= 0);
	ubi_assert(pnum < ubi->peb_count);

	spin_lock(&ubi->wl_lock);
	e = ubi->lookuptbl[pnum];

	/*
	 * This can happen if we recovered from a fastmap for the very first
	 * time and are now writing a new one. In this case the wl sub-system
	 * has never seen any PEB used by the original fastmap.
	 */
	if (!e) {
		e = fm_e;
		ubi_assert(e->ec >= 0);
		ubi->lookuptbl[pnum] = e;
	}

	spin_unlock(&ubi->wl_lock);

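	/*
	 * LEB 0 of a fastmap holds the super block, all other LEBs hold
	 * fastmap data; tag the erase work with the matching internal
	 * volume ID.
	 */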
	vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID;
	return schedule_erase(ubi, e, vol_id, lnum, torture);
}

/**
 * ubi_is_erase_work - checks whether a work is erase work.
 * @wrk: The work object to be checked
 */
int ubi_is_erase_work(struct ubi_work *wrk)
{
	return wrk->func == erase_worker;
}

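/**
 * ubi_fastmap_close - returns all fastmap pool PEBs and frees the in-memory
 * fastmap state.
 * @ubi: UBI device description object
 */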
static void ubi_fastmap_close(struct ubi_device *ubi)
{
	int i;

	return_unused_pool_pebs(ubi, &ubi->fm_pool);
	return_unused_pool_pebs(ubi, &ubi->fm_wl_pool);

	if (ubi->fm) {
		for (i = 0; i < ubi->fm->used_blocks; i++)
			kfree(ubi->fm->e[i]);
	}
	kfree(ubi->fm);
}

/**
 * may_reserve_for_fm - tests whether a PEB shall be reserved for fastmap.
 * See find_mean_wl_entry()
 *
 * @ubi: UBI device description object
 * @e: physical eraseblock to return
 * @root: RB tree to test against.
 */
static struct ubi_wl_entry *may_reserve_for_fm(struct ubi_device *ubi,
					       struct ubi_wl_entry *e,
					       struct rb_root *root)
{
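	/*
	 * If the chosen PEB could still serve as a fastmap anchor
	 * (pnum < UBI_FM_MAX_START) and no fastmap exists yet, keep it in
	 * reserve and hand out the next entry from the free tree instead.
	 */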
	if (e && !ubi->fm_disabled && !ubi->fm &&
	    e->pnum < UBI_FM_MAX_START)
		e = rb_entry(rb_next(root->rb_node),
			     struct ubi_wl_entry, u.rb);

	return e;
}