1 /* $NetBSD: ebh.c,v 1.1 2011/11/24 15:51:32 ahoka Exp $ */
2
3 /*-
4 * Copyright (c) 2010 Department of Software Engineering,
5 * University of Szeged, Hungary
6 * Copyright (C) 2009 Ferenc Havasi <havasi (at) inf.u-szeged.hu>
7 * Copyright (C) 2009 Zoltan Sogor <weth (at) inf.u-szeged.hu>
8 * Copyright (C) 2009 David Tengeri <dtengeri (at) inf.u-szeged.hu>
9 * Copyright (C) 2009 Tamas Toth <ttoth (at) inf.u-szeged.hu>
10 * Copyright (C) 2010 Adam Hoka <ahoka (at) NetBSD.org>
11 * All rights reserved.
12 *
13 * This code is derived from software contributed to The NetBSD Foundation
14 * by the Department of Software Engineering, University of Szeged, Hungary
15 *
16 * Redistribution and use in source and binary forms, with or without
17 * modification, are permitted provided that the following conditions
18 * are met:
19 * 1. Redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer.
21 * 2. Redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
26 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
27 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
28 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
29 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
30 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
31 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
32 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
33 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * SUCH DAMAGE.
36 */
37
38 #include "ebh.h"
39
40 /*****************************************************************************/
41 /* Flash specific operations */
42 /*****************************************************************************/
43 int nor_create_eb_hdr(struct chfs_eb_hdr *ebhdr, int lnr);
44 int nand_create_eb_hdr(struct chfs_eb_hdr *ebhdr, int lnr);
45 int nor_calc_data_offs(struct chfs_ebh *ebh, int pebnr, int offset);
46 int nand_calc_data_offs(struct chfs_ebh *ebh, int pebnr, int offset);
47 int nor_read_eb_hdr(struct chfs_ebh *ebh, int pebnr, struct chfs_eb_hdr *ebhdr);
48 int nand_read_eb_hdr(struct chfs_ebh *ebh, int pebnr, struct chfs_eb_hdr *ebhdr);
49 int nor_write_eb_hdr(struct chfs_ebh *ebh, int pebnr, struct chfs_eb_hdr *ebhdr);
50 int nand_write_eb_hdr(struct chfs_ebh *ebh, int pebnr, struct chfs_eb_hdr *ebhdr);
51 int nor_check_eb_hdr(struct chfs_ebh *ebh, void *buf);
52 int nand_check_eb_hdr(struct chfs_ebh *ebh, void *buf);
53 int nor_mark_eb_hdr_dirty_flash(struct chfs_ebh *ebh, int pebnr, int lid);
54 int nor_invalidate_eb_hdr(struct chfs_ebh *ebh, int pebnr);
55 int mark_eb_hdr_free(struct chfs_ebh *ebh, int pebnr, int ec);
56
57 int ltree_entry_cmp(struct chfs_ltree_entry *le1, struct chfs_ltree_entry *le2);
58 int peb_in_use_cmp(struct chfs_peb *peb1, struct chfs_peb *peb2);
59 int peb_free_cmp(struct chfs_peb *peb1, struct chfs_peb *peb2);
60 int add_peb_to_erase_queue(struct chfs_ebh *ebh, int pebnr, int ec, struct peb_queue *queue);
61 struct chfs_peb * find_peb_in_use(struct chfs_ebh *ebh, int pebnr);
62 int add_peb_to_free(struct chfs_ebh *ebh, int pebnr, int ec);
63 int add_peb_to_in_use(struct chfs_ebh *ebh, int pebnr, int ec);
64 void erase_callback(struct flash_erase_instruction *ei);
65 int free_peb(struct chfs_ebh *ebh);
66 int release_peb(struct chfs_ebh *ebh, int pebnr);
67 void erase_thread(void *data);
68 static void erase_thread_start(struct chfs_ebh *ebh);
69 static void erase_thread_stop(struct chfs_ebh *ebh);
70 int scan_leb_used_cmp(struct chfs_scan_leb *sleb1, struct chfs_scan_leb *sleb2);
71 int nor_scan_add_to_used(struct chfs_ebh *ebh, struct chfs_scan_info *si, struct chfs_eb_hdr *ebhdr, int pebnr, int leb_status);
72 int nor_process_eb(struct chfs_ebh *ebh, struct chfs_scan_info *si,
73 int pebnr, struct chfs_eb_hdr *ebhdr);
74 int nand_scan_add_to_used(struct chfs_ebh *ebh, struct chfs_scan_info *si, struct chfs_eb_hdr *ebhdr, int pebnr);
75 int nand_process_eb(struct chfs_ebh *ebh, struct chfs_scan_info *si,
76 int pebnr, struct chfs_eb_hdr *ebhdr);
77 struct chfs_scan_info *chfs_scan(struct chfs_ebh *ebh);
78 void scan_info_destroy(struct chfs_scan_info *si);
79 int scan_media(struct chfs_ebh *ebh);
80 int get_peb(struct chfs_ebh *ebh);
81 /**
82 * nor_create_eb_hdr - creates an eraseblock header for NOR flash
83 * @ebhdr: ebhdr to set
84 * @lnr: LEB number
85 */
86 int
87 nor_create_eb_hdr(struct chfs_eb_hdr *ebhdr, int lnr)
88 {
89 ebhdr->u.nor_hdr.lid = htole32(lnr);
90 return 0;
91 }
92
93 /**
94 * nand_create_eb_hdr - creates an eraseblock header for NAND flash
95 * @ebhdr: ebhdr to set
96 * @lnr: LEB number
97 */
98 int
99 nand_create_eb_hdr(struct chfs_eb_hdr *ebhdr, int lnr)
100 {
101 ebhdr->u.nand_hdr.lid = htole32(lnr);
102 return 0;
103 }
104
105 /**
106 * nor_calc_data_offs - calculates data offset on NOR flash
107 * @ebh: chfs eraseblock handler
108 * @pebnr: eraseblock number
109 * @offset: offset within the eraseblock
110 */
111 int
112 nor_calc_data_offs(struct chfs_ebh *ebh, int pebnr, int offset)
113 {
114 return pebnr * ebh->flash_if->erasesize + offset +
115 CHFS_EB_EC_HDR_SIZE + CHFS_EB_HDR_NOR_SIZE;
116 }
117
118 /**
119 * nand_calc_data_offs - calculates data offset on NAND flash
120 * @ebh: chfs eraseblock handler
121 * @pebnr: eraseblock number
122 * @offset: offset within the eraseblock
123 */
124 int
125 nand_calc_data_offs(struct chfs_ebh *ebh, int pebnr, int offset)
126 {
127 return pebnr * ebh->flash_if->erasesize + offset +
128 2 * ebh->flash_if->page_size;
129 }
130
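/*
 * Worked example of the two offset calculations above (illustrative only;
 * the geometry values are hypothetical, not taken from a real chip).
 * Assume erasesize = 128 KiB (131072), page_size = 2 KiB (2048), and that
 * we want byte 100 of PEB 3:
 *
 *	NAND:	3 * 131072 + 100 + 2 * 2048 = 397412
 *	NOR:	3 * 131072 + 100 + CHFS_EB_EC_HDR_SIZE + CHFS_EB_HDR_NOR_SIZE
 *
 * i.e. on NAND the data area starts after the two header pages, while on
 * NOR it starts right after the two packed headers.
 */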
131 /**
132 * nor_read_eb_hdr - read eraseblock header from NOR flash
133 *
134 * @ebh: chfs eraseblock handler
135 * @pebnr: eraseblock number
136 * @ebhdr: where to store the data
137 *
138 * Reads the eraseblock header from media.
139 * Returns zero in case of success, error code in case of fail.
140 */
141 int
142 nor_read_eb_hdr(struct chfs_ebh *ebh,
143 int pebnr, struct chfs_eb_hdr *ebhdr)
144 {
145 int ret;
146 size_t retlen;
147 off_t ofs = pebnr * ebh->flash_if->erasesize;
148
149 KASSERT(pebnr >= 0 && pebnr < ebh->peb_nr);
150
151 ret = flash_read(ebh->flash_dev,
152 ofs, CHFS_EB_EC_HDR_SIZE,
153 &retlen, (unsigned char *) &ebhdr->ec_hdr);
154
155 if (ret || retlen != CHFS_EB_EC_HDR_SIZE)
156 return ret;
157
158 ofs += CHFS_EB_EC_HDR_SIZE;
159 ret = flash_read(ebh->flash_dev,
160 ofs, CHFS_EB_HDR_NOR_SIZE,
161 &retlen, (unsigned char *) &ebhdr->u.nor_hdr);
162
163 if (ret || retlen != CHFS_EB_HDR_NOR_SIZE)
164 return ret;
165
166 return 0;
167 }
168
169 /**
170 * nand_read_eb_hdr - read eraseblock header from NAND flash
171 *
172 * @ebh: chfs eraseblock handler
173 * @pebnr: eraseblock number
174 * @ebhdr: where to store the data
175 *
176 * Reads the eraseblock header from the media. It occupies the first two pages.
177 * Returns zero in case of success, error code in case of fail.
178 */
179 int
180 nand_read_eb_hdr(struct chfs_ebh *ebh, int pebnr,
181 struct chfs_eb_hdr *ebhdr)
182 {
183 int ret;
184 size_t retlen;
185 off_t ofs;
186
187 KASSERT(pebnr >= 0 && pebnr < ebh->peb_nr);
188
189 /* Read erase counter header from the first page. */
190 ofs = pebnr * ebh->flash_if->erasesize;
191 ret = flash_read(ebh->flash_dev,
192 ofs, CHFS_EB_EC_HDR_SIZE, &retlen,
193 (unsigned char *) &ebhdr->ec_hdr);
194 if (ret || retlen != CHFS_EB_EC_HDR_SIZE)
195 return ret;
196
197 /* Read NAND eraseblock header from the second page */
198 ofs += ebh->flash_if->page_size;
199 ret = flash_read(ebh->flash_dev,
200 ofs, CHFS_EB_HDR_NAND_SIZE, &retlen,
201 (unsigned char *) &ebhdr->u.nand_hdr);
202 if (ret || retlen != CHFS_EB_HDR_NAND_SIZE)
203 return ret;
204
205 return 0;
206 }
207
208 /**
209 * nor_write_eb_hdr - write eraseblock header to NOR flash
210 *
211 * @ebh: chfs eraseblock handler
212 * @pebnr: eraseblock number to write to
213 * @ebhdr: eraseblock header to write
214 *
215 * Writes the eraseblock header to media.
216 * Returns zero in case of success, error code in case of fail.
217 */
218 int
219 nor_write_eb_hdr(struct chfs_ebh *ebh, int pebnr, struct chfs_eb_hdr *ebhdr)
220 {
221 int ret, crc;
222 size_t retlen;
223
224 off_t ofs = pebnr * ebh->flash_if->erasesize + CHFS_EB_EC_HDR_SIZE;
225
226 ebhdr->u.nor_hdr.lid = ebhdr->u.nor_hdr.lid
227 | htole32(CHFS_LID_NOT_DIRTY_BIT);
228
229 crc = crc32(0, (uint8_t *)&ebhdr->u.nor_hdr + 4,
230 CHFS_EB_HDR_NOR_SIZE - 4);
231 	ebhdr->u.nor_hdr.crc = htole32(crc);
232
233 KASSERT(pebnr >= 0 && pebnr < ebh->peb_nr);
234
235 ret = flash_write(ebh->flash_dev,
236 ofs, CHFS_EB_HDR_NOR_SIZE, &retlen,
237 (unsigned char *) &ebhdr->u.nor_hdr);
238
239 if (ret || retlen != CHFS_EB_HDR_NOR_SIZE)
240 return ret;
241
242 return 0;
243 }
244
245 /**
246 * nand_write_eb_hdr - write eraseblock header to NAND flash
247 *
248 * @ebh: chfs eraseblock handler
249 * @pebnr: eraseblock number to write to
250 * @ebhdr: eraseblock header to write
251 *
252 * Writes the eraseblock header to media.
253 * Returns zero in case of success, error code in case of fail.
254 */
255 int
256 nand_write_eb_hdr(struct chfs_ebh *ebh, int pebnr,
257 struct chfs_eb_hdr *ebhdr)
258 {
259 int ret, crc;
260 size_t retlen;
261 flash_off_t ofs;
262
263 KASSERT(pebnr >= 0 && pebnr < ebh->peb_nr);
264
265 ofs = pebnr * ebh->flash_if->erasesize +
266 ebh->flash_if->page_size;
267
268 ebhdr->u.nand_hdr.serial = htole64(++(*ebh->max_serial));
269
270 crc = crc32(0, (uint8_t *)&ebhdr->u.nand_hdr + 4,
271 CHFS_EB_HDR_NAND_SIZE - 4);
272 ebhdr->u.nand_hdr.crc = htole32(crc);
273
274 ret = flash_write(ebh->flash_dev, ofs,
275 CHFS_EB_HDR_NAND_SIZE, &retlen,
276 (unsigned char *) &ebhdr->u.nand_hdr);
277
278 if (ret || retlen != CHFS_EB_HDR_NAND_SIZE)
279 return ret;
280
281 return 0;
282 }
283
284 /**
285 * nor_check_eb_hdr - check eraseblock header read from NOR flash
286 *
287 * @ebh: chfs eraseblock handler
288 * @buf: eraseblock header to check
289 *
290 * Returns eraseblock header status.
291 */
292 int
293 nor_check_eb_hdr(struct chfs_ebh *ebh, void *buf)
294 {
295 uint32_t magic, crc, hdr_crc;
296 struct chfs_eb_hdr *ebhdr = buf;
297 le32 lid_save;
298
299 	// check whether there is a header
300 if (check_pattern((void *) &ebhdr->ec_hdr,
301 0xFF, 0, CHFS_EB_EC_HDR_SIZE)) {
302 dbg_ebh("no header found\n");
303 return EBHDR_LEB_NO_HDR;
304 }
305
306 // check magic
307 magic = le32toh(ebhdr->ec_hdr.magic);
308 if (magic != CHFS_MAGIC_BITMASK) {
309 dbg_ebh("bad magic bitmask(exp: %x found %x)\n",
310 CHFS_MAGIC_BITMASK, magic);
311 return EBHDR_LEB_BADMAGIC;
312 }
313
314 // check CRC_EC
315 hdr_crc = le32toh(ebhdr->ec_hdr.crc_ec);
316 crc = crc32(0, (uint8_t *) &ebhdr->ec_hdr + 8, 4);
317 if (hdr_crc != crc) {
318 dbg_ebh("bad crc_ec found\n");
319 return EBHDR_LEB_BADCRC;
320 }
321
322 	/* check if the PEB is free: magic, crc_ec and erase_cnt are good and
323 	 * everything else is 0xFF
324 */
325 if (check_pattern((void *) &ebhdr->u.nor_hdr, 0xFF, 0,
326 CHFS_EB_HDR_NOR_SIZE)) {
327 dbg_ebh("free peb found\n");
328 return EBHDR_LEB_FREE;
329 }
330
331 // check invalidated (CRC == LID == 0)
332 if (ebhdr->u.nor_hdr.crc == 0 && ebhdr->u.nor_hdr.lid == 0) {
333 dbg_ebh("invalidated ebhdr found\n");
334 return EBHDR_LEB_INVALIDATED;
335 }
336
337 // check CRC
338 hdr_crc = le32toh(ebhdr->u.nor_hdr.crc);
339 lid_save = ebhdr->u.nor_hdr.lid;
340
341 // mark lid as not dirty for crc calc
342 ebhdr->u.nor_hdr.lid = ebhdr->u.nor_hdr.lid | htole32(
343 CHFS_LID_NOT_DIRTY_BIT);
344 crc = crc32(0, (uint8_t *) &ebhdr->u.nor_hdr + 4,
345 CHFS_EB_HDR_NOR_SIZE - 4);
346 // restore the original lid value in ebh
347 ebhdr->u.nor_hdr.lid = lid_save;
348
349 if (crc != hdr_crc) {
350 dbg_ebh("bad crc found\n");
351 return EBHDR_LEB_BADCRC;
352 }
353
354 // check dirty
355 if (!(le32toh(lid_save) & CHFS_LID_NOT_DIRTY_BIT)) {
356 dbg_ebh("dirty ebhdr found\n");
357 return EBHDR_LEB_DIRTY;
358 }
359
360 return EBHDR_LEB_OK;
361 }
362
363 /**
364 * nand_check_eb_hdr - check eraseblock header read from NAND flash
365 *
366 * @ebh: chfs eraseblock handler
367 * @buf: eraseblock header to check
368 *
369 * Returns eraseblock header status.
370 */
371 int
372 nand_check_eb_hdr(struct chfs_ebh *ebh, void *buf)
373 {
374 uint32_t magic, crc, hdr_crc;
375 struct chfs_eb_hdr *ebhdr = buf;
376
377 	// check whether there is a header
378 if (check_pattern((void *) &ebhdr->ec_hdr,
379 0xFF, 0, CHFS_EB_EC_HDR_SIZE)) {
380 dbg_ebh("no header found\n");
381 return EBHDR_LEB_NO_HDR;
382 }
383
384 // check magic
385 magic = le32toh(ebhdr->ec_hdr.magic);
386 if (magic != CHFS_MAGIC_BITMASK) {
387 dbg_ebh("bad magic bitmask(exp: %x found %x)\n",
388 CHFS_MAGIC_BITMASK, magic);
389 return EBHDR_LEB_BADMAGIC;
390 }
391
392 // check CRC_EC
393 hdr_crc = le32toh(ebhdr->ec_hdr.crc_ec);
394 crc = crc32(0, (uint8_t *) &ebhdr->ec_hdr + 8, 4);
395 if (hdr_crc != crc) {
396 dbg_ebh("bad crc_ec found\n");
397 return EBHDR_LEB_BADCRC;
398 }
399
400 	/* check if the PEB is free: magic, crc_ec and erase_cnt are good and
401 	 * everything else is 0xFF
402 */
403 if (check_pattern((void *) &ebhdr->u.nand_hdr, 0xFF, 0,
404 CHFS_EB_HDR_NAND_SIZE)) {
405 dbg_ebh("free peb found\n");
406 return EBHDR_LEB_FREE;
407 }
408
409 // check CRC
410 hdr_crc = le32toh(ebhdr->u.nand_hdr.crc);
411
412 crc = crc32(0, (uint8_t *) &ebhdr->u.nand_hdr + 4,
413 CHFS_EB_HDR_NAND_SIZE - 4);
414
415 if (crc != hdr_crc) {
416 dbg_ebh("bad crc found\n");
417 return EBHDR_LEB_BADCRC;
418 }
419
420 return EBHDR_LEB_OK;
421 }
422
423 /**
424 * nor_mark_eb_hdr_dirty_flash - mark eraseblock header dirty on NOR flash
425 *
426 * @ebh: chfs eraseblock handler
427 * @pebnr: eraseblock number
428 * @lid: LEB id (its bit number 31 will be set to 0)
429 *
430 * It clears CHFS_LID_NOT_DIRTY_BIT on the flash.
431 *
432 * Returns zero in case of success, error code in case of fail.
433 */
434 int
435 nor_mark_eb_hdr_dirty_flash(struct chfs_ebh *ebh, int pebnr, int lid)
436 {
437 int ret;
438 size_t retlen;
439 off_t ofs;
440
441 /* mark leb id dirty */
442 lid = htole32(lid & CHFS_LID_DIRTY_BIT_MASK);
443
444 /* calculate position */
445 ofs = pebnr * ebh->flash_if->erasesize + CHFS_EB_EC_HDR_SIZE
446 + CHFS_GET_MEMBER_POS(struct chfs_nor_eb_hdr , lid);
447
448 ret = flash_write(ebh->flash_dev, ofs, sizeof(lid), &retlen,
449 (unsigned char *) &lid);
450 if (ret || retlen != sizeof(lid)) {
451 chfs_err("can't mark peb dirty");
452 return ret;
453 }
454
455 return 0;
456 }
457
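/*
 * Illustrative sketch of why the in-place dirty marking above works on NOR
 * flash (assuming, as the comment above states, that bit 31 of the lid word
 * is CHFS_LID_NOT_DIRTY_BIT; the actual constants live in ebh.h):
 *
 *	lid word on flash:       0x8000002A   (LEB 42, not-dirty bit set)
 *	value written over it:   0x0000002A   (lid & CHFS_LID_DIRTY_BIT_MASK)
 *
 * NOR flash can only clear bits (1 -> 0) without an erase, so overwriting
 * the word with the same value minus bit 31 is a legal program operation
 * and flips the header from clean to dirty in place.
 */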
458 /**
459 * nor_invalidate_eb_hdr - invalidate eraseblock header on NOR flash
460 *
461 * @ebh: chfs eraseblock handler
462 * @pebnr: eraseblock number
463 *
464 * Sets the crc and lid fields to zero.
465 * Returns zero in case of success, error code in case of fail.
466 */
467 int
468 nor_invalidate_eb_hdr(struct chfs_ebh *ebh, int pebnr)
469 {
470 int ret;
471 size_t retlen;
472 off_t ofs;
473 char zero_buf[CHFS_INVALIDATE_SIZE];
474
475 /* fill with zero */
476 memset(zero_buf, 0x0, CHFS_INVALIDATE_SIZE);
477
478 /* calculate position (!!! lid is directly behind crc !!!) */
479 ofs = pebnr * ebh->flash_if->erasesize + CHFS_EB_EC_HDR_SIZE
480 + CHFS_GET_MEMBER_POS(struct chfs_nor_eb_hdr, crc);
481
482 ret = flash_write(ebh->flash_dev,
483 ofs, CHFS_INVALIDATE_SIZE, &retlen,
484 (unsigned char *) &zero_buf);
485 if (ret || retlen != CHFS_INVALIDATE_SIZE) {
486 chfs_err("can't invalidate peb");
487 return ret;
488 }
489
490 return 0;
491 }
492
493 /**
494 * mark_eb_hdr_free - mark an eraseblock as free on NOR or NAND flash
495 *
496 * @ebh: chfs eraseblock handler
497 * @pebnr: eraseblock number
498 * @ec: erase counter of PEB
499 *
500 * Write out the magic and erase counter to the physical eraseblock.
501 * Returns zero in case of success, error code in case of fail.
502 */
503 int
504 mark_eb_hdr_free(struct chfs_ebh *ebh, int pebnr, int ec)
505 {
506 int ret, crc;
507 size_t retlen;
508 off_t ofs;
509 struct chfs_eb_hdr *ebhdr;
510 ebhdr = kmem_alloc(sizeof(struct chfs_eb_hdr), KM_SLEEP);
511
512 ebhdr->ec_hdr.magic = htole32(CHFS_MAGIC_BITMASK);
513 ebhdr->ec_hdr.erase_cnt = htole32(ec);
514 crc = crc32(0, (uint8_t *) &ebhdr->ec_hdr + 8, 4);
515 ebhdr->ec_hdr.crc_ec = htole32(crc);
516
517 ofs = pebnr * ebh->flash_if->erasesize;
518
519 KASSERT(sizeof(ebhdr->ec_hdr) == CHFS_EB_EC_HDR_SIZE);
520
521 ret = flash_write(ebh->flash_dev,
522 ofs, CHFS_EB_EC_HDR_SIZE, &retlen,
523 (unsigned char *) &ebhdr->ec_hdr);
524
525 if (ret || retlen != CHFS_EB_EC_HDR_SIZE) {
526 chfs_err("can't mark peb as free: %d\n", pebnr);
527 kmem_free(ebhdr, sizeof(struct chfs_eb_hdr));
528 return ret;
529 }
530
531 kmem_free(ebhdr, sizeof(struct chfs_eb_hdr));
532 return 0;
533 }
534
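/*
 * On-flash PEB layout implied by the helpers above (sketch only; the exact
 * header sizes are defined in ebh.h):
 *
 *	NOR PEB:	[ ec_hdr | nor_hdr | data ... ]
 *			  offset 0, then +CHFS_EB_EC_HDR_SIZE
 *
 *	NAND PEB:	[ page 0: ec_hdr | page 1: nand_hdr | pages 2..: data ]
 *
 * The erase counter header always sits at the very start of the PEB, which
 * is why mark_eb_hdr_free() only has to rewrite that first piece.
 */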
535 /*****************************************************************************/
536 /* End of Flash specific operations */
537 /*****************************************************************************/
538
539 /*****************************************************************************/
540 /* Lock Tree */
541 /*****************************************************************************/
542
543 int
544 ltree_entry_cmp(struct chfs_ltree_entry *le1,
545 struct chfs_ltree_entry *le2)
546 {
547 return (le1->lnr - le2->lnr);
548 }
549
550 /* Generate functions for Lock tree's red-black tree */
551 RB_PROTOTYPE( ltree_rbtree, chfs_ltree_entry, rb, ltree_entry_cmp);
552 RB_GENERATE( ltree_rbtree, chfs_ltree_entry, rb, ltree_entry_cmp);
553
554
555 /**
556 * ltree_lookup - looks up a logical eraseblock in the lock tree
557 * @ebh: chfs eraseblock handler
558 * @lnr: identifier of the logical eraseblock
559 *
560 * This function returns a pointer to the wanted &struct chfs_ltree_entry
561 * if the logical eraseblock is in the lock tree, so it is locked, NULL
562 * otherwise.
563 * @ebh->ltree_lock has to be locked!
564 */
565 static struct chfs_ltree_entry *
566 ltree_lookup(struct chfs_ebh *ebh, int lnr)
567 {
568 struct chfs_ltree_entry le, *result;
569 le.lnr = lnr;
570 result = RB_FIND(ltree_rbtree, &ebh->ltree, &le);
571 return result;
572 }
573
574 /**
575 * ltree_add_entry - add an entry to the lock tree
576 * @ebh: chfs eraseblock handler
577 * @lnr: identifier of the logical eraseblock
578 *
579 * This function adds a new logical eraseblock entry identified with @lnr to the
580 * lock tree. If the entry is already in the tree, it increases the user
581 * counter.
582 * Returns NULL if it cannot allocate memory for the lock tree entry, or a pointer
583 * to the inserted entry otherwise.
584 */
585 static struct chfs_ltree_entry *
586 ltree_add_entry(struct chfs_ebh *ebh, int lnr)
587 {
588 struct chfs_ltree_entry *le, *result;
589
590 le = kmem_alloc(sizeof(struct chfs_ltree_entry), KM_SLEEP);
591
592 le->lnr = lnr;
593 le->users = 1;
594 rw_init(&le->mutex);
595
596 //dbg_ebh("enter ltree lock\n");
597 mutex_enter(&ebh->ltree_lock);
598 //dbg_ebh("insert\n");
599 result = RB_INSERT(ltree_rbtree, &ebh->ltree, le);
600 //dbg_ebh("inserted\n");
601 if (result) {
602 //The entry is already in the tree
603 result->users++;
604 kmem_free(le, sizeof(struct chfs_ltree_entry));
605 }
606 else {
607 result = le;
608 }
609 mutex_exit(&ebh->ltree_lock);
610
611 return result;
612 }
613
614 /**
615 * leb_read_lock - lock a logical eraseblock for read
616 * @ebh: chfs eraseblock handler
617 * @lnr: identifier of the logical eraseblock
618 *
619 * Returns zero in case of success, error code in case of fail.
620 */
621 static int
622 leb_read_lock(struct chfs_ebh *ebh, int lnr)
623 {
624 struct chfs_ltree_entry *le;
625
626 le = ltree_add_entry(ebh, lnr);
627 if (!le)
628 return ENOMEM;
629
630 rw_enter(&le->mutex, RW_READER);
631 return 0;
632 }
633
634 /**
635 * leb_read_unlock - unlock a logical eraseblock from read
636 * @ebh: chfs eraseblock handler
637 * @lnr: identifier of the logical eraseblock
638 *
639 * This function unlocks a logical eraseblock from read and deletes it from the
640 * lock tree if there are no more users of it.
641 */
642 static void
643 leb_read_unlock(struct chfs_ebh *ebh, int lnr)
644 {
645 struct chfs_ltree_entry *le;
646
647 mutex_enter(&ebh->ltree_lock);
648 //dbg_ebh("LOCK: ebh->ltree_lock spin locked in leb_read_unlock()\n");
649 le = ltree_lookup(ebh, lnr);
650 if (!le)
651 goto out;
652
653 le->users -= 1;
654 KASSERT(le->users >= 0);
655 rw_exit(&le->mutex);
656 if (le->users == 0) {
657 le = RB_REMOVE(ltree_rbtree, &ebh->ltree, le);
658 if (le) {
659 KASSERT(!rw_lock_held(&le->mutex));
660 rw_destroy(&le->mutex);
661
662 kmem_free(le, sizeof(struct chfs_ltree_entry));
663 }
664 }
665
666 out:
667 mutex_exit(&ebh->ltree_lock);
668 //dbg_ebh("UNLOCK: ebh->ltree_lock spin unlocked in leb_read_unlock()\n");
669 }
670
671 /**
672 * leb_write_lock - lock a logical eraseblock for write
673 * @ebh: chfs eraseblock handler
674 * @lnr: identifier of the logical eraseblock
675 *
676 * Returns zero in case of success, error code in case of fail.
677 */
678 static int
679 leb_write_lock(struct chfs_ebh *ebh, int lnr)
680 {
681 struct chfs_ltree_entry *le;
682
683 le = ltree_add_entry(ebh, lnr);
684 if (!le)
685 return ENOMEM;
686
687 rw_enter(&le->mutex, RW_WRITER);
688 return 0;
689 }
690
691 /**
692 * leb_write_unlock - unlock a logical eraseblock from write
693 * @ebh: chfs eraseblock handler
694 * @lnr: identifier of the logical eraseblock
695 *
696 * This function unlocks a logical eraseblock from write and deletes it from the
697 * lock tree if there are no more users of it.
698 */
699 static void
700 leb_write_unlock(struct chfs_ebh *ebh, int lnr)
701 {
702 struct chfs_ltree_entry *le;
703
704 mutex_enter(&ebh->ltree_lock);
705 //dbg_ebh("LOCK: ebh->ltree_lock spin locked in leb_write_unlock()\n");
706 le = ltree_lookup(ebh, lnr);
707 if (!le)
708 goto out;
709
710 le->users -= 1;
711 KASSERT(le->users >= 0);
712 rw_exit(&le->mutex);
713 if (le->users == 0) {
714 RB_REMOVE(ltree_rbtree, &ebh->ltree, le);
715
716 KASSERT(!rw_lock_held(&le->mutex));
717 rw_destroy(&le->mutex);
718
719 kmem_free(le, sizeof(struct chfs_ltree_entry));
720 }
721
722 out:
723 mutex_exit(&ebh->ltree_lock);
724 //dbg_ebh("UNLOCK: ebh->ltree_lock spin unlocked in leb_write_unlock()\n");
725 }
726
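/*
 * Typical use of the lock tree by the LEB I/O paths further below
 * (illustrative sketch only, mirroring ebh_read_leb()/ebh_write_leb()):
 *
 *	err = leb_read_lock(ebh, lnr);
 *	if (err)
 *		return err;
 *	... look up ebh->lmap[lnr] and access the flash ...
 *	leb_read_unlock(ebh, lnr);
 *
 * The entry is created on demand by the lock call and is removed again by
 * the unlock call once its user counter drops to zero.
 */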
727 /*****************************************************************************/
728 /* End of Lock Tree */
729 /*****************************************************************************/
730
731 /*****************************************************************************/
732 /* Erase related operations */
733 /*****************************************************************************/
734
735 /**
736 * If the first argument is smaller than the second, the function
737 * returns a value smaller than zero. If they are equal, the function
738 * returns zero. Otherwise, it returns a value greater than zero.
739 */
740 int
741 peb_in_use_cmp(struct chfs_peb *peb1, struct chfs_peb *peb2)
742 {
743 return (peb1->pebnr - peb2->pebnr);
744 }
745
746 int
747 peb_free_cmp(struct chfs_peb *peb1, struct chfs_peb *peb2)
748 {
749 int comp;
750
751 comp = peb1->erase_cnt - peb2->erase_cnt;
752 if (0 == comp)
753 comp = peb1->pebnr - peb2->pebnr;
754
755 return comp;
756 }
757
758 /* Generate functions for in use PEB's red-black tree */
759 RB_PROTOTYPE(peb_in_use_rbtree, chfs_peb, u.rb, peb_in_use_cmp);
760 RB_GENERATE(peb_in_use_rbtree, chfs_peb, u.rb, peb_in_use_cmp);
761 RB_PROTOTYPE(peb_free_rbtree, chfs_peb, u.rb, peb_free_cmp);
762 RB_GENERATE(peb_free_rbtree, chfs_peb, u.rb, peb_free_cmp);
763
764 /**
765 * add_peb_to_erase_queue: adds a PEB to to_erase/fully_erased queue
766 * @ebh - chfs eraseblock handler
767 * @pebnr - physical eraseblock's number
768 * @ec - erase counter of PEB
769 * @queue: the queue to add to
770 *
771 * This function adds a PEB to the erase queue specified by @queue.
772 * The @ebh->erase_lock must be locked before using this.
773 * Returns zero in case of success, error code in case of fail.
774 */
775 int
776 add_peb_to_erase_queue(struct chfs_ebh *ebh, int pebnr, int ec,
777 struct peb_queue *queue)
778 {
779 struct chfs_peb *peb;
780
781 peb = kmem_alloc(sizeof(struct chfs_peb), KM_SLEEP);
782
783 peb->erase_cnt = ec;
784 peb->pebnr = pebnr;
785
786 TAILQ_INSERT_TAIL(queue, peb, u.queue);
787
788 return 0;
789
790 }
791 //TODO
792 /**
793 * find_peb_in_use - looks up a PEB in the RB-tree of used blocks
794 * @ebh - chfs eraseblock handler
 * @pebnr - physical eraseblock number
795 *
796 * This function returns a pointer to the PEB found in the tree,
797 * NULL otherwise.
798 * The @ebh->erase_lock must be locked before using this.
799 */
800 struct chfs_peb *
801 find_peb_in_use(struct chfs_ebh *ebh, int pebnr)
802 {
803 struct chfs_peb peb, *result;
804 peb.pebnr = pebnr;
805 result = RB_FIND(peb_in_use_rbtree, &ebh->in_use, &peb);
806 return result;
807 }
808
809 /**
810 * add_peb_to_free - adds a PEB to the RB-tree of free PEBs
811 * @ebh - chfs eraseblock handler
812 * @pebnr - physical eraseblock's number
813 * @ec - erase counter of PEB
814 *
815 *
816 * This function adds a physical eraseblock to the RB-tree of free PEBs
817 * stored in the @ebh. The key is the erase counter and pebnr.
818 * The @ebh->erase_lock must be locked before using this.
819 * Returns zero in case of success, error code in case of fail.
820 */
821 int
822 add_peb_to_free(struct chfs_ebh *ebh, int pebnr, int ec)
823 {
824 struct chfs_peb *peb, *result;
825
826 peb = kmem_alloc(sizeof(struct chfs_peb), KM_SLEEP);
827
828 peb->erase_cnt = ec;
829 peb->pebnr = pebnr;
830 result = RB_INSERT(peb_free_rbtree, &ebh->free, peb);
831 if (result)
832 return 1;
833
834 return 0;
835 }
836
837 /**
838 * add_peb_to_in_use - adds a PEB to the RB-tree of used PEBs
839 * @ebh - chfs eraseblock handler
840 * @pebnr - physical eraseblock's number
841 * @ec - erase counter of PEB
842 *
843 *
844 * This function adds a physical eraseblock to the RB-tree of used PEBs
845 * stored in the @ebh. The key is pebnr.
846 * The @ebh->erase_lock must be locked before using this.
847 * Returns zero in case of success, error code in case of fail.
848 */
849 int
850 add_peb_to_in_use(struct chfs_ebh *ebh, int pebnr, int ec)
851 {
852 struct chfs_peb *peb, *result;
853
854 peb = kmem_alloc(sizeof(struct chfs_peb), KM_SLEEP);
855
856 peb->erase_cnt = ec;
857 peb->pebnr = pebnr;
858 result = RB_INSERT(peb_in_use_rbtree, &ebh->in_use, peb);
859 if (result)
860 return 1;
861
862 return 0;
863 }
864
865 /**
866 * erase_callback - callback function for flash erase
867 * @ei: erase information
868 */
869 void
870 erase_callback(struct flash_erase_instruction *ei)
871 {
872 int err;
873 struct chfs_erase_info_priv *priv = (void *) ei->ei_priv;
874 //dbg_ebh("ERASE_CALLBACK() CALLED\n");
875 struct chfs_ebh *ebh = priv->ebh;
876 struct chfs_peb *peb = priv->peb;
877
878 peb->erase_cnt += 1;
879
880 if (ei->ei_state == FLASH_ERASE_DONE) {
881
882 /* Write out erase counter */
883 err = ebh->ops->mark_eb_hdr_free(ebh,
884 peb->pebnr, peb->erase_cnt);
885 if (err) {
886 			/* cannot mark PEB as free, so erase it again */
887 chfs_err(
888 "cannot mark eraseblock as free, PEB: %d\n",
889 peb->pebnr);
890 mutex_enter(&ebh->erase_lock);
891 /*dbg_ebh("LOCK: ebh->erase_lock spin locked in erase_callback() "
892 "after mark ebhdr free\n");*/
893 add_peb_to_erase_queue(ebh, peb->pebnr, peb->erase_cnt,
894 &ebh->to_erase);
895 mutex_exit(&ebh->erase_lock);
896 /*dbg_ebh("UNLOCK: ebh->erase_lock spin unlocked in erase_callback() "
897 "after mark ebhdr free\n");*/
898 kmem_free(peb, sizeof(struct chfs_peb));
899 return;
900 }
901
902 mutex_enter(&ebh->erase_lock);
903 /*dbg_ebh("LOCK: ebh->erase_lock spin locked in erase_callback()\n");*/
904 err = add_peb_to_free(ebh, peb->pebnr, peb->erase_cnt);
905 mutex_exit(&ebh->erase_lock);
906 /*dbg_ebh("UNLOCK: ebh->erase_lock spin unlocked in erase_callback()\n");*/
907 kmem_free(peb, sizeof(struct chfs_peb));
908 } else {
909 /*
910 * Erase is finished, but there was a problem,
911 * so erase PEB again
912 */
913 chfs_err("erase failed, state is: 0x%x\n", ei->ei_state);
914 add_peb_to_erase_queue(ebh, peb->pebnr, peb->erase_cnt, &ebh->to_erase);
915 kmem_free(peb, sizeof(struct chfs_peb));
916 }
917 }
918
919 /**
920 * free_peb: free a PEB
921 * @ebh: chfs eraseblock handler
922 *
923 * This function erases the first physical eraseblock from one of the erase
924 * lists and adds to the RB-tree of free PEBs.
925 * Returns zero in case of success, error code in case of fail.
926 */
927 int
928 free_peb(struct chfs_ebh *ebh)
929 {
930 int err, retries = 0;
931 off_t ofs;
932 struct chfs_peb *peb = NULL;
933 struct flash_erase_instruction *ei;
934
935 KASSERT(mutex_owned(&ebh->erase_lock));
936
937 if (!TAILQ_EMPTY(&ebh->fully_erased)) {
938 //dbg_ebh("[FREE PEB] got a fully erased block\n");
939 peb = TAILQ_FIRST(&ebh->fully_erased);
940 TAILQ_REMOVE(&ebh->fully_erased, peb, u.queue);
941 err = ebh->ops->mark_eb_hdr_free(ebh,
942 peb->pebnr, peb->erase_cnt);
943 if (err) {
944 goto out_free;
945 }
946 err = add_peb_to_free(ebh, peb->pebnr, peb->erase_cnt);
947 goto out_free;
948 }
949 /* Erase PEB */
950 //dbg_ebh("[FREE PEB] eraseing a block\n");
951 peb = TAILQ_FIRST(&ebh->to_erase);
952 TAILQ_REMOVE(&ebh->to_erase, peb, u.queue);
953 mutex_exit(&ebh->erase_lock);
954 //dbg_ebh("UNLOCK: ebh->erase_lock spin unlocked in free_peb()\n");
955 ofs = peb->pebnr * ebh->flash_if->erasesize;
956
957 /* XXX where do we free this? */
958 ei = kmem_alloc(sizeof(struct flash_erase_instruction)
959 + sizeof(struct chfs_erase_info_priv), KM_SLEEP);
960 retry:
961 memset(ei, 0, sizeof(*ei));
962
963 // ei->ei_if = ebh->flash_if;
964 ei->ei_addr = ofs;
965 ei->ei_len = ebh->flash_if->erasesize;
966 ei->ei_callback = erase_callback;
967 ei->ei_priv = (unsigned long) (&ei[1]);
968
969 ((struct chfs_erase_info_priv *) ei->ei_priv)->ebh = ebh;
970 ((struct chfs_erase_info_priv *) ei->ei_priv)->peb = peb;
971
972 err = flash_erase(ebh->flash_dev, ei);
973 dbg_ebh("erased peb: %d\n", peb->pebnr);
974
975 /* einval would mean we did something wrong */
976 KASSERT(err != EINVAL);
977
978 if (err) {
979 dbg_ebh("errno: %d, ei->ei_state: %d\n", err, ei->ei_state);
980 if (CHFS_MAX_GET_PEB_RETRIES < ++retries &&
981 ei->ei_state == FLASH_ERASE_FAILED) {
982 			/* The block went bad, mark it bad */
983 dbg_ebh("ebh markbad! 0x%jx\n", (uintmax_t )ofs);
984 err = flash_block_markbad(ebh->flash_dev, ofs);
985 if (!err) {
986 ebh->peb_nr--;
987 }
988
989 goto out;
990 }
991 chfs_err("can not erase PEB: %d, try again\n", peb->pebnr);
992 goto retry;
993 }
994
995 out:
996 /* lock the erase_lock, because it was locked
997 * when the function was called */
998 mutex_enter(&ebh->erase_lock);
999 return err;
1000
1001 out_free:
1002 kmem_free(peb, sizeof(struct chfs_peb));
1003 return err;
1004 }
1005
1006 /**
1007 * release_peb - schedule an erase for the PEB
1008 * @ebh: chfs eraseblock handler
1009 * @pebnr: physical eraseblock number
1010 *
1011 * This function gets the PEB identified by @pebnr from the in_use RB-tree of
1012 * @ebh, removes it and schedules an erase for it.
1013 *
1014 * Returns zero on success, error code in case of fail.
1015 */
1016 int
1017 release_peb(struct chfs_ebh *ebh, int pebnr)
1018 {
1019 int err = 0;
1020 struct chfs_peb *peb;
1021
1022 mutex_enter(&ebh->erase_lock);
1023
1024 //dbg_ebh("LOCK: ebh->erase_lock spin locked in release_peb()\n");
1025 peb = find_peb_in_use(ebh, pebnr);
1026 if (!peb) {
1027 chfs_err("LEB is mapped, but is not in the 'in_use' "
1028 "tree of ebh\n");
1029 goto out_unlock;
1030 }
1031 err = add_peb_to_erase_queue(ebh, peb->pebnr, peb->erase_cnt,
1032 &ebh->to_erase);
1033
1034 if (err)
1035 goto out_unlock;
1036
1037 RB_REMOVE(peb_in_use_rbtree, &ebh->in_use, peb);
1038 out_unlock:
1039 mutex_exit(&ebh->erase_lock);
1040 //dbg_ebh("UNLOCK: ebh->erase_lock spin unlocked in release_peb()"
1041 // " at out_unlock\n");
1042 return err;
1043 }
1044
1045 /**
1046 * erase_thread - background thread for erasing PEBs
1047 * @data: pointer to the eraseblock handler
1048 */
1049 /*void
1050 erase_thread(void *data)
1051 {
1052 struct chfs_ebh *ebh = data;
1053
1054 dbg_ebh("erase thread started\n");
1055 while (ebh->bg_erase.eth_running) {
1056 int err;
1057
1058 mutex_enter(&ebh->erase_lock);
1059 dbg_ebh("LOCK: ebh->erase_lock spin locked in erase_thread()\n");
1060 if (TAILQ_EMPTY(&ebh->to_erase) && TAILQ_EMPTY(&ebh->fully_erased)) {
1061 dbg_ebh("thread has nothing to do\n");
1062 mutex_exit(&ebh->erase_lock);
1063 mutex_enter(&ebh->bg_erase.eth_thread_mtx);
1064 cv_timedwait_sig(&ebh->bg_erase.eth_wakeup,
1065 &ebh->bg_erase.eth_thread_mtx, mstohz(100));
1066 mutex_exit(&ebh->bg_erase.eth_thread_mtx);
1067
1068 dbg_ebh("UNLOCK: ebh->erase_lock spin unlocked in erase_thread()\n");
1069 continue;
1070 }
1071 mutex_exit(&ebh->erase_lock);
1072 dbg_ebh("UNLOCK: ebh->erase_lock spin unlocked in erase_thread()\n");
1073
1074 err = free_peb(ebh);
1075 if (err)
1076 chfs_err("freeing PEB failed in the background thread: %d\n", err);
1077
1078 }
1079 dbg_ebh("erase thread stopped\n");
1080 kthread_exit(0);
1081 }*/
1082
1083 /**
1084 * erase_thread - background thread for erasing PEBs
1085 * @data: pointer to the eraseblock handler
1086 */
1087 void
1088 erase_thread(void *data)
{
1089 dbg_ebh("[EBH THREAD] erase thread started\n");
1090
1091 struct chfs_ebh *ebh = data;
1092 int err;
1093
1094 mutex_enter(&ebh->erase_lock);
1095 while (ebh->bg_erase.eth_running) {
1096 if (TAILQ_EMPTY(&ebh->to_erase) &&
1097 TAILQ_EMPTY(&ebh->fully_erased)) {
1098 cv_timedwait_sig(&ebh->bg_erase.eth_wakeup,
1099 &ebh->erase_lock, mstohz(100));
1100 } else {
1101 /* XXX exiting this mutex is a bit odd here as
1102 * free_peb instantly reenters it...
1103 */
1104 err = free_peb(ebh);
1105 mutex_exit(&ebh->erase_lock);
1106 if (err) {
1107 chfs_err("freeing PEB failed in the"
1108 " background thread: %d\n", err);
1109 }
1110 mutex_enter(&ebh->erase_lock);
1111 }
1112 }
1113 mutex_exit(&ebh->erase_lock);
1114
1115 dbg_ebh("[EBH THREAD] erase thread stopped\n");
1116 kthread_exit(0);
1117 }
1118
1119 /**
1120 * erase_thread_start - init and start erase thread
1121 * @ebh: eraseblock handler
1122 */
1123 static void
1124 erase_thread_start(struct chfs_ebh *ebh)
1125 {
1126 cv_init(&ebh->bg_erase.eth_wakeup, "ebheracv");
1127
1128 ebh->bg_erase.eth_running = true;
1129 kthread_create(PRI_NONE, KTHREAD_MPSAFE | KTHREAD_MUSTJOIN, NULL,
1130 erase_thread, ebh, &ebh->bg_erase.eth_thread, "ebherase");
1131 }
1132
1133 /**
1134 * erase_thread_stop - stop background erase thread
1135 * @ebh: eraseblock handler
1136 */
1137 static void
1138 erase_thread_stop(struct chfs_ebh *ebh)
1139 {
1140 ebh->bg_erase.eth_running = false;
1141 cv_signal(&ebh->bg_erase.eth_wakeup);
1142 dbg_ebh("[EBH THREAD STOP] signaled\n");
1143
1144 kthread_join(ebh->bg_erase.eth_thread);
1145 #ifdef BROKEN_KTH_JOIN
1146 kpause("chfsebhjointh", false, mstohz(1000), NULL);
1147 #endif
1148
1149 cv_destroy(&ebh->bg_erase.eth_wakeup);
1150 }
1151
1152 /*****************************************************************************/
1153 /* End of Erase related operations */
1154 /*****************************************************************************/
1155
1156 /*****************************************************************************/
1157 /* Scan related operations */
1158 /*****************************************************************************/
1159 int
1160 scan_leb_used_cmp(struct chfs_scan_leb *sleb1, struct chfs_scan_leb *sleb2)
1161 {
1162 return (sleb1->lnr - sleb2->lnr);
1163 }
1164
1165 RB_PROTOTYPE(scan_leb_used_rbtree, chfs_scan_leb, u.rb, scan_leb_used_cmp);
1166 RB_GENERATE(scan_leb_used_rbtree, chfs_scan_leb, u.rb, scan_leb_used_cmp);
1167
1168 /**
1169 * scan_add_to_queue - adds a physical eraseblock to one of the
1170 * eraseblock queues
1171 * @si: chfs scanning information
1172 * @pebnr: physical eraseblock number
1173 * @erase_cnt: erase counter of the physical eraseblock
1174 * @queue: the queue to add to
1175 *
1176 * This function adds a physical eraseblock to one of the lists in the scanning
1177 * information.
1178 * Returns zero in case of success, negative error code in case of fail.
1179 */
1180 static int
1181 scan_add_to_queue(struct chfs_scan_info *si, int pebnr, int erase_cnt,
1182 struct scan_leb_queue *queue)
1183 {
1184 struct chfs_scan_leb *sleb;
1185
1186 sleb = kmem_alloc(sizeof(struct chfs_scan_leb), KM_SLEEP);
1187
1188 sleb->pebnr = pebnr;
1189 sleb->erase_cnt = erase_cnt;
1190 TAILQ_INSERT_TAIL(queue, sleb, u.queue);
1191 return 0;
1192 }
1193
1194 /*
1195 * nor_scan_add_to_used - add a physical eraseblock to the
1196 * used tree of scan info
1197 * @ebh: chfs eraseblock handler
1198 * @si: chfs scanning information
1199 * @ebhdr: eraseblock header
1200 * @pebnr: physical eraseblock number
1201 * @leb_status: the status of the PEB's eraseblock header
1202 *
1203 * This function adds a PEB to the used tree of the scanning information.
1204 * It handles the situation where more than one physical eraseblock references
1205 * the same logical eraseblock.
1206 * Returns zero in case of success, error code in case of fail.
1207 */
1208 int
1209 nor_scan_add_to_used(struct chfs_ebh *ebh, struct chfs_scan_info *si,
1210 struct chfs_eb_hdr *ebhdr, int pebnr, int leb_status)
1211 {
1212 int err, lnr, ec;
1213 struct chfs_scan_leb *sleb, *old;
1214
1215 lnr = CHFS_GET_LID(ebhdr->u.nor_hdr.lid);
1216 ec = le32toh(ebhdr->ec_hdr.erase_cnt);
1217
1218 sleb = kmem_alloc(sizeof(struct chfs_scan_leb), KM_SLEEP);
1219
1220 sleb->erase_cnt = ec;
1221 sleb->lnr = lnr;
1222 sleb->pebnr = pebnr;
1223 sleb->info = leb_status;
1224
1225 old = RB_INSERT(scan_leb_used_rbtree, &si->used, sleb);
1226 if (old) {
1227 kmem_free(sleb, sizeof(struct chfs_scan_leb));
1228 /* There is already an eraseblock in the used tree */
1229 /* If the new one is bad */
1230 if (EBHDR_LEB_DIRTY == leb_status &&
1231 EBHDR_LEB_OK == old->info) {
1232 return scan_add_to_queue(si, pebnr, ec, &si->erase);
1233 } else {
1234 err = scan_add_to_queue(si, old->pebnr,
1235 old->erase_cnt, &si->erase);
1236 if (err) {
1237 return err;
1238 }
1239
1240 old->erase_cnt = ec;
1241 old->lnr = lnr;
1242 old->pebnr = pebnr;
1243 old->info = leb_status;
1244 return 0;
1245 }
1246 }
1247 return 0;
1248 }
1249
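/*
 * Example of the duplicate handling above (hypothetical PEB/LEB numbers):
 * if the used tree already maps LEB 3 to PEB 7 with status EBHDR_LEB_OK and
 * the scan later finds PEB 12 also claiming LEB 3 but with EBHDR_LEB_DIRTY,
 * the newcomer (PEB 12) is queued for erase and PEB 7 stays in the tree.
 * In every other combination the old entry is queued for erase and the tree
 * entry is updated to describe the newly found PEB.
 */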
1250 /**
1251 * nor_process_eb - read the headers from NOR flash, check them and add the results to
1252 * the scanning information
1253 * @ebh: chfs eraseblock handler
1254 * @si: chfs scanning information
1255 * @pebnr: physical eraseblock number
1256 *
1257 * Returns zero in case of success, error code in case of fail.
1258 */
1259 int
1260 nor_process_eb(struct chfs_ebh *ebh, struct chfs_scan_info *si,
1261 int pebnr, struct chfs_eb_hdr *ebhdr)
1262 {
1263 int err, erase_cnt, leb_status;
1264
1265 err = ebh->ops->read_eb_hdr(ebh, pebnr, ebhdr);
1266 if (err)
1267 return err;
1268
1269 erase_cnt = le32toh(ebhdr->ec_hdr.erase_cnt);
1270 dbg_ebh("erase_cnt: %d\n", erase_cnt);
1271 leb_status = ebh->ops->check_eb_hdr(ebh, ebhdr);
1272 if (EBHDR_LEB_BADMAGIC == leb_status ||
1273 EBHDR_LEB_BADCRC == leb_status) {
1274 err = scan_add_to_queue(si, pebnr, erase_cnt, &si->corrupted);
1275 return err;
1276 }
1277 else if (EBHDR_LEB_FREE == leb_status) {
1278 err = scan_add_to_queue(si, pebnr, erase_cnt, &si->free);
1279 goto count_mean;
1280 }
1281 else if (EBHDR_LEB_NO_HDR == leb_status) {
1282 err = scan_add_to_queue(si, pebnr, erase_cnt, &si->erased);
1283 return err;
1284 }
1285 else if (EBHDR_LEB_INVALIDATED == leb_status) {
1286 err = scan_add_to_queue(si, pebnr, erase_cnt, &si->erase);
1287 return err;
1288 }
1289
1290 err = nor_scan_add_to_used(ebh, si, ebhdr, pebnr, leb_status);
1291 if (err)
1292 return err;
1293
1294
1295 count_mean:
1296 si->sum_of_ec += erase_cnt;
1297 si->num_of_eb++;
1298
1299 return err;
1300 }
1301
1302 /*
1303 * nand_scan_add_to_used - add a physical eraseblock to the
1304 * used tree of scan info
1305 * @ebh: chfs eraseblock handler
1306 * @si: chfs scanning information
1307 * @ebhdr: eraseblock header
1308 * @pebnr: physical eraseblock number
1309 * @leb_status: the status of the PEB's eraseblock header
1310 *
1311 * This function adds a PEB to the used tree of the scanning information.
1312 * It handles the situation where more than one physical eraseblock references
1313 * the same logical eraseblock.
1314 * Returns zero in case of success, error code in case of fail.
1315 */
1316 int
1317 nand_scan_add_to_used(struct chfs_ebh *ebh, struct chfs_scan_info *si,
1318 struct chfs_eb_hdr *ebhdr, int pebnr)
1319 {
1320 int err, lnr, ec;
1321 struct chfs_scan_leb *sleb, *old;
1322 uint64_t serial = le64toh(ebhdr->u.nand_hdr.serial);
1323
1324 	lnr = CHFS_GET_LID(ebhdr->u.nand_hdr.lid);
1325 ec = le32toh(ebhdr->ec_hdr.erase_cnt);
1326
1327 sleb = kmem_alloc(sizeof(struct chfs_scan_leb), KM_SLEEP);
1328
1329 sleb->erase_cnt = ec;
1330 sleb->lnr = lnr;
1331 sleb->pebnr = pebnr;
1332 sleb->info = serial;
1333
1334 old = RB_INSERT(scan_leb_used_rbtree, &si->used, sleb);
1335 if (old) {
1336 kmem_free(sleb, sizeof(struct chfs_scan_leb));
1337 /* There is already an eraseblock in the used tree */
1338 /* If the new one is bad */
1339 if (serial < old->info)
1340 return scan_add_to_queue(si, pebnr, ec, &si->erase);
1341 else {
1342 err = scan_add_to_queue(si,
1343 old->pebnr, old->erase_cnt, &si->erase);
1344 if (err)
1345 return err;
1346
1347 old->erase_cnt = ec;
1348 old->lnr = lnr;
1349 old->pebnr = pebnr;
1350 old->info = serial;
1351 return 0;
1352 }
1353 }
1354 return 0;
1355 }
1356
1357 /**
1358 * nand_process_eb - read the headers from NAND flash, check them and add the results to the
1359 * scanning information
1360 * @ebh: chfs eraseblock handler
1361 * @si: chfs scanning information
1362 * @pebnr: physical eraseblock number
1363 *
1364 * Returns zero in case of success, error code in case of fail.
1365 */
1366 int
1367 nand_process_eb(struct chfs_ebh *ebh, struct chfs_scan_info *si,
1368 int pebnr, struct chfs_eb_hdr *ebhdr)
1369 {
1370 int err, erase_cnt, leb_status;
1371 uint64_t max_serial;
1372 bool isbad;
1373
1374 	/* Check whether the block is bad */
1375 err = flash_block_isbad(ebh->flash_dev,
1376 pebnr * ebh->flash_if->erasesize, &isbad);
1377 if (err) {
1378 chfs_err("checking block is bad failed\n");
1379 return err;
1380 }
1381 if (isbad) {
1382 si->bad_peb_cnt++;
1383 return 0;
1384 }
1385
1386 err = ebh->ops->read_eb_hdr(ebh, pebnr, ebhdr);
1387 if (err)
1388 return err;
1389
1390 erase_cnt = le32toh(ebhdr->ec_hdr.erase_cnt);
1391 leb_status = ebh->ops->check_eb_hdr(ebh, ebhdr);
1392 if (EBHDR_LEB_BADMAGIC == leb_status ||
1393 EBHDR_LEB_BADCRC == leb_status) {
1394 err = scan_add_to_queue(si, pebnr, erase_cnt, &si->corrupted);
1395 return err;
1396 }
1397 else if (EBHDR_LEB_FREE == leb_status) {
1398 err = scan_add_to_queue(si, pebnr, erase_cnt, &si->free);
1399 goto count_mean;
1400 }
1401 else if (EBHDR_LEB_NO_HDR == leb_status) {
1402 err = scan_add_to_queue(si, pebnr, erase_cnt, &si->erased);
1403 return err;
1404 }
1405
1406 err = nand_scan_add_to_used(ebh, si, ebhdr, pebnr);
1407 if (err)
1408 return err;
1409
1410 max_serial = le64toh(ebhdr->u.nand_hdr.serial);
1411 if (max_serial > *ebh->max_serial) {
1412 *ebh->max_serial = max_serial;
1413 }
1414
1415 count_mean:
1416 si->sum_of_ec += erase_cnt;
1417 si->num_of_eb++;
1418
1419 return err;
1420 }
1421
1422 /**
1423 * chfs_scan - scans the media and returns information about it
1424 * @ebh: chfs eraseblock handler
1425 *
1426 * This function scans through the media and returns information about it,
1427 * or NULL if it fails.
1428 */
1429 struct chfs_scan_info *
1430 chfs_scan(struct chfs_ebh *ebh)
1431 {
1432 struct chfs_scan_info *si;
1433 struct chfs_eb_hdr *ebhdr;
1434 int pebnr, err;
1435
1436 si = kmem_alloc(sizeof(*si), KM_SLEEP);
1437
1438 TAILQ_INIT(&si->corrupted);
1439 TAILQ_INIT(&si->free);
1440 TAILQ_INIT(&si->erase);
1441 TAILQ_INIT(&si->erased);
1442 RB_INIT(&si->used);
1443 si->bad_peb_cnt = 0;
1444 si->num_of_eb = 0;
1445 si->sum_of_ec = 0;
1446
1447 ebhdr = kmem_alloc(sizeof(*ebhdr), KM_SLEEP);
1448
1449 for (pebnr = 0; pebnr < ebh->peb_nr; pebnr++) {
1450 dbg_ebh("processing PEB %d\n", pebnr);
1451 err = ebh->ops->process_eb(ebh, si, pebnr, ebhdr);
1452 if (err < 0)
1453 goto out_ebhdr;
1454 }
1455 kmem_free(ebhdr, sizeof(*ebhdr));
1456 dbg_ebh("[CHFS_SCAN] scanning information collected\n");
1457 return si;
1458
1459 out_ebhdr:
1460 kmem_free(ebhdr, sizeof(*ebhdr));
1461 kmem_free(si, sizeof(*si));
1462 return NULL;
1463 }
1464
1465 /**
1466 * scan_info_destroy - frees all lists and trees in the scanning information
1467 * @si: the scanning information
1468 */
1469 void
1470 scan_info_destroy(struct chfs_scan_info *si)
1471 {
1472 EBH_QUEUE_DESTROY(&si->corrupted,
1473 struct chfs_scan_leb, u.queue);
1474
1475 EBH_QUEUE_DESTROY(&si->erase,
1476 struct chfs_scan_leb, u.queue);
1477
1478 EBH_QUEUE_DESTROY(&si->erased,
1479 struct chfs_scan_leb, u.queue);
1480
1481 EBH_QUEUE_DESTROY(&si->free,
1482 struct chfs_scan_leb, u.queue);
1483
1484 EBH_TREE_DESTROY(scan_leb_used_rbtree,
1485 &si->used, struct chfs_scan_leb);
1486
1487 kmem_free(si, sizeof(*si));
1488 dbg_ebh("[SCAN_INFO_DESTROY] scanning information destroyed\n");
1489 }
1490
1491 /**
1492 * scan_media - scan media
1493 *
1494 * @ebh - chfs eraseblock handler
1495 *
1496 * Returns zero in case of success, error code in case of fail.
1497 */
1498
1499 int
1500 scan_media(struct chfs_ebh *ebh)
1501 {
1502 int err, i, avg_ec;
1503 struct chfs_scan_info *si;
1504 struct chfs_scan_leb *sleb;
1505
1506 si = chfs_scan(ebh);
1507 /*
1508 * Process the scan info, manage the eraseblock lists
1509 */
1510 mutex_init(&ebh->ltree_lock, MUTEX_DEFAULT, IPL_NONE);
1511 mutex_init(&ebh->erase_lock, MUTEX_DEFAULT, IPL_NONE);
1512 RB_INIT(&ebh->ltree);
1513 RB_INIT(&ebh->free);
1514 RB_INIT(&ebh->in_use);
1515 TAILQ_INIT(&ebh->to_erase);
1516 TAILQ_INIT(&ebh->fully_erased);
1517 mutex_init(&ebh->alc_mutex, MUTEX_DEFAULT, IPL_NONE);
1518
1519 ebh->peb_nr -= si->bad_peb_cnt;
1520
1521 /*
1522 * Create background thread for erasing
1523 */
1524 erase_thread_start(ebh);
1525
1526 ebh->lmap = kmem_alloc(ebh->peb_nr * sizeof(int), KM_SLEEP);
1527
1528 for (i = 0; i < ebh->peb_nr; i++) {
1529 ebh->lmap[i] = EBH_LEB_UNMAPPED;
1530 }
1531
1532 if (si->num_of_eb == 0) {
1533 /* The flash contains no data. */
1534 avg_ec = 0;
1535 }
1536 else {
1537 avg_ec = (int) (si->sum_of_ec / si->num_of_eb);
1538 }
1539 dbg_ebh("num_of_eb: %d\n", si->num_of_eb);
1540
1541 mutex_enter(&ebh->erase_lock);
1542
1543 RB_FOREACH(sleb, scan_leb_used_rbtree, &si->used) {
1544 ebh->lmap[sleb->lnr] = sleb->pebnr;
1545 err = add_peb_to_in_use(ebh, sleb->pebnr, sleb->erase_cnt);
1546 if (err)
1547 goto out_free;
1548 }
1549
1550 TAILQ_FOREACH(sleb, &si->erased, u.queue) {
1551 err = add_peb_to_erase_queue(ebh, sleb->pebnr, avg_ec,
1552 &ebh->fully_erased);
1553 if (err)
1554 goto out_free;
1555 }
1556
1557 TAILQ_FOREACH(sleb, &si->erase, u.queue) {
1558 err = add_peb_to_erase_queue(ebh, sleb->pebnr, avg_ec,
1559 &ebh->to_erase);
1560 if (err)
1561 goto out_free;
1562 }
1563
1564 TAILQ_FOREACH(sleb, &si->free, u.queue) {
1565 err = add_peb_to_free(ebh, sleb->pebnr, sleb->erase_cnt);
1566 if (err)
1567 goto out_free;
1568 }
1569
1570 TAILQ_FOREACH(sleb, &si->corrupted, u.queue) {
1571 err = add_peb_to_erase_queue(ebh, sleb->pebnr, avg_ec,
1572 &ebh->to_erase);
1573 if (err)
1574 goto out_free;
1575 }
1576 mutex_exit(&ebh->erase_lock);
1577 scan_info_destroy(si);
1578 return 0;
1579
1580 out_free:
1581 mutex_exit(&ebh->erase_lock);
1582 kmem_free(ebh->lmap, ebh->peb_nr * sizeof(int));
1583 scan_info_destroy(si);
1584 dbg_ebh("[SCAN_MEDIA] returning with error: %d\n", err);
1585 return err;
1586 }
1587
1588 /*****************************************************************************/
1589 /* End of Scan related operations */
1590 /*****************************************************************************/
1591
1592 /**
1593 * ebh_open - open the flash device and initialize the eraseblock handler
1594 * @ebh: eraseblock handler
1595 * @dev: device number of the flash device to use
1596 *
1597 * Returns zero in case of success, error code in case of fail.
1598 */
1599 int
1600 ebh_open(struct chfs_ebh *ebh, dev_t dev)
1601 {
1602 int err;
1603
1604 ebh->flash_dev = flash_get_device(dev);
1605 if (!ebh->flash_dev) {
1606 aprint_error("ebh_open: cant get flash device\n");
1607 return ENODEV;
1608 }
1609
1610 ebh->flash_if = flash_get_interface(dev);
1611 if (!ebh->flash_if) {
1612 aprint_error("ebh_open: cant get flash interface\n");
1613 return ENODEV;
1614 }
1615
1616 ebh->flash_size = flash_get_size(dev);
1617 ebh->peb_nr = ebh->flash_size / ebh->flash_if->erasesize;
1618 // ebh->peb_nr = ebh->flash_if->size / ebh->flash_if->erasesize;
1619 /* Set up flash operations based on flash type */
1620 ebh->ops = kmem_alloc(sizeof(struct chfs_ebh_ops), KM_SLEEP);
1621
1622 switch (ebh->flash_if->type) {
1623 case FLASH_TYPE_NOR:
1624 ebh->eb_size = ebh->flash_if->erasesize -
1625 CHFS_EB_EC_HDR_SIZE - CHFS_EB_HDR_NOR_SIZE;
1626
1627 ebh->ops->read_eb_hdr = nor_read_eb_hdr;
1628 ebh->ops->write_eb_hdr = nor_write_eb_hdr;
1629 ebh->ops->check_eb_hdr = nor_check_eb_hdr;
1630 ebh->ops->mark_eb_hdr_dirty_flash =
1631 nor_mark_eb_hdr_dirty_flash;
1632 ebh->ops->invalidate_eb_hdr = nor_invalidate_eb_hdr;
1633 ebh->ops->mark_eb_hdr_free = mark_eb_hdr_free;
1634
1635 ebh->ops->process_eb = nor_process_eb;
1636
1637 ebh->ops->create_eb_hdr = nor_create_eb_hdr;
1638 ebh->ops->calc_data_offs = nor_calc_data_offs;
1639
1640 ebh->max_serial = NULL;
1641 break;
1642 case FLASH_TYPE_NAND:
1643 ebh->eb_size = ebh->flash_if->erasesize -
1644 2 * ebh->flash_if->page_size;
1645
1646 ebh->ops->read_eb_hdr = nand_read_eb_hdr;
1647 ebh->ops->write_eb_hdr = nand_write_eb_hdr;
1648 ebh->ops->check_eb_hdr = nand_check_eb_hdr;
1649 ebh->ops->mark_eb_hdr_free = mark_eb_hdr_free;
1650 ebh->ops->mark_eb_hdr_dirty_flash = NULL;
1651 ebh->ops->invalidate_eb_hdr = NULL;
1652
1653 ebh->ops->process_eb = nand_process_eb;
1654
1655 ebh->ops->create_eb_hdr = nand_create_eb_hdr;
1656 ebh->ops->calc_data_offs = nand_calc_data_offs;
1657
1658 ebh->max_serial = kmem_alloc(sizeof(uint64_t), KM_SLEEP);
1659
1660 *ebh->max_serial = 0;
1661 break;
1662 default:
1663 return 1;
1664 }
1665 printf("opening ebh: eb_size: %zu\n", ebh->eb_size);
1666 err = scan_media(ebh);
1667 if (err) {
1668 dbg_ebh("Scan failed.");
1669 kmem_free(ebh->ops, sizeof(struct chfs_ebh_ops));
1670 kmem_free(ebh, sizeof(struct chfs_ebh));
1671 return err;
1672 }
1673 return 0;
1674 }
1675
1676 /**
1677 * ebh_close - close ebh
1678 * @ebh: eraseblock handler
1679 * Returns zero in case of success, error code in case of fail.
1680 */
1681 int
1682 ebh_close(struct chfs_ebh *ebh)
1683 {
1684 erase_thread_stop(ebh);
1685
1686 EBH_TREE_DESTROY(peb_free_rbtree, &ebh->free, struct chfs_peb);
1687 EBH_TREE_DESTROY(peb_in_use_rbtree, &ebh->in_use, struct chfs_peb);
1688
1689 EBH_QUEUE_DESTROY(&ebh->fully_erased, struct chfs_peb, u.queue);
1690 EBH_QUEUE_DESTROY(&ebh->to_erase, struct chfs_peb, u.queue);
1691
1692 /* XXX HACK, see ebh.h */
1693 EBH_TREE_DESTROY_MUTEX(ltree_rbtree, &ebh->ltree,
1694 struct chfs_ltree_entry);
1695
1696 KASSERT(!mutex_owned(&ebh->ltree_lock));
1697 KASSERT(!mutex_owned(&ebh->alc_mutex));
1698 KASSERT(!mutex_owned(&ebh->erase_lock));
1699
1700 mutex_destroy(&ebh->ltree_lock);
1701 mutex_destroy(&ebh->alc_mutex);
1702 mutex_destroy(&ebh->erase_lock);
1703
1704 kmem_free(ebh->ops, sizeof(struct chfs_ebh_ops));
1705 kmem_free(ebh, sizeof(struct chfs_ebh));
1706
1707 return 0;
1708 }
1709
1710 /**
1711 * ebh_read_leb - read data from leb
1712 * @ebh: eraseblock handler
1713 * @lnr: logical eraseblock number
1714 * @buf: buffer to read to
1715 * @offset: offset from where to read
1716 * @len: number of bytes to read
 * @retlen: the number of bytes actually read is stored here
1717 *
1718 * Returns zero in case of success, error code in case of fail.
1719 */
1720 int
1721 ebh_read_leb(struct chfs_ebh *ebh, int lnr, char *buf, uint32_t offset,
1722 size_t len, size_t *retlen)
1723 {
1724 int err, pebnr;
1725 off_t data_offset;
1726
1727 KASSERT(offset + len <= ebh->eb_size);
1728
1729 err = leb_read_lock(ebh, lnr);
1730 if (err)
1731 return err;
1732 pebnr = ebh->lmap[lnr];
1733 	/* If the LEB is not mapped, the buffer is filled with 0xFF */
1734 if (EBH_LEB_UNMAPPED == pebnr) {
1735 leb_read_unlock(ebh, lnr);
1736 memset(buf, 0xFF, len);
1737 return 0;
1738 }
1739
1740 /* Read data */
1741 data_offset = ebh->ops->calc_data_offs(ebh, pebnr, offset);
1742 err = flash_read(ebh->flash_dev, data_offset, len, retlen,
1743 (unsigned char *) buf);
1744 if (err)
1745 goto out_free;
1746
1747 KASSERT(len == *retlen);
1748
1749 leb_read_unlock(ebh, lnr);
1750 return err;
1751
1752 out_free:
1753 leb_read_unlock(ebh, lnr);
1754 return err;
1755 }
1756
1757 /**
1758 * get_peb: get a free physical eraseblock
1759 * @ebh - chfs eraseblock handler
1760 *
1761 * This function gets a free eraseblock from the ebh->free RB-tree.
1762 * The first entry will be returned and deleted from the tree.
1763 * The entries are sorted by erase counter, so the PEB with the smallest
1764 * erase counter will be returned.
1765 * If something goes wrong, an error code will be returned.
1766 */
1767 int
1768 get_peb(struct chfs_ebh *ebh)
1769 {
1770 int err, pebnr;
1771 struct chfs_peb *peb;
1772
1773 retry:
1774 mutex_enter(&ebh->erase_lock);
1775 //dbg_ebh("LOCK: ebh->erase_lock spin locked in get_peb()\n");
1776 if (RB_EMPTY(&ebh->free)) {
1777 		/* There are no more free PEBs in the tree */
1778 if (TAILQ_EMPTY(&ebh->to_erase) &&
1779 TAILQ_EMPTY(&ebh->fully_erased)) {
1780 mutex_exit(&ebh->erase_lock);
1781 //dbg_ebh("UNLOCK: ebh->erase_lock spin unlocked in get_peb()\n");
1782 return ENOSPC;
1783 }
1784 err = free_peb(ebh);
1785
1786 mutex_exit(&ebh->erase_lock);
1787 //dbg_ebh("UNLOCK: ebh->erase_lock spin unlocked in get_peb()\n");
1788
1789 if (err)
1790 return err;
1791 goto retry;
1792 }
1793 peb = RB_MIN(peb_free_rbtree, &ebh->free);
1794 pebnr = peb->pebnr;
1795 RB_REMOVE(peb_free_rbtree, &ebh->free, peb);
1796 err = add_peb_to_in_use(ebh, peb->pebnr, peb->erase_cnt);
1797 if (err)
1798 pebnr = err;
1799
1800 kmem_free(peb, sizeof(struct chfs_peb));
1801
1802 mutex_exit(&ebh->erase_lock);
1803 //dbg_ebh("UNLOCK: ebh->erase_lock spin unlocked in get_peb()\n");
1804
1805 return pebnr;
1806 }
1807
1808 /**
1809 * ebh_write_leb - write data to leb
1810 * @ebh: eraseblock handler
1811 * @lnr: logical eraseblock number
1812 * @buf: data to write
1813 * @offset: offset where to write
1814 * @len: number of bytes to write
 * @retlen: the number of bytes actually written is stored here
1815 *
1816 * Returns zero in case of success, error code in case of fail.
1817 */
1818 int
1819 ebh_write_leb(struct chfs_ebh *ebh, int lnr, char *buf, uint32_t offset,
1820 size_t len, size_t *retlen)
1821 {
1822 int err, pebnr, retries = 0;
1823 off_t data_offset;
1824 struct chfs_eb_hdr *ebhdr;
1825
1826 dbg("offset: %d | len: %zu | (offset+len): %zu "
1827 " | ebsize: %zu\n", offset, len, (offset+len), ebh->eb_size);
1828
1829 KASSERT(offset + len <= ebh->eb_size);
1830
1831 err = leb_write_lock(ebh, lnr);
1832 if (err)
1833 return err;
1834
1835 pebnr = ebh->lmap[lnr];
1836 /* If the LEB is mapped write out data */
1837 if (pebnr != EBH_LEB_UNMAPPED) {
1838 data_offset = ebh->ops->calc_data_offs(ebh, pebnr, offset);
1839 err = flash_write(ebh->flash_dev, data_offset, len, retlen,
1840 (unsigned char *) buf);
1841
1842 if (err) {
1843 chfs_err("error %d while writing %zu bytes to PEB "
1844 "%d:%ju, written %zu bytes\n",
1845 err, len, pebnr, (uintmax_t )offset, *retlen);
1846 } else {
1847 KASSERT(len == *retlen);
1848 }
1849
1850 leb_write_unlock(ebh, lnr);
1851 return err;
1852 }
1853
1854 /*
1855 * If the LEB is unmapped, get a free PEB and write the
1856 * eraseblock header first
1857 */
1858 ebhdr = kmem_alloc(sizeof(struct chfs_eb_hdr), KM_SLEEP);
1859
1860 /* Setting up eraseblock header properties */
1861 ebh->ops->create_eb_hdr(ebhdr, lnr);
1862
1863 retry:
1864 /* Getting a physical eraseblock from the wear leveling system */
1865 pebnr = get_peb(ebh);
1866 if (pebnr < 0) {
1867 leb_write_unlock(ebh, lnr);
1868 kmem_free(ebhdr, sizeof(struct chfs_eb_hdr));
1869 return pebnr;
1870 }
1871
1872 /* Write the eraseblock header to the media */
1873 err = ebh->ops->write_eb_hdr(ebh, pebnr, ebhdr);
1874 if (err) {
1875 chfs_warn(
1876 "error writing eraseblock header: LEB %d , PEB %d\n",
1877 lnr, pebnr);
1878 goto write_error;
1879 }
1880
1881 /* Write out data */
1882 if (len) {
1883 data_offset = ebh->ops->calc_data_offs(ebh, pebnr, offset);
1884 err = flash_write(ebh->flash_dev,
1885 data_offset, len, retlen, (unsigned char *) buf);
1886 if (err) {
1887 chfs_err("error %d while writing %zu bytes to PEB "
1888 " %d:%ju, written %zu bytes\n",
1889 err, len, pebnr, (uintmax_t )offset, *retlen);
1890 goto write_error;
1891 }
1892 }
1893
1894 ebh->lmap[lnr] = pebnr;
1895 leb_write_unlock(ebh, lnr);
1896 kmem_free(ebhdr, sizeof(struct chfs_eb_hdr));
1897
1898 return 0;
1899
1900 write_error:
	err = release_peb(ebh, pebnr);
1901 // max retries (NOW: 2)
1902 if (err || CHFS_MAX_GET_PEB_RETRIES < ++retries) {
1903 leb_write_unlock(ebh, lnr);
1904 kmem_free(ebhdr, sizeof(struct chfs_eb_hdr));
1905 return err;
1906 }
1907 goto retry;
1908 }
1909
1910 /**
1911 * ebh_erase_leb - erase a leb
1912 * @ebh: eraseblock handler
1913 * @lnr: leb number
1914 *
1915 * Returns zero in case of success, error code in case of fail.
1916 */
1917 int
1918 ebh_erase_leb(struct chfs_ebh *ebh, int lnr)
1919 {
1920 int err, pebnr;
1921
1922 leb_write_lock(ebh, lnr);
1923
1924 pebnr = ebh->lmap[lnr];
1925 if (pebnr < 0) {
1926 leb_write_unlock(ebh, lnr);
1927 return EBH_LEB_UNMAPPED;
1928 }
1929 err = release_peb(ebh, pebnr);
1930 if (err)
1931 goto out_unlock;
1932
1933 ebh->lmap[lnr] = EBH_LEB_UNMAPPED;
1934 cv_signal(&ebh->bg_erase.eth_wakeup);
1935 out_unlock:
1936 leb_write_unlock(ebh, lnr);
1937 return err;
1938 }
1939
1940 /**
1941 * ebh_map_leb - maps a PEB to LEB
1942 * @ebh: eraseblock handler
1943 * @lnr: leb number
1944 *
1945 * Returns zero on success, error code in case of fail
1946 */
1947 int
1948 ebh_map_leb(struct chfs_ebh *ebh, int lnr)
1949 {
1950 int err, pebnr, retries = 0;
1951 struct chfs_eb_hdr *ebhdr;
1952
1953 ebhdr = kmem_alloc(sizeof(struct chfs_eb_hdr), KM_SLEEP);
1954
1955 err = leb_write_lock(ebh, lnr);
1956 	if (err) {
		kmem_free(ebhdr, sizeof(struct chfs_eb_hdr));
1957 		return err;
	}
1958
1959 retry:
1960 pebnr = get_peb(ebh);
1961 if (pebnr < 0) {
1962 err = pebnr;
1963 goto out_unlock;
1964 }
1965
1966 ebh->ops->create_eb_hdr(ebhdr, lnr);
1967
1968 err = ebh->ops->write_eb_hdr(ebh, pebnr, ebhdr);
1969 if (err) {
1970 chfs_warn(
1971 "error writing eraseblock header: LEB %d , PEB %d\n",
1972 lnr, pebnr);
1973 goto write_error;
1974 }
1975
1976 ebh->lmap[lnr] = pebnr;
1977
1978 out_unlock:
1979 	leb_write_unlock(ebh, lnr);
	kmem_free(ebhdr, sizeof(struct chfs_eb_hdr));
1980 	return err;
1981
1982 write_error:
1983 err = release_peb(ebh, pebnr);
1984 // max retries (NOW: 2)
1985 if (err || CHFS_MAX_GET_PEB_RETRIES < ++retries) {
1986 leb_write_unlock(ebh, lnr);
1987 kmem_free(ebhdr, sizeof(struct chfs_eb_hdr));
1988 return err;
1989 }
1990 goto retry;
1991 }
1992
1993 /**
1994 * ebh_unmap_leb - unmap a logical eraseblock
1995 * @ebh: eraseblock handler
1996 * @lnr: leb number
1997 *
1998 * Returns zero on success, error code in case of fail.
1999 */
2000 int
2001 ebh_unmap_leb(struct chfs_ebh *ebh, int lnr)
2002 {
2003 int err;
2004
2005 if (ebh_is_mapped(ebh, lnr) < 0)
2006 		/* The eraseblock is already unmapped */
2007 return 0;
2008
2009 err = ebh_erase_leb(ebh, lnr);
2010
2011 return err;
2012 }
2013
2014 /**
2015 * ebh_is_mapped - check if a PEB is mapped to @lnr
2016 * @ebh: eraseblock handler
2017 * @lnr: leb number
2018 *
2019 * Returns the mapped PEB number (>= 0) if the logical eraseblock is mapped, a negative value otherwise.
2020 */
2021 int
2022 ebh_is_mapped(struct chfs_ebh *ebh, int lnr)
2023 {
2024 int err, result;
2025 err = leb_read_lock(ebh, lnr);
2026 if (err)
2027 return err;
2028
2029 result = ebh->lmap[lnr];
2030 leb_read_unlock(ebh, lnr);
2031
2032 return result;
2033 }
2034
2035 /**
2036 * ebh_change_leb - write the LEB to another PEB
2037 * @ebh: eraseblock handler
2038 * @lnr: leb number
2039 * @buf: data to write
2040 * @len: length of data
2041 * Returns zero in case of success, error code in case of fail.
2042 */
2043 int
2044 ebh_change_leb(struct chfs_ebh *ebh, int lnr, char *buf, size_t len,
2045 size_t *retlen)
2046 {
2047 int err, pebnr, pebnr_old, retries = 0;
2048 off_t data_offset;
2049
2050 struct chfs_peb *peb = NULL;
2051 struct chfs_eb_hdr *ebhdr;
2052
2053 if (ebh_is_mapped(ebh, lnr) < 0)
2054 return EBH_LEB_UNMAPPED;
2055
2056 if (len == 0) {
2057 err = ebh_unmap_leb(ebh, lnr);
2058 if (err)
2059 return err;
2060 return ebh_map_leb(ebh, lnr);
2061 }
2062
2063 ebhdr = kmem_alloc(sizeof(struct chfs_eb_hdr), KM_SLEEP);
2064
2065 pebnr_old = ebh->lmap[lnr];
2066
2067 mutex_enter(&ebh->alc_mutex);
2068 err = leb_write_lock(ebh, lnr);
2069 if (err)
2070 goto out_mutex;
2071
2072 if (ebh->ops->mark_eb_hdr_dirty_flash) {
2073 err = ebh->ops->mark_eb_hdr_dirty_flash(ebh, pebnr_old, lnr);
2074 if (err)
2075 goto out_unlock;
2076 }
2077
2078 /* Setting up eraseblock header properties */
2079 ebh->ops->create_eb_hdr(ebhdr, lnr);
2080
2081 retry:
2082 /* Getting a physical eraseblock from the wear leveling system */
2083 pebnr = get_peb(ebh);
2084 if (pebnr < 0) {
2085 leb_write_unlock(ebh, lnr);
2086 mutex_exit(&ebh->alc_mutex);
2087 kmem_free(ebhdr, sizeof(struct chfs_eb_hdr));
2088 return pebnr;
2089 }
2090
2091 err = ebh->ops->write_eb_hdr(ebh, pebnr, ebhdr);
2092 if (err) {
2093 chfs_warn(
2094 "error writing eraseblock header: LEB %d , PEB %d",
2095 lnr, pebnr);
2096 goto write_error;
2097 }
2098
2099 /* Write out data */
2100 data_offset = ebh->ops->calc_data_offs(ebh, pebnr, 0);
2101 err = flash_write(ebh->flash_dev, data_offset, len, retlen,
2102 (unsigned char *) buf);
2103 if (err) {
2104 chfs_err("error %d while writing %zu bytes to PEB %d:%ju,"
2105 " written %zu bytes",
2106 err, len, pebnr, (uintmax_t)data_offset, *retlen);
2107 goto write_error;
2108 }
2109
2110 ebh->lmap[lnr] = pebnr;
2111
2112 if (ebh->ops->invalidate_eb_hdr) {
2113 err = ebh->ops->invalidate_eb_hdr(ebh, pebnr_old);
2114 if (err)
2115 goto out_unlock;
2116 }
2117 peb = find_peb_in_use(ebh, pebnr_old);
2118 err = release_peb(ebh, peb->pebnr);
2119
2120 out_unlock:
2121 leb_write_unlock(ebh, lnr);
2122
2123 out_mutex:
2124 mutex_exit(&ebh->alc_mutex);
2125 kmem_free(ebhdr, sizeof(struct chfs_eb_hdr));
2126 	if (peb != NULL)	/* NULL if we bailed out before find_peb_in_use() */
		kmem_free(peb, sizeof(struct chfs_peb));
2127 return err;
2128
2129 write_error:
2130 err = release_peb(ebh, pebnr);
2131 //max retries (NOW: 2)
2132 if (err || CHFS_MAX_GET_PEB_RETRIES < ++retries) {
2133 leb_write_unlock(ebh, lnr);
2134 mutex_exit(&ebh->alc_mutex);
2135 kmem_free(ebhdr, sizeof(struct chfs_eb_hdr));
2136 return err;
2137 }
2138 goto retry;
2139 }
2140
2141
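/*
 * Putting the public entry points together: a minimal usage sketch of the
 * eraseblock handler, assuming a caller that already holds a dev_t for a
 * flash(4) device. The identifiers "dev", "buf" and "retlen" below are
 * hypothetical and error handling is reduced to the bare minimum.
 *
 *	struct chfs_ebh *ebh;
 *	size_t retlen;
 *	char buf[512];
 *
 *	ebh = kmem_alloc(sizeof(struct chfs_ebh), KM_SLEEP);
 *	if (ebh_open(ebh, dev))			(scans the media, starts
 *		return;				 the erase thread)
 *
 *	memset(buf, 0xAB, sizeof(buf));
 *	ebh_write_leb(ebh, 0, buf, 0, sizeof(buf), &retlen);  (maps LEB 0)
 *	ebh_read_leb(ebh, 0, buf, 0, sizeof(buf), &retlen);   (reads it back)
 *	ebh_unmap_leb(ebh, 0);		(schedules the backing PEB for erase)
 *
 *	ebh_close(ebh);			(stops the erase thread, frees ebh)
 */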