xref: /netbsd-src/sys/ufs/chfs/ebh.c (revision 6a493d6bc668897c91594964a732d38505b70cbb)
1 /*	$NetBSD: ebh.c,v 1.3 2012/08/10 09:26:58 ttoth Exp $	*/
2 
3 /*-
4  * Copyright (c) 2010 Department of Software Engineering,
5  *		      University of Szeged, Hungary
6  * Copyright (C) 2009 Ferenc Havasi <havasi@inf.u-szeged.hu>
7  * Copyright (C) 2009 Zoltan Sogor <weth@inf.u-szeged.hu>
8  * Copyright (C) 2009 David Tengeri <dtengeri@inf.u-szeged.hu>
9  * Copyright (C) 2009 Tamas Toth <ttoth@inf.u-szeged.hu>
10  * Copyright (C) 2010 Adam Hoka <ahoka@NetBSD.org>
11  * All rights reserved.
12  *
13  * This code is derived from software contributed to The NetBSD Foundation
14  * by the Department of Software Engineering, University of Szeged, Hungary
15  *
16  * Redistribution and use in source and binary forms, with or without
17  * modification, are permitted provided that the following conditions
18  * are met:
19  * 1. Redistributions of source code must retain the above copyright
20  *    notice, this list of conditions and the following disclaimer.
21  * 2. Redistributions in binary form must reproduce the above copyright
22  *    notice, this list of conditions and the following disclaimer in the
23  *    documentation and/or other materials provided with the distribution.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
26  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
27  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
28  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
29  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
30  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
31  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
32  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
33  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  */
37 
38 #include "ebh.h"
39 
40 /*****************************************************************************/
41 /* Flash specific operations						     */
42 /*****************************************************************************/
43 int nor_create_eb_hdr(struct chfs_eb_hdr *ebhdr, int lnr);
44 int nand_create_eb_hdr(struct chfs_eb_hdr *ebhdr, int lnr);
45 int nor_calc_data_offs(struct chfs_ebh *ebh, int pebnr, int offset);
46 int nand_calc_data_offs(struct chfs_ebh *ebh, int pebnr, int offset);
47 int nor_read_eb_hdr(struct chfs_ebh *ebh, int pebnr, struct chfs_eb_hdr *ebhdr);
48 int nand_read_eb_hdr(struct chfs_ebh *ebh, int pebnr, struct chfs_eb_hdr *ebhdr);
49 int nor_write_eb_hdr(struct chfs_ebh *ebh, int pebnr, struct chfs_eb_hdr *ebhdr);
50 int nand_write_eb_hdr(struct chfs_ebh *ebh, int pebnr,struct chfs_eb_hdr *ebhdr);
51 int nor_check_eb_hdr(struct chfs_ebh *ebh, void *buf);
52 int nand_check_eb_hdr(struct chfs_ebh *ebh, void *buf);
53 int nor_mark_eb_hdr_dirty_flash(struct chfs_ebh *ebh, int pebnr, int lid);
54 int nor_invalidate_eb_hdr(struct chfs_ebh *ebh, int pebnr);
55 int mark_eb_hdr_free(struct chfs_ebh *ebh, int pebnr, int ec);
56 
57 int ltree_entry_cmp(struct chfs_ltree_entry *le1, struct chfs_ltree_entry *le2);
58 int peb_in_use_cmp(struct chfs_peb *peb1, struct chfs_peb *peb2);
59 int peb_free_cmp(struct chfs_peb *peb1, struct chfs_peb *peb2);
60 int add_peb_to_erase_queue(struct chfs_ebh *ebh, int pebnr, int ec,struct peb_queue *queue);
61 struct chfs_peb * find_peb_in_use(struct chfs_ebh *ebh, int pebnr);
62 int add_peb_to_free(struct chfs_ebh *ebh, int pebnr, int ec);
63 int add_peb_to_in_use(struct chfs_ebh *ebh, int pebnr, int ec);
64 void erase_callback(struct flash_erase_instruction *ei);
65 int free_peb(struct chfs_ebh *ebh);
66 int release_peb(struct chfs_ebh *ebh, int pebnr);
67 void erase_thread(void *data);
68 static void erase_thread_start(struct chfs_ebh *ebh);
69 static void erase_thread_stop(struct chfs_ebh *ebh);
70 int scan_leb_used_cmp(struct chfs_scan_leb *sleb1, struct chfs_scan_leb *sleb2);
71 int nor_scan_add_to_used(struct chfs_ebh *ebh, struct chfs_scan_info *si,struct chfs_eb_hdr *ebhdr, int pebnr, int leb_status);
72 int nor_process_eb(struct chfs_ebh *ebh, struct chfs_scan_info *si,
73     int pebnr, struct chfs_eb_hdr *ebhdr);
74 int nand_scan_add_to_used(struct chfs_ebh *ebh, struct chfs_scan_info *si,struct chfs_eb_hdr *ebhdr, int pebnr);
75 int nand_process_eb(struct chfs_ebh *ebh, struct chfs_scan_info *si,
76     int pebnr, struct chfs_eb_hdr *ebhdr);
77 struct chfs_scan_info *chfs_scan(struct chfs_ebh *ebh);
78 void scan_info_destroy(struct chfs_scan_info *si);
79 int scan_media(struct chfs_ebh *ebh);
80 int get_peb(struct chfs_ebh *ebh);
81 /**
82  * nor_create_eb_hdr - creates an eraseblock header for NOR flash
83  * @ebhdr: ebhdr to set
84  * @lnr: LEB number
85  */
86 int
87 nor_create_eb_hdr(struct chfs_eb_hdr *ebhdr, int lnr)
88 {
89 	ebhdr->u.nor_hdr.lid = htole32(lnr);
90 	return 0;
91 }
92 
93 /**
94  * nand_create_eb_hdr - creates an eraseblock header for NAND flash
95  * @ebhdr: ebhdr to set
96  * @lnr: LEB number
97  */
98 int
99 nand_create_eb_hdr(struct chfs_eb_hdr *ebhdr, int lnr)
100 {
101 	ebhdr->u.nand_hdr.lid = htole32(lnr);
102 	return 0;
103 }
104 
105 /**
106  * nor_calc_data_offs - calculates data offset on NOR flash
107  * @ebh: chfs eraseblock handler
108  * @pebnr: eraseblock number
109  * @offset: offset within the eraseblock
110  */
111 int
112 nor_calc_data_offs(struct chfs_ebh *ebh, int pebnr, int offset)
113 {
114 	return pebnr * ebh->flash_if->erasesize + offset +
115 	    CHFS_EB_EC_HDR_SIZE + CHFS_EB_HDR_NOR_SIZE;
116 }
117 
118 /**
119  * nand_calc_data_offs - calculates data offset on NAND flash
120  * @ebh: chfs eraseblock handler
121  * @pebnr: eraseblock number
122  * @offset: offset within the eraseblock
123  */
124 int
125 nand_calc_data_offs(struct chfs_ebh *ebh, int pebnr, int offset)
126 {
127 	return pebnr * ebh->flash_if->erasesize + offset +
128 	    2 * ebh->flash_if->page_size;
129 }
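
/*
 * Illustrative note on the two calc_data_offs() helpers above; the sizes
 * used in the example are assumptions, not values from this file.
 *
 *   NOR:  | EC header | NOR header | data ... |
 *         data offset = pebnr * erasesize + CHFS_EB_EC_HDR_SIZE
 *                       + CHFS_EB_HDR_NOR_SIZE + offset
 *
 *   NAND: | page 0: EC header | page 1: NAND header | page 2...: data |
 *         data offset = pebnr * erasesize + 2 * page_size + offset
 *
 * E.g. with an assumed 128 KiB erasesize and 2 KiB pages, offset 0 of PEB 3
 * on NAND maps to 3 * 131072 + 2 * 2048 = 397312.
 */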
130 
131 /**
132  * nor_read_eb_hdr - read eraseblock header from NOR flash
133  *
134  * @ebh: chfs eraseblock handler
135  * @pebnr: eraseblock number
136  * @ebhdr: where to store the data
137  *
138  * Reads the eraseblock header from media.
139  * Returns zero in case of success, error code in case of fail.
140  */
141 int
142 nor_read_eb_hdr(struct chfs_ebh *ebh,
143     int pebnr, struct chfs_eb_hdr *ebhdr)
144 {
145 	int ret;
146 	size_t retlen;
147 	off_t ofs = pebnr * ebh->flash_if->erasesize;
148 
149 	KASSERT(pebnr >= 0 && pebnr < ebh->peb_nr);
150 
151 	ret = flash_read(ebh->flash_dev,
152 	    ofs, CHFS_EB_EC_HDR_SIZE,
153 	    &retlen, (unsigned char *) &ebhdr->ec_hdr);
154 
155 	if (ret || retlen != CHFS_EB_EC_HDR_SIZE)
156 		return ret;
157 
158 	ofs += CHFS_EB_EC_HDR_SIZE;
159 	ret = flash_read(ebh->flash_dev,
160 	    ofs, CHFS_EB_HDR_NOR_SIZE,
161 	    &retlen, (unsigned char *) &ebhdr->u.nor_hdr);
162 
163 	if (ret || retlen != CHFS_EB_HDR_NOR_SIZE)
164 		return ret;
165 
166 	return 0;
167 }
168 
169 /**
170  * nand_read_eb_hdr - read eraseblock header from NAND flash
171  *
172  * @ebh: chfs eraseblock handler
173  * @pebnr: eraseblock number
174  * @ebhdr: where to store the data
175  *
176  * Reads the eraseblock header from media. It is stored in the first two pages.
177  * Returns zero in case of success, error code in case of fail.
178  */
179 int
180 nand_read_eb_hdr(struct chfs_ebh *ebh, int pebnr,
181     struct chfs_eb_hdr *ebhdr)
182 {
183 	int ret;
184 	size_t retlen;
185 	off_t ofs;
186 
187 	KASSERT(pebnr >= 0 && pebnr < ebh->peb_nr);
188 
189 	/* Read erase counter header from the first page. */
190 	ofs = pebnr * ebh->flash_if->erasesize;
191 	ret = flash_read(ebh->flash_dev,
192 	    ofs, CHFS_EB_EC_HDR_SIZE, &retlen,
193 	    (unsigned char *) &ebhdr->ec_hdr);
194 	if (ret || retlen != CHFS_EB_EC_HDR_SIZE)
195 		return ret;
196 
197 	/* Read NAND eraseblock header from the second page */
198 	ofs += ebh->flash_if->page_size;
199 	ret = flash_read(ebh->flash_dev,
200 	    ofs, CHFS_EB_HDR_NAND_SIZE, &retlen,
201 	    (unsigned char *) &ebhdr->u.nand_hdr);
202 	if (ret || retlen != CHFS_EB_HDR_NAND_SIZE)
203 		return ret;
204 
205 	return 0;
206 }
207 
208 /**
209  * nor_write_eb_hdr - write eraseblock header to NOR flash
210  *
211  * @ebh: chfs eraseblock handler
212  * @pebnr: eraseblock number to write to
213  * @ebhdr: eraseblock header to write
214  *
215  * Writes the eraseblock header to media.
216  * Returns zero in case of success, error code in case of fail.
217  */
218 int
219 nor_write_eb_hdr(struct chfs_ebh *ebh, int pebnr, struct chfs_eb_hdr *ebhdr)
220 {
221 	int ret, crc;
222 	size_t retlen;
223 
224 	off_t ofs = pebnr * ebh->flash_if->erasesize + CHFS_EB_EC_HDR_SIZE;
225 
226 	ebhdr->u.nor_hdr.lid = ebhdr->u.nor_hdr.lid
227 	    | htole32(CHFS_LID_NOT_DIRTY_BIT);
228 
229 	crc = crc32(0, (uint8_t *)&ebhdr->u.nor_hdr + 4,
230 	    CHFS_EB_HDR_NOR_SIZE - 4);
231 	ebhdr->u.nor_hdr.crc = htole32(crc);
232 
233 	KASSERT(pebnr >= 0 && pebnr < ebh->peb_nr);
234 
235 	ret = flash_write(ebh->flash_dev,
236 	    ofs, CHFS_EB_HDR_NOR_SIZE, &retlen,
237 	    (unsigned char *) &ebhdr->u.nor_hdr);
238 
239 	if (ret || retlen != CHFS_EB_HDR_NOR_SIZE)
240 		return ret;
241 
242 	return 0;
243 }
244 
245 /**
246  * nand_write_eb_hdr - write eraseblock header to NAND flash
247  *
248  * @ebh: chfs eraseblock handler
249  * @pebnr: eraseblock number to write to
250  * @ebhdr: eraseblock header to write
251  *
252  * Writes the eraseblock header to media.
253  * Returns zero in case of success, error code in case of fail.
254  */
255 int
256 nand_write_eb_hdr(struct chfs_ebh *ebh, int pebnr,
257     struct chfs_eb_hdr *ebhdr)
258 {
259 	int ret, crc;
260 	size_t retlen;
261 	flash_off_t ofs;
262 
263 	KASSERT(pebnr >= 0 && pebnr < ebh->peb_nr);
264 
265 	ofs = pebnr * ebh->flash_if->erasesize +
266 	    ebh->flash_if->page_size;
267 
268 	ebhdr->u.nand_hdr.serial = htole64(++(*ebh->max_serial));
269 
270 	crc = crc32(0, (uint8_t *)&ebhdr->u.nand_hdr + 4,
271 	    CHFS_EB_HDR_NAND_SIZE - 4);
272 	ebhdr->u.nand_hdr.crc = htole32(crc);
273 
274 	ret = flash_write(ebh->flash_dev, ofs,
275 	    CHFS_EB_HDR_NAND_SIZE, &retlen,
276 	    (unsigned char *) &ebhdr->u.nand_hdr);
277 
278 	if (ret || retlen != CHFS_EB_HDR_NAND_SIZE)
279 		return ret;
280 
281 	return 0;
282 }
283 
284 /**
285  * nor_check_eb_hdr - check eraseblock header read from NOR flash
286  *
287  * @ebh: chfs eraseblock handler
288  * @buf: eraseblock header to check
289  *
290  * Returns eraseblock header status.
291  */
292 int
293 nor_check_eb_hdr(struct chfs_ebh *ebh, void *buf)
294 {
295 	uint32_t magic, crc, hdr_crc;
296 	struct chfs_eb_hdr *ebhdr = buf;
297 	le32 lid_save;
298 
299 	// check whether there is a header
300 	if (check_pattern((void *) &ebhdr->ec_hdr,
301 		0xFF, 0, CHFS_EB_EC_HDR_SIZE)) {
302 		dbg_ebh("no header found\n");
303 		return EBHDR_LEB_NO_HDR;
304 	}
305 
306 	// check magic
307 	magic = le32toh(ebhdr->ec_hdr.magic);
308 	if (magic != CHFS_MAGIC_BITMASK) {
309 		dbg_ebh("bad magic bitmask(exp: %x found %x)\n",
310 		    CHFS_MAGIC_BITMASK, magic);
311 		return EBHDR_LEB_BADMAGIC;
312 	}
313 
314 	// check CRC_EC
315 	hdr_crc = le32toh(ebhdr->ec_hdr.crc_ec);
316 	crc = crc32(0, (uint8_t *) &ebhdr->ec_hdr + 8, 4);
317 	if (hdr_crc != crc) {
318 		dbg_ebh("bad crc_ec found\n");
319 		return EBHDR_LEB_BADCRC;
320 	}
321 
322 	/* check if the PEB is free: magic, crc_ec and erase_cnt are good and
323 	 * everything else is 0xFF
324 	 */
325 	if (check_pattern((void *) &ebhdr->u.nor_hdr, 0xFF, 0,
326 		CHFS_EB_HDR_NOR_SIZE)) {
327 		dbg_ebh("free peb found\n");
328 		return EBHDR_LEB_FREE;
329 	}
330 
331 	// check invalidated (CRC == LID == 0)
332 	if (ebhdr->u.nor_hdr.crc == 0 && ebhdr->u.nor_hdr.lid == 0) {
333 		dbg_ebh("invalidated ebhdr found\n");
334 		return EBHDR_LEB_INVALIDATED;
335 	}
336 
337 	// check CRC
338 	hdr_crc = le32toh(ebhdr->u.nor_hdr.crc);
339 	lid_save = ebhdr->u.nor_hdr.lid;
340 
341 	// mark lid as not dirty for crc calc
342 	ebhdr->u.nor_hdr.lid = ebhdr->u.nor_hdr.lid | htole32(
343 		CHFS_LID_NOT_DIRTY_BIT);
344 	crc = crc32(0, (uint8_t *) &ebhdr->u.nor_hdr + 4,
345 	    CHFS_EB_HDR_NOR_SIZE - 4);
346 	// restore the original lid value in ebh
347 	ebhdr->u.nor_hdr.lid = lid_save;
348 
349 	if (crc != hdr_crc) {
350 		dbg_ebh("bad crc found\n");
351 		return EBHDR_LEB_BADCRC;
352 	}
353 
354 	// check dirty
355 	if (!(le32toh(lid_save) & CHFS_LID_NOT_DIRTY_BIT)) {
356 		dbg_ebh("dirty ebhdr found\n");
357 		return EBHDR_LEB_DIRTY;
358 	}
359 
360 	return EBHDR_LEB_OK;
361 }
362 
363 /**
364  * nand_check_eb_hdr - check eraseblock header read from NAND flash
365  *
366  * @ebh: chfs eraseblock handler
367  * @buf: eraseblock header to check
368  *
369  * Returns eraseblock header status.
370  */
371 int
372 nand_check_eb_hdr(struct chfs_ebh *ebh, void *buf)
373 {
374 	uint32_t magic, crc, hdr_crc;
375 	struct chfs_eb_hdr *ebhdr = buf;
376 
377 	// check whether there is a header
378 	if (check_pattern((void *) &ebhdr->ec_hdr,
379 		0xFF, 0, CHFS_EB_EC_HDR_SIZE)) {
380 		dbg_ebh("no header found\n");
381 		return EBHDR_LEB_NO_HDR;
382 	}
383 
384 	// check magic
385 	magic = le32toh(ebhdr->ec_hdr.magic);
386 	if (magic != CHFS_MAGIC_BITMASK) {
387 		dbg_ebh("bad magic bitmask(exp: %x found %x)\n",
388 		    CHFS_MAGIC_BITMASK, magic);
389 		return EBHDR_LEB_BADMAGIC;
390 	}
391 
392 	// check CRC_EC
393 	hdr_crc = le32toh(ebhdr->ec_hdr.crc_ec);
394 	crc = crc32(0, (uint8_t *) &ebhdr->ec_hdr + 8, 4);
395 	if (hdr_crc != crc) {
396 		dbg_ebh("bad crc_ec found\n");
397 		return EBHDR_LEB_BADCRC;
398 	}
399 
400 	/* check if the PEB is free: magic, crc_ec and erase_cnt are good and
401 	 * everything else is 0xFF
402 	 */
403 	if (check_pattern((void *) &ebhdr->u.nand_hdr, 0xFF, 0,
404 		CHFS_EB_HDR_NAND_SIZE)) {
405 		dbg_ebh("free peb found\n");
406 		return EBHDR_LEB_FREE;
407 	}
408 
409 	// check CRC
410 	hdr_crc = le32toh(ebhdr->u.nand_hdr.crc);
411 
412 	crc = crc32(0, (uint8_t *) &ebhdr->u.nand_hdr + 4,
413 	    CHFS_EB_HDR_NAND_SIZE - 4);
414 
415 	if (crc != hdr_crc) {
416 		dbg_ebh("bad crc found\n");
417 		return EBHDR_LEB_BADCRC;
418 	}
419 
420 	return EBHDR_LEB_OK;
421 }
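
/*
 * Note: the EBHDR_LEB_* codes returned by the two check routines above are
 * consumed by nor_process_eb()/nand_process_eb() during scan: BADMAGIC and
 * BADCRC blocks go to the corrupted queue, FREE to the free queue, NO_HDR to
 * the erased queue, INVALIDATED (NOR only) straight to the erase queue, and
 * OK (or, on NOR, DIRTY) blocks are entered into the used tree, where a dirty
 * copy loses against a clean copy of the same LEB.
 */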
422 
423 /**
424  * nor_mark_eb_hdr_dirty_flash - mark eraseblock header dirty on NOR flash
425  *
426  * @ebh: chfs eraseblock handler
427  * @pebnr: eraseblock number
428  * @lid: leb id (its bit number 31 will be set to 0)
429  *
430  * It pulls the CHFS_LID_NOT_DIRTY_BIT to zero on flash.
431  *
432  * Returns zero in case of success, error code in case of fail.
433  */
434 int
435 nor_mark_eb_hdr_dirty_flash(struct chfs_ebh *ebh, int pebnr, int lid)
436 {
437 	int ret;
438 	size_t retlen;
439 	off_t ofs;
440 
441 	/* mark leb id dirty */
442 	lid = htole32(lid & CHFS_LID_DIRTY_BIT_MASK);
443 
444 	/* calculate position */
445 	ofs = pebnr * ebh->flash_if->erasesize + CHFS_EB_EC_HDR_SIZE
446 	    + CHFS_GET_MEMBER_POS(struct chfs_nor_eb_hdr , lid);
447 
448 	ret = flash_write(ebh->flash_dev, ofs, sizeof(lid), &retlen,
449 	    (unsigned char *) &lid);
450 	if (ret || retlen != sizeof(lid)) {
451 		chfs_err("can't mark peb dirty");
452 		return ret;
453 	}
454 
455 	return 0;
456 }
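
/*
 * Design note: marking dirty above and invalidating below both overwrite an
 * already-programmed header in place, which works because NOR flash can
 * clear individual bits (1 -> 0) without an erase cycle.  NAND offers no
 * such in-place update, so nand_write_eb_hdr() records a monotonically
 * increasing serial number instead and the scan code keeps the copy with
 * the highest serial (see nand_scan_add_to_used()).
 */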
457 
458 /**
459  * nor_invalidate_eb_hdr - invalidate eraseblock header on NOR flash
460  *
461  * @ebh: chfs eraseblock handler
462  * @pebnr: eraseblock number
463  *
464  * Sets the crc and lid fields to zero.
465  * Returns zero in case of success, error code in case of fail.
466  */
467 int
468 nor_invalidate_eb_hdr(struct chfs_ebh *ebh, int pebnr)
469 {
470 	int ret;
471 	size_t retlen;
472 	off_t ofs;
473 	char zero_buf[CHFS_INVALIDATE_SIZE];
474 
475 	/* fill with zero */
476 	memset(zero_buf, 0x0, CHFS_INVALIDATE_SIZE);
477 
478 	/* calculate position (!!! lid is directly behind crc !!!) */
479 	ofs = pebnr * ebh->flash_if->erasesize + CHFS_EB_EC_HDR_SIZE
480 	    + CHFS_GET_MEMBER_POS(struct chfs_nor_eb_hdr, crc);
481 
482 	ret = flash_write(ebh->flash_dev,
483 	    ofs, CHFS_INVALIDATE_SIZE, &retlen,
484 	    (unsigned char *) &zero_buf);
485 	if (ret || retlen != CHFS_INVALIDATE_SIZE) {
486 		chfs_err("can't invalidate peb");
487 		return ret;
488 	}
489 
490 	return 0;
491 }
492 
493 /**
494  * mark_eb_hdr_free - mark an eraseblock header free on NOR or NAND flash
495  *
496  * @ebh: chfs eraseblock handler
497  * @pebnr: eraseblock number
498  * @ec: erase counter of PEB
499  *
500  * Write out the magic and erase counter to the physical eraseblock.
501  * Returns zero in case of success, error code in case of fail.
502  */
503 int
504 mark_eb_hdr_free(struct chfs_ebh *ebh, int pebnr, int ec)
505 {
506 	int ret, crc;
507 	size_t retlen;
508 	off_t ofs;
509 	struct chfs_eb_hdr *ebhdr;
510 	ebhdr = kmem_alloc(sizeof(struct chfs_eb_hdr), KM_SLEEP);
511 
512 	ebhdr->ec_hdr.magic = htole32(CHFS_MAGIC_BITMASK);
513 	ebhdr->ec_hdr.erase_cnt = htole32(ec);
514 	crc = crc32(0, (uint8_t *) &ebhdr->ec_hdr + 8, 4);
515 	ebhdr->ec_hdr.crc_ec = htole32(crc);
516 
517 	ofs = pebnr * ebh->flash_if->erasesize;
518 
519 	KASSERT(sizeof(ebhdr->ec_hdr) == CHFS_EB_EC_HDR_SIZE);
520 
521 	ret = flash_write(ebh->flash_dev,
522 	    ofs, CHFS_EB_EC_HDR_SIZE, &retlen,
523 	    (unsigned char *) &ebhdr->ec_hdr);
524 
525 	if (ret || retlen != CHFS_EB_EC_HDR_SIZE) {
526 		chfs_err("can't mark peb as free: %d\n", pebnr);
527 		kmem_free(ebhdr, sizeof(struct chfs_eb_hdr));
528 		return ret;
529 	}
530 
531 	kmem_free(ebhdr, sizeof(struct chfs_eb_hdr));
532 	return 0;
533 }
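
/*
 * Note on the CRC arithmetic above: crc32() is taken over 4 bytes starting
 * 8 bytes into the EC header, i.e. (presumably) over the erase counter only,
 * with magic and crc_ec themselves excluded.  The same window is used when
 * the header is verified in nor_check_eb_hdr()/nand_check_eb_hdr().
 */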
534 
535 /*****************************************************************************/
536 /* End of Flash specific operations					     */
537 /*****************************************************************************/
538 
539 /*****************************************************************************/
540 /* Lock Tree								     */
541 /*****************************************************************************/
542 
543 int
544 ltree_entry_cmp(struct chfs_ltree_entry *le1,
545     struct chfs_ltree_entry *le2)
546 {
547 	return (le1->lnr - le2->lnr);
548 }
549 
550 /* Generate functions for Lock tree's red-black tree */
551 RB_PROTOTYPE( ltree_rbtree, chfs_ltree_entry, rb, ltree_entry_cmp);
552 RB_GENERATE( ltree_rbtree, chfs_ltree_entry, rb, ltree_entry_cmp);
553 
554 
555 /**
556  * ltree_lookup - looks up a logical eraseblock in the lock tree
557  * @ebh: chfs eraseblock handler
558  * @lnr: identifier of the logical eraseblock
559  *
560  * This function returns a pointer to the wanted &struct chfs_ltree_entry
561  * if the logical eraseblock is in the lock tree, so it is locked, NULL
562  * otherwise.
563  * @ebh->ltree_lock has to be locked!
564  */
565 static struct chfs_ltree_entry *
566 ltree_lookup(struct chfs_ebh *ebh, int lnr)
567 {
568 	struct chfs_ltree_entry le, *result;
569 	le.lnr = lnr;
570 	result = RB_FIND(ltree_rbtree, &ebh->ltree, &le);
571 	return result;
572 }
573 
574 /**
575  * ltree_add_entry - add an entry to the lock tree
576  * @ebh: chfs eraseblock handler
577  * @lnr: identifier of the logical eraseblock
578  *
579  * This function adds a new logical eraseblock entry identified with @lnr to the
580  * lock tree. If the entry is already in the tree, it increases the user
581  * counter.
582  * Returns NULL if it cannot allocate memory for the lock tree entry, or a pointer
583  * to the inserted entry otherwise.
584  */
585 static struct chfs_ltree_entry *
586 ltree_add_entry(struct chfs_ebh *ebh, int lnr)
587 {
588 	struct chfs_ltree_entry *le, *result;
589 
590 	le = kmem_alloc(sizeof(struct chfs_ltree_entry), KM_SLEEP);
591 
592 	le->lnr = lnr;
593 	le->users = 1;
594 	rw_init(&le->mutex);
595 
596 	//dbg_ebh("enter ltree lock\n");
597 	mutex_enter(&ebh->ltree_lock);
598 	//dbg_ebh("insert\n");
599 	result = RB_INSERT(ltree_rbtree, &ebh->ltree, le);
600 	//dbg_ebh("inserted\n");
601 	if (result) {
602 		//The entry is already in the tree
603 		result->users++;
604 		kmem_free(le, sizeof(struct chfs_ltree_entry));
605 	}
606 	else {
607 		result = le;
608 	}
609 	mutex_exit(&ebh->ltree_lock);
610 
611 	return result;
612 }
613 
614 /**
615  * leb_read_lock - lock a logical eraseblock for read
616  * @ebh: chfs eraseblock handler
617  * @lnr: identifier of the logical eraseblock
618  *
619  * Returns zero in case of success, error code in case of fail.
620  */
621 static int
622 leb_read_lock(struct chfs_ebh *ebh, int lnr)
623 {
624 	struct chfs_ltree_entry *le;
625 
626 	le = ltree_add_entry(ebh, lnr);
627 	if (!le)
628 		return ENOMEM;
629 
630 	rw_enter(&le->mutex, RW_READER);
631 	return 0;
632 }
633 
634 /**
635  * leb_read_unlock - unlock a logical eraseblock from read
636  * @ebh: chfs eraseblock handler
637  * @lnr: identifier of the logical eraseblock
638  *
639  * This function unlocks a logical eraseblock from read and deletes it from the
640  * lock tree if there are no more users of it.
641  */
642 static void
643 leb_read_unlock(struct chfs_ebh *ebh, int lnr)
644 {
645 	struct chfs_ltree_entry *le;
646 
647 	mutex_enter(&ebh->ltree_lock);
648 	//dbg_ebh("LOCK: ebh->ltree_lock spin locked in leb_read_unlock()\n");
649 	le = ltree_lookup(ebh, lnr);
650 	if (!le)
651 		goto out;
652 
653 	le->users -= 1;
654 	KASSERT(le->users >= 0);
655 	rw_exit(&le->mutex);
656 	if (le->users == 0) {
657 		le = RB_REMOVE(ltree_rbtree, &ebh->ltree, le);
658 		if (le) {
659 			KASSERT(!rw_lock_held(&le->mutex));
660 			rw_destroy(&le->mutex);
661 
662 			kmem_free(le, sizeof(struct chfs_ltree_entry));
663 		}
664 	}
665 
666 out:
667 	mutex_exit(&ebh->ltree_lock);
668 	//dbg_ebh("UNLOCK: ebh->ltree_lock spin unlocked in leb_read_unlock()\n");
669 }
670 
671 /**
672  * leb_write_lock - lock a logical eraseblock for write
673  * @ebh: chfs eraseblock handler
674  * @lnr: identifier of the logical eraseblock
675  *
676  * Returns zero in case of success, error code in case of fail.
677  */
678 static int
679 leb_write_lock(struct chfs_ebh *ebh, int lnr)
680 {
681 	struct chfs_ltree_entry *le;
682 
683 	le = ltree_add_entry(ebh, lnr);
684 	if (!le)
685 		return ENOMEM;
686 
687 	rw_enter(&le->mutex, RW_WRITER);
688 	return 0;
689 }
690 
691 /**
692  * leb_write_unlock - unlock a logical eraseblock from write
693  * @ebh: chfs eraseblock handler
694  * @lnr: identifier of the logical eraseblock
695  *
696  * This function unlocks a logical eraseblock from write and deletes it from the
697  * lock tree if there are no more users of it.
698  */
699 static void
700 leb_write_unlock(struct chfs_ebh *ebh, int lnr)
701 {
702 	struct chfs_ltree_entry *le;
703 
704 	mutex_enter(&ebh->ltree_lock);
705 	//dbg_ebh("LOCK: ebh->ltree_lock spin locked in leb_write_unlock()\n");
706 	le = ltree_lookup(ebh, lnr);
707 	if (!le)
708 		goto out;
709 
710 	le->users -= 1;
711 	KASSERT(le->users >= 0);
712 	rw_exit(&le->mutex);
713 	if (le->users == 0) {
714 		RB_REMOVE(ltree_rbtree, &ebh->ltree, le);
715 
716 		KASSERT(!rw_lock_held(&le->mutex));
717 		rw_destroy(&le->mutex);
718 
719 		kmem_free(le, sizeof(struct chfs_ltree_entry));
720 	}
721 
722 out:
723 	mutex_exit(&ebh->ltree_lock);
724 	//dbg_ebh("UNLOCK: ebh->ltree_lock spin unlocked in leb_write_unlock()\n");
725 }
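
/*
 * Typical use of the lock tree, as sketched from the LEB I/O paths below
 * (ebh_read_leb()/ebh_write_leb()):
 *
 *	err = leb_read_lock(ebh, lnr);		// shared, per-LEB
 *	if (err)
 *		return err;
 *	... look up ebh->lmap[lnr] and read the data ...
 *	leb_read_unlock(ebh, lnr);		// entry freed when unused
 *
 * The write side pairs leb_write_lock()/leb_write_unlock() the same way,
 * holding the entry's rwlock exclusively (RW_WRITER) instead.
 */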
726 
727 /*****************************************************************************/
728 /* End of Lock Tree							     */
729 /*****************************************************************************/
730 
731 /*****************************************************************************/
732 /* Erase related operations						     */
733 /*****************************************************************************/
734 
735 /**
736  * If the first argument is smaller than the second, the function
737  * returns a value smaller than zero. If they are equal, the function
738  * returns zero. Otherwise, it returns a value greater than zero.
739  */
740 int
741 peb_in_use_cmp(struct chfs_peb *peb1, struct chfs_peb *peb2)
742 {
743 	return (peb1->pebnr - peb2->pebnr);
744 }
745 
746 int
747 peb_free_cmp(struct chfs_peb *peb1, struct chfs_peb *peb2)
748 {
749 	int comp;
750 
751 	comp = peb1->erase_cnt - peb2->erase_cnt;
752 	if (0 == comp)
753 		comp = peb1->pebnr - peb2->pebnr;
754 
755 	return comp;
756 }
757 
758 /* Generate functions for in use PEB's red-black tree */
759 RB_PROTOTYPE(peb_in_use_rbtree, chfs_peb, u.rb, peb_in_use_cmp);
760 RB_GENERATE(peb_in_use_rbtree, chfs_peb, u.rb, peb_in_use_cmp);
761 RB_PROTOTYPE(peb_free_rbtree, chfs_peb, u.rb, peb_free_cmp);
762 RB_GENERATE(peb_free_rbtree, chfs_peb, u.rb, peb_free_cmp);
763 
764 /**
765  * add_peb_to_erase_queue: adds a PEB to to_erase/fully_erased queue
766  * @ebh - chfs eraseblock handler
767  * @pebnr - physical eraseblock's number
768  * @ec - erase counter of PEB
769  * @queue: the queue to add to
770  *
771  * This function adds a PEB to the erase queue specified by @queue.
772  * The @ebh->erase_lock must be locked before using this.
773  * Returns zero in case of success, error code in case of fail.
774  */
775 int
776 add_peb_to_erase_queue(struct chfs_ebh *ebh, int pebnr, int ec,
777     struct peb_queue *queue)
778 {
779 	struct chfs_peb *peb;
780 
781 	peb = kmem_alloc(sizeof(struct chfs_peb), KM_SLEEP);
782 
783 	peb->erase_cnt = ec;
784 	peb->pebnr = pebnr;
785 
786 	TAILQ_INSERT_TAIL(queue, peb, u.queue);
787 
788 	return 0;
789 
790 }
791 //TODO
792 /**
793  * find_peb_in_use - looks up a PEB in the RB-tree of used blocks
794  * @ebh - chfs eraseblock handler
795  *
796  * This function returns a pointer to the PEB found in the tree,
797  * NULL otherwise.
798  * The @ebh->erase_lock must be locked before using this.
799  */
800 struct chfs_peb *
801 find_peb_in_use(struct chfs_ebh *ebh, int pebnr)
802 {
803 	struct chfs_peb peb, *result;
804 	peb.pebnr = pebnr;
805 	result = RB_FIND(peb_in_use_rbtree, &ebh->in_use, &peb);
806 	return result;
807 }
808 
809 /**
810  * add_peb_to_free - adds a PEB to the RB-tree of free PEBs
811  * @ebh - chfs eraseblock handler
812  * @pebnr - physical eraseblock's number
813  * @ec - erase counter of PEB
814  *
815  *
816  * This function adds a physical eraseblock to the RB-tree of free PEBs
817  * stored in the @ebh. The key is the erase counter and pebnr.
818  * The @ebh->erase_lock must be locked before using this.
819  * Returns zero in case of success, error code in case of fail.
820  */
821 int
822 add_peb_to_free(struct chfs_ebh *ebh, int pebnr, int ec)
823 {
824 	struct chfs_peb *peb, *result;
825 
826 	peb = kmem_alloc(sizeof(struct chfs_peb), KM_SLEEP);
827 
828 	peb->erase_cnt = ec;
829 	peb->pebnr = pebnr;
830 	result = RB_INSERT(peb_free_rbtree, &ebh->free, peb);
831 	if (result)
832 		return 1;
833 
834 	return 0;
835 }
836 
837 /**
838  * add_peb_to_in_use - adds a PEB to the RB-tree of used PEBs
839  * @ebh - chfs eraseblock handler
840  * @pebnr - physical eraseblock's number
841  * @ec - erase counter of PEB
842  *
843  *
844  * This function adds a physical eraseblock to the RB-tree of used PEBs
845  * stored in the @ebh. The key is pebnr.
846  * The @ebh->erase_lock must be locked before using this.
847  * Returns zero in case of success, error code in case of fail.
848  */
849 int
850 add_peb_to_in_use(struct chfs_ebh *ebh, int pebnr, int ec)
851 {
852 	struct chfs_peb *peb, *result;
853 
854 	peb = kmem_alloc(sizeof(struct chfs_peb), KM_SLEEP);
855 
856 	peb->erase_cnt = ec;
857 	peb->pebnr = pebnr;
858 	result = RB_INSERT(peb_in_use_rbtree, &ebh->in_use, peb);
859 	if (result)
860 		return 1;
861 
862 	return 0;
863 }
864 
865 /**
866  * erase_callback - callback function for flash erase
867  * @ei: erase information
868  */
869 void
870 erase_callback(struct flash_erase_instruction *ei)
871 {
872 	int err;
873 	struct chfs_erase_info_priv *priv = (void *) ei->ei_priv;
874 	//dbg_ebh("ERASE_CALLBACK() CALLED\n");
875 	struct chfs_ebh *ebh = priv->ebh;
876 	struct chfs_peb *peb = priv->peb;
877 
878 	peb->erase_cnt += 1;
879 
880 	if (ei->ei_state == FLASH_ERASE_DONE) {
881 
882 		/* Write out erase counter */
883 		err = ebh->ops->mark_eb_hdr_free(ebh,
884 		    peb->pebnr, peb->erase_cnt);
885 		if (err) {
886 			/* cannot mark PEB as free, so erase it again */
887 			chfs_err(
888 				"cannot mark eraseblock as free, PEB: %d\n",
889 				peb->pebnr);
890 			mutex_enter(&ebh->erase_lock);
891 			/*dbg_ebh("LOCK: ebh->erase_lock spin locked in erase_callback() "
892 			  "after mark ebhdr free\n");*/
893 			add_peb_to_erase_queue(ebh, peb->pebnr, peb->erase_cnt,
894 			    &ebh->to_erase);
895 			mutex_exit(&ebh->erase_lock);
896 			/*dbg_ebh("UNLOCK: ebh->erase_lock spin unlocked in erase_callback() "
897 			  "after mark ebhdr free\n");*/
898 			kmem_free(peb, sizeof(struct chfs_peb));
899 			return;
900 		}
901 
902 		mutex_enter(&ebh->erase_lock);
903 		/*dbg_ebh("LOCK: ebh->erase_lock spin locked in erase_callback()\n");*/
904 		err = add_peb_to_free(ebh, peb->pebnr, peb->erase_cnt);
905 		mutex_exit(&ebh->erase_lock);
906 		/*dbg_ebh("UNLOCK: ebh->erase_lock spin unlocked in erase_callback()\n");*/
907 		kmem_free(peb, sizeof(struct chfs_peb));
908 	} else {
909 		/*
910 		 * Erase is finished, but there was a problem,
911 		 * so erase PEB again
912 		 */
913 		chfs_err("erase failed, state is: 0x%x\n", ei->ei_state);
914 		add_peb_to_erase_queue(ebh, peb->pebnr, peb->erase_cnt, &ebh->to_erase);
915 		kmem_free(peb, sizeof(struct chfs_peb));
916 	}
917 }
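
/*
 * Note: erase_callback() is invoked by the flash driver once the erase
 * instruction submitted in free_peb() completes.  It takes ownership of the
 * chfs_peb passed via ei_priv and frees it on every path, after either
 * returning the block to the free tree or re-queueing it for another erase.
 */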
918 
919 /**
920  * free_peb: free a PEB
921  * @ebh: chfs eraseblock handler
922  *
923  * This function erases the first physical eraseblock from one of the erase
924  * lists and adds it to the RB-tree of free PEBs.
925  * Returns zero in case of success, error code in case of fail.
926  */
927 int
928 free_peb(struct chfs_ebh *ebh)
929 {
930 	int err, retries = 0;
931 	off_t ofs;
932 	struct chfs_peb *peb = NULL;
933 	struct flash_erase_instruction *ei;
934 
935 	KASSERT(mutex_owned(&ebh->erase_lock));
936 
937 	if (!TAILQ_EMPTY(&ebh->fully_erased)) {
938 		//dbg_ebh("[FREE PEB] got a fully erased block\n");
939 		peb = TAILQ_FIRST(&ebh->fully_erased);
940 		TAILQ_REMOVE(&ebh->fully_erased, peb, u.queue);
941 		err = ebh->ops->mark_eb_hdr_free(ebh,
942 		    peb->pebnr, peb->erase_cnt);
943 		if (err) {
944 			goto out_free;
945 		}
946 		err = add_peb_to_free(ebh, peb->pebnr, peb->erase_cnt);
947 		goto out_free;
948 	}
949 	/* Erase PEB */
950 	//dbg_ebh("[FREE PEB] eraseing a block\n");
951 	peb = TAILQ_FIRST(&ebh->to_erase);
952 	TAILQ_REMOVE(&ebh->to_erase, peb, u.queue);
953 	mutex_exit(&ebh->erase_lock);
954 	//dbg_ebh("UNLOCK: ebh->erase_lock spin unlocked in free_peb()\n");
955 	ofs = peb->pebnr * ebh->flash_if->erasesize;
956 
957 	/* XXX where do we free this? */
958 	ei = kmem_alloc(sizeof(struct flash_erase_instruction)
959 	    + sizeof(struct chfs_erase_info_priv), KM_SLEEP);
960 retry:
961 	memset(ei, 0, sizeof(*ei));
962 
963 //	ei->ei_if = ebh->flash_if;
964 	ei->ei_addr = ofs;
965 	ei->ei_len = ebh->flash_if->erasesize;
966 	ei->ei_callback = erase_callback;
967 	ei->ei_priv = (unsigned long) (&ei[1]);
968 
969 	((struct chfs_erase_info_priv *) ei->ei_priv)->ebh = ebh;
970 	((struct chfs_erase_info_priv *) ei->ei_priv)->peb = peb;
971 
972 	err = flash_erase(ebh->flash_dev, ei);
973 	dbg_ebh("erased peb: %d\n", peb->pebnr);
974 
975 	/* einval would mean we did something wrong */
976 	KASSERT(err != EINVAL);
977 
978 	if (err) {
979 		dbg_ebh("errno: %d, ei->ei_state: %d\n", err, ei->ei_state);
980 		if (CHFS_MAX_GET_PEB_RETRIES < ++retries &&
981 		    ei->ei_state == FLASH_ERASE_FAILED) {
982 			/* The block went bad, mark it */
983 			dbg_ebh("ebh markbad! 0x%jx\n", (uintmax_t )ofs);
984 			err = flash_block_markbad(ebh->flash_dev, ofs);
985 			if (!err) {
986 				ebh->peb_nr--;
987 			}
988 
989 			goto out;
990 		}
991 		chfs_err("can not erase PEB: %d, try again\n", peb->pebnr);
992 		goto retry;
993 	}
994 
995 out:
996 	/* lock the erase_lock, because it was locked
997 	 * when the function was called */
998 	mutex_enter(&ebh->erase_lock);
999 	return err;
1000 
1001 out_free:
1002 	kmem_free(peb, sizeof(struct chfs_peb));
1003 	return err;
1004 }
1005 
1006 /**
1007  * release_peb - schedule an erase for the PEB
1008  * @ebh: chfs eraseblock handler
1009  * @pebnr: physical eraseblock number
1010  *
1011  * This function gets the PEB identified by @pebnr from the in_use RB-tree of
1012  * @ebh, removes it and schedules an erase for it.
1013  *
1014  * Returns zero on success, error code in case of fail.
1015  */
1016 int
1017 release_peb(struct chfs_ebh *ebh, int pebnr)
1018 {
1019 	int err = 0;
1020 	struct chfs_peb *peb;
1021 
1022 	mutex_enter(&ebh->erase_lock);
1023 
1024 	//dbg_ebh("LOCK: ebh->erase_lock spin locked in release_peb()\n");
1025 	peb = find_peb_in_use(ebh, pebnr);
1026 	if (!peb) {
1027 		chfs_err("LEB is mapped, but is not in the 'in_use' "
1028 		    "tree of ebh\n");
1029 		goto out_unlock;
1030 	}
1031 	err = add_peb_to_erase_queue(ebh, peb->pebnr, peb->erase_cnt,
1032 	    &ebh->to_erase);
1033 
1034 	if (err)
1035 		goto out_unlock;
1036 
1037 	RB_REMOVE(peb_in_use_rbtree, &ebh->in_use, peb);
1038 out_unlock:
1039 	mutex_exit(&ebh->erase_lock);
1040 	//dbg_ebh("UNLOCK: ebh->erase_lock spin unlocked in release_peb()"
1041 	//		" at out_unlock\n");
1042 	return err;
1043 }
1044 
1045 /**
1046  * erase_thread - background thread for erasing PEBs
1047  * @data: pointer to the eraseblock handler
1048  */
1049 /*void
1050   erase_thread(void *data)
1051   {
1052   struct chfs_ebh *ebh = data;
1053 
1054   dbg_ebh("erase thread started\n");
1055   while (ebh->bg_erase.eth_running) {
1056   int err;
1057 
1058   mutex_enter(&ebh->erase_lock);
1059   dbg_ebh("LOCK: ebh->erase_lock spin locked in erase_thread()\n");
1060   if (TAILQ_EMPTY(&ebh->to_erase) && TAILQ_EMPTY(&ebh->fully_erased)) {
1061   dbg_ebh("thread has nothing to do\n");
1062   mutex_exit(&ebh->erase_lock);
1063   mutex_enter(&ebh->bg_erase.eth_thread_mtx);
1064   cv_timedwait_sig(&ebh->bg_erase.eth_wakeup,
1065   &ebh->bg_erase.eth_thread_mtx, mstohz(100));
1066   mutex_exit(&ebh->bg_erase.eth_thread_mtx);
1067 
1068   dbg_ebh("UNLOCK: ebh->erase_lock spin unlocked in erase_thread()\n");
1069   continue;
1070   }
1071   mutex_exit(&ebh->erase_lock);
1072   dbg_ebh("UNLOCK: ebh->erase_lock spin unlocked in erase_thread()\n");
1073 
1074   err = free_peb(ebh);
1075   if (err)
1076   chfs_err("freeing PEB failed in the background thread: %d\n", err);
1077 
1078   }
1079   dbg_ebh("erase thread stopped\n");
1080   kthread_exit(0);
1081   }*/
1082 
1083 /**
1084  * erase_thread - background thread for erasing PEBs
1085  * @data: pointer to the eraseblock handler
1086  */
1087 void
1088 erase_thread(void *data) {
1089 	dbg_ebh("[EBH THREAD] erase thread started\n");
1090 
1091 	struct chfs_ebh *ebh = data;
1092 	int err;
1093 
1094 	mutex_enter(&ebh->erase_lock);
1095 	while (ebh->bg_erase.eth_running) {
1096 		if (TAILQ_EMPTY(&ebh->to_erase) &&
1097 		    TAILQ_EMPTY(&ebh->fully_erased)) {
1098 			cv_timedwait_sig(&ebh->bg_erase.eth_wakeup,
1099 			    &ebh->erase_lock, mstohz(100));
1100 		} else {
1101 			/* XXX exiting this mutex is a bit odd here as
1102 			 * free_peb instantly reenters it...
1103 			 */
1104 			err = free_peb(ebh);
1105 			mutex_exit(&ebh->erase_lock);
1106 			if (err) {
1107 				chfs_err("freeing PEB failed in the"
1108 				    " background thread: %d\n", err);
1109 			}
1110 			mutex_enter(&ebh->erase_lock);
1111 		}
1112 	}
1113 	mutex_exit(&ebh->erase_lock);
1114 
1115 	dbg_ebh("[EBH THREAD] erase thread stopped\n");
1116 	kthread_exit(0);
1117 }
1118 
1119 /**
1120  * erase_thread_start - init and start erase thread
1121  * @ebh: eraseblock handler
1122  */
1123 static void
1124 erase_thread_start(struct chfs_ebh *ebh)
1125 {
1126 	cv_init(&ebh->bg_erase.eth_wakeup, "ebheracv");
1127 
1128 	ebh->bg_erase.eth_running = true;
1129 	kthread_create(PRI_NONE, KTHREAD_MPSAFE | KTHREAD_MUSTJOIN, NULL,
1130 	    erase_thread, ebh, &ebh->bg_erase.eth_thread, "ebherase");
1131 }
1132 
1133 /**
1134  * erase_thread_stop - stop background erase thread
1135  * @ebh: eraseblock handler
1136  */
1137 static void
1138 erase_thread_stop(struct chfs_ebh *ebh)
1139 {
1140 	ebh->bg_erase.eth_running = false;
1141 	cv_signal(&ebh->bg_erase.eth_wakeup);
1142 	dbg_ebh("[EBH THREAD STOP] signaled\n");
1143 
1144 	kthread_join(ebh->bg_erase.eth_thread);
1145 #ifdef BROKEN_KTH_JOIN
1146 	kpause("chfsebhjointh", false, mstohz(1000), NULL);
1147 #endif
1148 
1149 	cv_destroy(&ebh->bg_erase.eth_wakeup);
1150 }
1151 
1152 /*****************************************************************************/
1153 /* End of Erase related operations					     */
1154 /*****************************************************************************/
1155 
1156 /*****************************************************************************/
1157 /* Scan related operations						     */
1158 /*****************************************************************************/
1159 int
1160 scan_leb_used_cmp(struct chfs_scan_leb *sleb1, struct chfs_scan_leb *sleb2)
1161 {
1162 	return (sleb1->lnr - sleb2->lnr);
1163 }
1164 
1165 RB_PROTOTYPE(scan_leb_used_rbtree, chfs_scan_leb, u.rb, scan_leb_used_cmp);
1166 RB_GENERATE(scan_leb_used_rbtree, chfs_scan_leb, u.rb, scan_leb_used_cmp);
1167 
1168 /**
1169  * scan_add_to_queue - adds a physical eraseblock to one of the
1170  *                     eraseblock queues
1171  * @si: chfs scanning information
1172  * @pebnr: physical eraseblock number
1173  * @erase_cnt: erase counter of the physical eraseblock
1174  * @queue: the queue to add to
1175  *
1176  * This function adds a physical eraseblock to one of the lists in the scanning
1177  * information.
1178  * Returns zero in case of success, negative error code in case of fail.
1179  */
1180 static int
1181 scan_add_to_queue(struct chfs_scan_info *si, int pebnr, int erase_cnt,
1182     struct scan_leb_queue *queue)
1183 {
1184 	struct chfs_scan_leb *sleb;
1185 
1186 	sleb = kmem_alloc(sizeof(struct chfs_scan_leb), KM_SLEEP);
1187 
1188 	sleb->pebnr = pebnr;
1189 	sleb->erase_cnt = erase_cnt;
1190 	TAILQ_INSERT_TAIL(queue, sleb, u.queue);
1191 	return 0;
1192 }
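
/*
 * The scan queues and the used tree filled in below feed scan_media():
 * si->free becomes the ebh->free tree, si->erased goes to ebh->fully_erased,
 * si->erase and si->corrupted are queued on ebh->to_erase, and entries of
 * si->used populate ebh->lmap[] and the ebh->in_use tree.
 */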
1193 
1194 /*
1195  * nor_scan_add_to_used - add a physical eraseblock to the
1196  *                        used tree of scan info
1197  * @ebh: chfs eraseblock handler
1198  * @si: chfs scanning information
1199  * @ebhdr: eraseblock header
1200  * @pebnr: physical eraseblock number
1201  * @leb_status: the status of the PEB's eraseblock header
1202  *
1203  * This function adds a PEB to the used tree of the scanning information.
1204  * It handles the situation where more than one physical eraseblock refers
1205  * to the same logical eraseblock.
1206  * Returns zero in case of success, error code in case of fail.
1207  */
1208 int
1209 nor_scan_add_to_used(struct chfs_ebh *ebh, struct chfs_scan_info *si,
1210     struct chfs_eb_hdr *ebhdr, int pebnr, int leb_status)
1211 {
1212 	int err, lnr, ec;
1213 	struct chfs_scan_leb *sleb, *old;
1214 
1215 	lnr = CHFS_GET_LID(ebhdr->u.nor_hdr.lid);
1216 	ec = le32toh(ebhdr->ec_hdr.erase_cnt);
1217 
1218 	sleb = kmem_alloc(sizeof(struct chfs_scan_leb), KM_SLEEP);
1219 
1220 	sleb->erase_cnt = ec;
1221 	sleb->lnr = lnr;
1222 	sleb->pebnr = pebnr;
1223 	sleb->info = leb_status;
1224 
1225 	old = RB_INSERT(scan_leb_used_rbtree, &si->used, sleb);
1226 	if (old) {
1227 		kmem_free(sleb, sizeof(struct chfs_scan_leb));
1228 		/* There is already an eraseblock in the used tree */
1229 		/* If the new one is bad */
1230 		if (EBHDR_LEB_DIRTY == leb_status &&
1231 		    EBHDR_LEB_OK == old->info) {
1232 			return scan_add_to_queue(si, pebnr, ec, &si->erase);
1233 		} else {
1234 			err = scan_add_to_queue(si, old->pebnr,
1235 			    old->erase_cnt, &si->erase);
1236 			if (err) {
1237 				return err;
1238 			}
1239 
1240 			old->erase_cnt = ec;
1241 			old->lnr = lnr;
1242 			old->pebnr = pebnr;
1243 			old->info = leb_status;
1244 			return 0;
1245 		}
1246 	}
1247 	return 0;
1248 }
1249 
1250 /**
1251  * nor_process_eb - read the headers from NOR flash, check them and add
1252  *		    them to the scanning information
1253  * @ebh: chfs eraseblock handler
1254  * @si: chfs scanning information
1255  * @pebnr: physical eraseblock number
1256  *
1257  * Returns zero in case of success, error code in case of fail.
1258  */
1259 int
1260 nor_process_eb(struct chfs_ebh *ebh, struct chfs_scan_info *si,
1261     int pebnr, struct chfs_eb_hdr *ebhdr)
1262 {
1263 	int err, erase_cnt, leb_status;
1264 
1265 	err = ebh->ops->read_eb_hdr(ebh, pebnr, ebhdr);
1266 	if (err)
1267 		return err;
1268 
1269 	erase_cnt = le32toh(ebhdr->ec_hdr.erase_cnt);
1270 	dbg_ebh("erase_cnt: %d\n", erase_cnt);
1271 	leb_status = ebh->ops->check_eb_hdr(ebh, ebhdr);
1272 	if (EBHDR_LEB_BADMAGIC == leb_status ||
1273 	    EBHDR_LEB_BADCRC == leb_status) {
1274 		err = scan_add_to_queue(si, pebnr, erase_cnt, &si->corrupted);
1275 		return err;
1276 	}
1277 	else if (EBHDR_LEB_FREE == leb_status) {
1278 		err = scan_add_to_queue(si, pebnr, erase_cnt, &si->free);
1279 		goto count_mean;
1280 	}
1281 	else if (EBHDR_LEB_NO_HDR == leb_status) {
1282 		err = scan_add_to_queue(si, pebnr, erase_cnt, &si->erased);
1283 		return err;
1284 	}
1285 	else if (EBHDR_LEB_INVALIDATED == leb_status) {
1286 		err = scan_add_to_queue(si, pebnr, erase_cnt, &si->erase);
1287 		return err;
1288 	}
1289 
1290 	err = nor_scan_add_to_used(ebh, si, ebhdr, pebnr, leb_status);
1291 	if (err)
1292 		return err;
1293 
1294 
1295 count_mean:
1296 	si->sum_of_ec += erase_cnt;
1297 	si->num_of_eb++;
1298 
1299 	return err;
1300 }
1301 
1302 /*
1303  * nand_scan_add_to_used - add a physical eraseblock to the
1304  *                         used tree of scan info
1305  * @ebh: chfs eraseblock handler
1306  * @si: chfs scanning information
1307  * @ebhdr: eraseblock header
1308  * @pebnr: physical eraseblock number
1309  * @leb_status: the status of the PEB's eraseblock header
1310  *
1311  * This function adds a PEB to the used tree of the scanning information.
1312  * It handles the situations if there are more physical eraseblock referencing
1313  * to the same logical eraseblock.
1314  * Returns zero in case of success, error code in case of fail.
1315  */
1316 int
1317 nand_scan_add_to_used(struct chfs_ebh *ebh, struct chfs_scan_info *si,
1318     struct chfs_eb_hdr *ebhdr, int pebnr)
1319 {
1320 	int err, lnr, ec;
1321 	struct chfs_scan_leb *sleb, *old;
1322 	uint64_t serial = le64toh(ebhdr->u.nand_hdr.serial);
1323 
1324 	lnr = CHFS_GET_LID(ebhdr->u.nor_hdr.lid);
1325 	ec = le32toh(ebhdr->ec_hdr.erase_cnt);
1326 
1327 	sleb = kmem_alloc(sizeof(struct chfs_scan_leb), KM_SLEEP);
1328 
1329 	sleb->erase_cnt = ec;
1330 	sleb->lnr = lnr;
1331 	sleb->pebnr = pebnr;
1332 	sleb->info = serial;
1333 
1334 	old = RB_INSERT(scan_leb_used_rbtree, &si->used, sleb);
1335 	if (old) {
1336 		kmem_free(sleb, sizeof(struct chfs_scan_leb));
1337 		/* There is already an eraseblock in the used tree */
1338 		/* If the new one is bad */
1339 		if (serial < old->info)
1340 			return scan_add_to_queue(si, pebnr, ec, &si->erase);
1341 		else {
1342 			err = scan_add_to_queue(si,
1343 			    old->pebnr, old->erase_cnt, &si->erase);
1344 			if (err)
1345 				return err;
1346 
1347 			old->erase_cnt = ec;
1348 			old->lnr = lnr;
1349 			old->pebnr = pebnr;
1350 			old->info = serial;
1351 			return 0;
1352 		}
1353 	}
1354 	return 0;
1355 }
1356 
1357 /**
1358  * nand_process_eb - read the headers from NAND flash, check them and add
1359  *		     them to the scanning information
1360  * @ebh: chfs eraseblock handler
1361  * @si: chfs scanning information
1362  * @pebnr: physical eraseblock number
1363  *
1364  * Returns zero in case of success, error code in case of fail.
1365  */
1366 int
1367 nand_process_eb(struct chfs_ebh *ebh, struct chfs_scan_info *si,
1368     int pebnr, struct chfs_eb_hdr *ebhdr)
1369 {
1370 	int err, erase_cnt, leb_status;
1371 	uint64_t max_serial;
1372 	/* isbad() is defined on some ancient platforms, heh */
1373 	bool is_bad;
1374 
1375 	/* Check block is bad */
1376 	err = flash_block_isbad(ebh->flash_dev,
1377 	    pebnr * ebh->flash_if->erasesize, &is_bad);
1378 	if (err) {
1379 		chfs_err("checking block is bad failed\n");
1380 		return err;
1381 	}
1382 	if (is_bad) {
1383 		si->bad_peb_cnt++;
1384 		return 0;
1385 	}
1386 
1387 	err = ebh->ops->read_eb_hdr(ebh, pebnr, ebhdr);
1388 	if (err)
1389 		return err;
1390 
1391 	erase_cnt = le32toh(ebhdr->ec_hdr.erase_cnt);
1392 	leb_status = ebh->ops->check_eb_hdr(ebh, ebhdr);
1393 	if (EBHDR_LEB_BADMAGIC == leb_status ||
1394 	    EBHDR_LEB_BADCRC == leb_status) {
1395 		err = scan_add_to_queue(si, pebnr, erase_cnt, &si->corrupted);
1396 		return err;
1397 	}
1398 	else if (EBHDR_LEB_FREE == leb_status) {
1399 		err = scan_add_to_queue(si, pebnr, erase_cnt, &si->free);
1400 		goto count_mean;
1401 	}
1402 	else if (EBHDR_LEB_NO_HDR == leb_status) {
1403 		err = scan_add_to_queue(si, pebnr, erase_cnt, &si->erased);
1404 		return err;
1405 	}
1406 
1407 	err = nand_scan_add_to_used(ebh, si, ebhdr, pebnr);
1408 	if (err)
1409 		return err;
1410 
1411 	max_serial = le64toh(ebhdr->u.nand_hdr.serial);
1412 	if (max_serial > *ebh->max_serial) {
1413 		*ebh->max_serial = max_serial;
1414 	}
1415 
1416 count_mean:
1417 	si->sum_of_ec += erase_cnt;
1418 	si->num_of_eb++;
1419 
1420 	return err;
1421 }
1422 
1423 /**
1424  * chfs_scan - scans the media and returns information about it
1425  * @ebh: chfs eraseblock handler
1426  *
1427  * This function scans through the media and returns information about it,
1428  * or NULL if it fails.
1429  */
1430 struct chfs_scan_info *
1431 chfs_scan(struct chfs_ebh *ebh)
1432 {
1433 	struct chfs_scan_info *si;
1434 	struct chfs_eb_hdr *ebhdr;
1435 	int pebnr, err;
1436 
1437 	si = kmem_alloc(sizeof(*si), KM_SLEEP);
1438 
1439 	TAILQ_INIT(&si->corrupted);
1440 	TAILQ_INIT(&si->free);
1441 	TAILQ_INIT(&si->erase);
1442 	TAILQ_INIT(&si->erased);
1443 	RB_INIT(&si->used);
1444 	si->bad_peb_cnt = 0;
1445 	si->num_of_eb = 0;
1446 	si->sum_of_ec = 0;
1447 
1448 	ebhdr = kmem_alloc(sizeof(*ebhdr), KM_SLEEP);
1449 
1450 	for (pebnr = 0; pebnr < ebh->peb_nr; pebnr++) {
1451 		dbg_ebh("processing PEB %d\n", pebnr);
1452 		err = ebh->ops->process_eb(ebh, si, pebnr, ebhdr);
1453 		if (err < 0)
1454 			goto out_ebhdr;
1455 	}
1456 	kmem_free(ebhdr, sizeof(*ebhdr));
1457 	dbg_ebh("[CHFS_SCAN] scanning information collected\n");
1458 	return si;
1459 
1460 out_ebhdr:
1461 	kmem_free(ebhdr, sizeof(*ebhdr));
1462 	kmem_free(si, sizeof(*si));
1463 	return NULL;
1464 }
1465 
1466 /**
1467  * scan_info_destroy - frees all lists and trees in the scanning information
1468  * @si: the scanning information
1469  */
1470 void
1471 scan_info_destroy(struct chfs_scan_info *si)
1472 {
1473 	EBH_QUEUE_DESTROY(&si->corrupted,
1474 	    struct chfs_scan_leb, u.queue);
1475 
1476 	EBH_QUEUE_DESTROY(&si->erase,
1477 	    struct chfs_scan_leb, u.queue);
1478 
1479 	EBH_QUEUE_DESTROY(&si->erased,
1480 	    struct chfs_scan_leb, u.queue);
1481 
1482 	EBH_QUEUE_DESTROY(&si->free,
1483 	    struct chfs_scan_leb, u.queue);
1484 
1485 	EBH_TREE_DESTROY(scan_leb_used_rbtree,
1486 	    &si->used, struct chfs_scan_leb);
1487 
1488 	kmem_free(si, sizeof(*si));
1489 	dbg_ebh("[SCAN_INFO_DESTROY] scanning information destroyed\n");
1490 }
1491 
1492 /**
1493  * scan_media - scan media
1494  *
1495  * @ebh - chfs eraseblock handler
1496  *
1497  * Returns zero in case of success, error code in case of fail.
1498  */
1499 
1500 int
1501 scan_media(struct chfs_ebh *ebh)
1502 {
1503 	int err, i, avg_ec;
1504 	struct chfs_scan_info *si;
1505 	struct chfs_scan_leb *sleb;
1506 
1507 	si = chfs_scan(ebh);
1508 	/*
1509 	 * Process the scan info, manage the eraseblock lists
1510 	 */
1511 	mutex_init(&ebh->ltree_lock, MUTEX_DEFAULT, IPL_NONE);
1512 	mutex_init(&ebh->erase_lock, MUTEX_DEFAULT, IPL_NONE);
1513 	RB_INIT(&ebh->ltree);
1514 	RB_INIT(&ebh->free);
1515 	RB_INIT(&ebh->in_use);
1516 	TAILQ_INIT(&ebh->to_erase);
1517 	TAILQ_INIT(&ebh->fully_erased);
1518 	mutex_init(&ebh->alc_mutex, MUTEX_DEFAULT, IPL_NONE);
1519 
1520 	ebh->peb_nr -= si->bad_peb_cnt;
1521 
1522 	/*
1523 	 * Create background thread for erasing
1524 	 */
1525 	erase_thread_start(ebh);
1526 
1527 	ebh->lmap = kmem_alloc(ebh->peb_nr * sizeof(int), KM_SLEEP);
1528 
1529 	for (i = 0; i < ebh->peb_nr; i++) {
1530 		ebh->lmap[i] = EBH_LEB_UNMAPPED;
1531 	}
1532 
1533 	if (si->num_of_eb == 0) {
1534 		/* The flash contains no data. */
1535 		avg_ec = 0;
1536 	}
1537 	else {
1538 		avg_ec = (int) (si->sum_of_ec / si->num_of_eb);
1539 	}
1540 	dbg_ebh("num_of_eb: %d\n", si->num_of_eb);
1541 
1542 	mutex_enter(&ebh->erase_lock);
1543 
1544 	RB_FOREACH(sleb, scan_leb_used_rbtree, &si->used) {
1545 		ebh->lmap[sleb->lnr] = sleb->pebnr;
1546 		err = add_peb_to_in_use(ebh, sleb->pebnr, sleb->erase_cnt);
1547 		if (err)
1548 			goto out_free;
1549 	}
1550 
1551 	TAILQ_FOREACH(sleb, &si->erased, u.queue) {
1552 		err = add_peb_to_erase_queue(ebh, sleb->pebnr, avg_ec,
1553 		    &ebh->fully_erased);
1554 		if (err)
1555 			goto out_free;
1556 	}
1557 
1558 	TAILQ_FOREACH(sleb, &si->erase, u.queue) {
1559 		err = add_peb_to_erase_queue(ebh, sleb->pebnr, avg_ec,
1560 		    &ebh->to_erase);
1561 		if (err)
1562 			goto out_free;
1563 	}
1564 
1565 	TAILQ_FOREACH(sleb, &si->free, u.queue) {
1566 		err = add_peb_to_free(ebh, sleb->pebnr, sleb->erase_cnt);
1567 		if (err)
1568 			goto out_free;
1569 	}
1570 
1571 	TAILQ_FOREACH(sleb, &si->corrupted, u.queue) {
1572 		err = add_peb_to_erase_queue(ebh, sleb->pebnr, avg_ec,
1573 		    &ebh->to_erase);
1574 		if (err)
1575 			goto out_free;
1576 	}
1577 	mutex_exit(&ebh->erase_lock);
1578 	scan_info_destroy(si);
1579 	return 0;
1580 
1581 out_free:
1582 	mutex_exit(&ebh->erase_lock);
1583 	kmem_free(ebh->lmap, ebh->peb_nr * sizeof(int));
1584 	scan_info_destroy(si);
1585 	dbg_ebh("[SCAN_MEDIA] returning with error: %d\n", err);
1586 	return err;
1587 }
1588 
1589 /*****************************************************************************/
1590 /* End of Scan related operations					     */
1591 /*****************************************************************************/
1592 
1593 /**
1594  * ebh_open - opens the flash device and initializes the eraseblock handler
1595  * @ebh: eraseblock handler
1596  * @dev: flash device to use
1597  *
1598  * Returns zero in case of success, error code in case of fail.
1599  */
1600 int
1601 ebh_open(struct chfs_ebh *ebh, dev_t dev)
1602 {
1603 	int err;
1604 
1605 	ebh->flash_dev = flash_get_device(dev);
1606 	if (!ebh->flash_dev) {
1607 		aprint_error("ebh_open: cant get flash device\n");
1608 		return ENODEV;
1609 	}
1610 
1611 	ebh->flash_if = flash_get_interface(dev);
1612 	if (!ebh->flash_if) {
1613 		aprint_error("ebh_open: cant get flash interface\n");
1614 		return ENODEV;
1615 	}
1616 
1617 	ebh->flash_size = flash_get_size(dev);
1618 	ebh->peb_nr = ebh->flash_size / ebh->flash_if->erasesize;
1619 //	ebh->peb_nr = ebh->flash_if->size / ebh->flash_if->erasesize;
1620 	/* Set up flash operations based on flash type */
1621 	ebh->ops = kmem_alloc(sizeof(struct chfs_ebh_ops), KM_SLEEP);
1622 
1623 	switch (ebh->flash_if->type) {
1624 	case FLASH_TYPE_NOR:
1625 		ebh->eb_size = ebh->flash_if->erasesize -
1626 		    CHFS_EB_EC_HDR_SIZE - CHFS_EB_HDR_NOR_SIZE;
1627 
1628 		ebh->ops->read_eb_hdr = nor_read_eb_hdr;
1629 		ebh->ops->write_eb_hdr = nor_write_eb_hdr;
1630 		ebh->ops->check_eb_hdr = nor_check_eb_hdr;
1631 		ebh->ops->mark_eb_hdr_dirty_flash =
1632 		    nor_mark_eb_hdr_dirty_flash;
1633 		ebh->ops->invalidate_eb_hdr = nor_invalidate_eb_hdr;
1634 		ebh->ops->mark_eb_hdr_free = mark_eb_hdr_free;
1635 
1636 		ebh->ops->process_eb = nor_process_eb;
1637 
1638 		ebh->ops->create_eb_hdr = nor_create_eb_hdr;
1639 		ebh->ops->calc_data_offs = nor_calc_data_offs;
1640 
1641 		ebh->max_serial = NULL;
1642 		break;
1643 	case FLASH_TYPE_NAND:
1644 		ebh->eb_size = ebh->flash_if->erasesize -
1645 		    2 * ebh->flash_if->page_size;
1646 
1647 		ebh->ops->read_eb_hdr = nand_read_eb_hdr;
1648 		ebh->ops->write_eb_hdr = nand_write_eb_hdr;
1649 		ebh->ops->check_eb_hdr = nand_check_eb_hdr;
1650 		ebh->ops->mark_eb_hdr_free = mark_eb_hdr_free;
1651 		ebh->ops->mark_eb_hdr_dirty_flash = NULL;
1652 		ebh->ops->invalidate_eb_hdr = NULL;
1653 
1654 		ebh->ops->process_eb = nand_process_eb;
1655 
1656 		ebh->ops->create_eb_hdr = nand_create_eb_hdr;
1657 		ebh->ops->calc_data_offs = nand_calc_data_offs;
1658 
1659 		ebh->max_serial = kmem_alloc(sizeof(uint64_t), KM_SLEEP);
1660 
1661 		*ebh->max_serial = 0;
1662 		break;
1663 	default:
1664 		return 1;
1665 	}
1666 	printf("opening ebh: eb_size: %zu\n", ebh->eb_size);
1667 	err = scan_media(ebh);
1668 	if (err) {
1669 		dbg_ebh("Scan failed.");
1670 		kmem_free(ebh->ops, sizeof(struct chfs_ebh_ops));
1671 		kmem_free(ebh, sizeof(struct chfs_ebh));
1672 		return err;
1673 	}
1674 	return 0;
1675 }
1676 
1677 /**
1678  * ebh_close - close ebh
1679  * @ebh: eraseblock handler
1680  * Returns zero in case of success, error code in case of fail.
1681  */
1682 int
1683 ebh_close(struct chfs_ebh *ebh)
1684 {
1685 	erase_thread_stop(ebh);
1686 
1687 	EBH_TREE_DESTROY(peb_free_rbtree, &ebh->free, struct chfs_peb);
1688 	EBH_TREE_DESTROY(peb_in_use_rbtree, &ebh->in_use, struct chfs_peb);
1689 
1690 	EBH_QUEUE_DESTROY(&ebh->fully_erased, struct chfs_peb, u.queue);
1691 	EBH_QUEUE_DESTROY(&ebh->to_erase, struct chfs_peb, u.queue);
1692 
1693 	/* XXX HACK, see ebh.h */
1694 	EBH_TREE_DESTROY_MUTEX(ltree_rbtree, &ebh->ltree,
1695 	    struct chfs_ltree_entry);
1696 
1697 	KASSERT(!mutex_owned(&ebh->ltree_lock));
1698 	KASSERT(!mutex_owned(&ebh->alc_mutex));
1699 	KASSERT(!mutex_owned(&ebh->erase_lock));
1700 
1701 	mutex_destroy(&ebh->ltree_lock);
1702 	mutex_destroy(&ebh->alc_mutex);
1703 	mutex_destroy(&ebh->erase_lock);
1704 
1705 	kmem_free(ebh->ops, sizeof(struct chfs_ebh_ops));
1706 	kmem_free(ebh, sizeof(struct chfs_ebh));
1707 
1708 	return 0;
1709 }
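
/*
 * Illustrative call sequence for a consumer of this handler; the surrounding
 * allocation and mount logic is assumed, only the ebh_* calls are taken from
 * this file:
 *
 *	struct chfs_ebh *ebh = kmem_zalloc(sizeof(*ebh), KM_SLEEP);
 *	err = ebh_open(ebh, dev);	// scans media, starts the eraser thread
 *	...
 *	err = ebh_write_leb(ebh, lnr, buf, 0, len, &retlen);
 *	err = ebh_read_leb(ebh, lnr, buf, 0, len, &retlen);
 *	...
 *	ebh_close(ebh);			// stops the eraser, frees ebh itself
 */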
1710 
1711 /**
1712  * ebh_read_leb - read data from leb
1713  * @ebh: eraseblock handler
1714  * @lnr: logical eraseblock number
1715  * @buf: buffer to read to
1716  * @offset: offset from where to read
1717  * @len: number of bytes to read
1718  *
1719  * Returns zero in case of success, error code in case of fail.
1720  */
1721 int
1722 ebh_read_leb(struct chfs_ebh *ebh, int lnr, char *buf, uint32_t offset,
1723     size_t len, size_t *retlen)
1724 {
1725 	int err, pebnr;
1726 	off_t data_offset;
1727 
1728 	KASSERT(offset + len <= ebh->eb_size);
1729 
1730 	err = leb_read_lock(ebh, lnr);
1731 	if (err)
1732 		return err;
1733 
1734 	pebnr = ebh->lmap[lnr];
1735 	/* If the PEB is not mapped, the buffer is filled with 0xFF */
1736 	if (EBH_LEB_UNMAPPED == pebnr) {
1737 		leb_read_unlock(ebh, lnr);
1738 		memset(buf, 0xFF, len);
1739 		return 0;
1740 	}
1741 
1742 	/* Read data */
1743 	data_offset = ebh->ops->calc_data_offs(ebh, pebnr, offset);
1744 	err = flash_read(ebh->flash_dev, data_offset, len, retlen,
1745 	    (unsigned char *) buf);
1746 	if (err)
1747 		goto out_free;
1748 
1749 	KASSERT(len == *retlen);
1750 
1751 out_free:
1752 	leb_read_unlock(ebh, lnr);
1753 	return err;
1754 }
1755 
1756 /**
1757  * get_peb: get a free physical eraseblock
1758  * @ebh - chfs eraseblock handler
1759  *
1760  * This function gets a free eraseblock from the ebh->free RB-tree.
1761  * The first entry will be returned and deleted from the tree.
1762  * The entries are sorted by erase counter, so the PEB with the smallest
1763  * erase counter will be returned.
1764  * If something goes bad a negative value will be returned.
1765  */
1766 int
1767 get_peb(struct chfs_ebh *ebh)
1768 {
1769 	int err, pebnr;
1770 	struct chfs_peb *peb;
1771 
1772 retry:
1773 	mutex_enter(&ebh->erase_lock);
1774 	//dbg_ebh("LOCK: ebh->erase_lock spin locked in get_peb()\n");
1775 	if (RB_EMPTY(&ebh->free)) {
1776 		/*There is no more free PEBs in the tree*/
1777 		if (TAILQ_EMPTY(&ebh->to_erase) &&
1778 		    TAILQ_EMPTY(&ebh->fully_erased)) {
1779 			mutex_exit(&ebh->erase_lock);
1780 			//dbg_ebh("UNLOCK: ebh->erase_lock spin unlocked in get_peb()\n");
1781 			return ENOSPC;
1782 		}
1783 		err = free_peb(ebh);
1784 
1785 		mutex_exit(&ebh->erase_lock);
1786 		//dbg_ebh("UNLOCK: ebh->erase_lock spin unlocked in get_peb()\n");
1787 
1788 		if (err)
1789 			return err;
1790 		goto retry;
1791 	}
1792 	peb = RB_MIN(peb_free_rbtree, &ebh->free);
1793 	pebnr = peb->pebnr;
1794 	RB_REMOVE(peb_free_rbtree, &ebh->free, peb);
1795 	err = add_peb_to_in_use(ebh, peb->pebnr, peb->erase_cnt);
1796 	if (err)
1797 		pebnr = err;
1798 
1799 	kmem_free(peb, sizeof(struct chfs_peb));
1800 
1801 	mutex_exit(&ebh->erase_lock);
1802 	//dbg_ebh("UNLOCK: ebh->erase_lock spin unlocked in get_peb()\n");
1803 
1804 	return pebnr;
1805 }
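
/*
 * Callers of get_peb() in this file treat a negative result as failure
 * and otherwise use the value as a PEB number, for example (sketch
 * mirroring ebh_write_leb(); "ebhdr" is the caller's header buffer):
 *
 *	pebnr = get_peb(ebh);
 *	if (pebnr < 0)
 *		return pebnr;
 *	err = ebh->ops->write_eb_hdr(ebh, pebnr, ebhdr);
 */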
1806 
1807 /**
1808  * ebh_write_leb - write data to leb
1809  * @ebh: eraseblock handler
1810  * @lnr: logical eraseblock number
1811  * @buf: data to write
1812  * @offset: offset where to write
1813  * @len: bytes number to write
1814  *
1815  * Returns zero in case of success, error code in case of fail.
1816  */
1817 int
1818 ebh_write_leb(struct chfs_ebh *ebh, int lnr, char *buf, uint32_t offset,
1819     size_t len, size_t *retlen)
1820 {
1821 	int err, pebnr, retries = 0;
1822 	off_t data_offset;
1823 	struct chfs_eb_hdr *ebhdr;
1824 
1825 	dbg("offset: %d | len: %zu | (offset+len): %zu "
1826 	    " | ebsize: %zu\n", offset, len, (offset+len), ebh->eb_size);
1827 
1828 	KASSERT(offset + len <= ebh->eb_size);
1829 
1830 	err = leb_write_lock(ebh, lnr);
1831 	if (err)
1832 		return err;
1833 
1834 	pebnr = ebh->lmap[lnr];
1835 	/* If the LEB is mapped write out data */
1836 	if (pebnr != EBH_LEB_UNMAPPED) {
1837 		data_offset = ebh->ops->calc_data_offs(ebh, pebnr, offset);
1838 		err = flash_write(ebh->flash_dev, data_offset, len, retlen,
1839 		    (unsigned char *) buf);
1840 
1841 		if (err) {
1842 			chfs_err("error %d while writing %zu bytes to PEB "
1843 			    "%d:%ju, written %zu bytes\n",
1844 			    err, len, pebnr, (uintmax_t )offset, *retlen);
1845 		} else {
1846 			KASSERT(len == *retlen);
1847 		}
1848 
1849 		leb_write_unlock(ebh, lnr);
1850 		return err;
1851 	}
1852 
1853 	/*
1854 	 * If the LEB is unmapped, get a free PEB and write the
1855 	 * eraseblock header first
1856 	 */
1857 	ebhdr = kmem_alloc(sizeof(struct chfs_eb_hdr), KM_SLEEP);
1858 
1859 	/* Setting up eraseblock header properties */
1860 	ebh->ops->create_eb_hdr(ebhdr, lnr);
1861 
1862 retry:
1863 	/* Getting a physical eraseblock from the wear leveling system */
1864 	pebnr = get_peb(ebh);
1865 	if (pebnr < 0) {
1866 		leb_write_unlock(ebh, lnr);
1867 		kmem_free(ebhdr, sizeof(struct chfs_eb_hdr));
1868 		return pebnr;
1869 	}
1870 
1871 	/* Write the eraseblock header to the media */
1872 	err = ebh->ops->write_eb_hdr(ebh, pebnr, ebhdr);
1873 	if (err) {
1874 		chfs_warn(
1875 			"error writing eraseblock header: LEB %d , PEB %d\n",
1876 			lnr, pebnr);
1877 		goto write_error;
1878 	}
1879 
1880 	/* Write out data */
1881 	if (len) {
1882 		data_offset = ebh->ops->calc_data_offs(ebh, pebnr, offset);
1883 		err = flash_write(ebh->flash_dev,
1884 		    data_offset, len, retlen, (unsigned char *) buf);
1885 		if (err) {
1886 			chfs_err("error %d while writing %zu bytes to PEB "
1887 			    "%d:%ju, written %zu bytes\n",
1888 			    err, len, pebnr, (uintmax_t )offset, *retlen);
1889 			goto write_error;
1890 		}
1891 	}
1892 
1893 	ebh->lmap[lnr] = pebnr;
1894 	leb_write_unlock(ebh, lnr);
1895 	kmem_free(ebhdr, sizeof(struct chfs_eb_hdr));
1896 
1897 	return 0;
1898 
1899 write_error:
	err = release_peb(ebh, pebnr);
1900 	/* give up after CHFS_MAX_GET_PEB_RETRIES (currently 2) retries */
1901 	if (err || CHFS_MAX_GET_PEB_RETRIES < ++retries) {
1902 		leb_write_unlock(ebh, lnr);
1903 		kmem_free(ebhdr, sizeof(struct chfs_eb_hdr));
1904 		return err;
1905 	}
1906 	goto retry;
1907 }
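
/*
 * Example use of ebh_write_leb() (a sketch; "node", "node_len" and "ofs"
 * are hypothetical caller state).  Writing to an unmapped LEB
 * transparently allocates a PEB and writes its eraseblock header before
 * the data:
 *
 *	size_t retlen;
 *	err = ebh_write_leb(ebh, lnr, (char *)node, ofs, node_len,
 *	    &retlen);
 *	if (err)
 *		return err;
 */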
1908 
1909 /**
1910  * ebh_erase_leb - erase a leb
1911  * @ebh: eraseblock handler
1912  * @lnr: leb number
1913  *
1914  * Returns zero in case of success, error code in case of fail.
1915  */
1916 int
1917 ebh_erase_leb(struct chfs_ebh *ebh, int lnr)
1918 {
1919 	int err, pebnr;
1920 
1921 	leb_write_lock(ebh, lnr);
1922 
1923 	pebnr = ebh->lmap[lnr];
1924 	if (pebnr < 0) {
1925 		leb_write_unlock(ebh, lnr);
1926 		return EBH_LEB_UNMAPPED;
1927 	}
1928 	err = release_peb(ebh, pebnr);
1929 	if (err)
1930 		goto out_unlock;
1931 
1932 	ebh->lmap[lnr] = EBH_LEB_UNMAPPED;
1933 	cv_signal(&ebh->bg_erase.eth_wakeup);
1934 out_unlock:
1935 	leb_write_unlock(ebh, lnr);
1936 	return err;
1937 }
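
/*
 * Erasing a LEB here only unmaps it and releases its PEB to the erase
 * machinery; the background erase thread performs the actual flash
 * erase later.  A caller reclaiming space might simply do (sketch;
 * "lnr" is hypothetical):
 *
 *	err = ebh_erase_leb(ebh, lnr);
 */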
1938 
1939 /**
1940  * ebh_map_leb - map a PEB to a LEB
1941  * @ebh: eraseblock handler
1942  * @lnr: leb number
1943  *
1944  * Returns zero in case of success, error code in case of fail.
1945  */
1946 int
1947 ebh_map_leb(struct chfs_ebh *ebh, int lnr)
1948 {
1949 	int err, pebnr, retries = 0;
1950 	struct chfs_eb_hdr *ebhdr;
1951 
1952 	ebhdr = kmem_alloc(sizeof(struct chfs_eb_hdr), KM_SLEEP);
1953 	err = leb_write_lock(ebh, lnr);
1954 	if (err)
1955 		return err;
1956 
1957 	ebhdr = kmem_alloc(sizeof(struct chfs_eb_hdr), KM_SLEEP);
1958 retry:
1959 	pebnr = get_peb(ebh);
1960 	if (pebnr < 0) {
1961 		err = pebnr;
1962 		goto out_unlock;
1963 	}
1964 
1965 	ebh->ops->create_eb_hdr(ebhdr, lnr);
1966 
1967 	err = ebh->ops->write_eb_hdr(ebh, pebnr, ebhdr);
1968 	if (err) {
1969 		chfs_warn(
1970 			"error writing eraseblock header: LEB %d , PEB %d\n",
1971 			lnr, pebnr);
1972 		goto write_error;
1973 	}
1974 
1975 	ebh->lmap[lnr] = pebnr;
1976 
1977 out_unlock:
1978 	leb_write_unlock(ebh, lnr);
1979 	/* the header buffer is no longer needed once the header is on flash */
	kmem_free(ebhdr, sizeof(struct chfs_eb_hdr));
	return err;
1980 
1981 write_error:
1982 	err = release_peb(ebh, pebnr);
1983 	/* give up after CHFS_MAX_GET_PEB_RETRIES (currently 2) retries */
1984 	if (err || CHFS_MAX_GET_PEB_RETRIES < ++retries) {
1985 		leb_write_unlock(ebh, lnr);
1986 		kmem_free(ebhdr, sizeof(struct chfs_eb_hdr));
1987 		return err;
1988 	}
1989 	goto retry;
1990 }
1991 
1992 /**
1993  * ebh_unmap_leb - unmap a logical eraseblock
1994  * @ebh: eraseblock handler
1995  * @lnr: leb number
1996  *
1997  * Returns zero in case of success, error code in case of fail.
1998  */
1999 int
2000 ebh_unmap_leb(struct chfs_ebh *ebh, int lnr)
2001 {
2002 	int err;
2003 
2004 	if (ebh_is_mapped(ebh, lnr) < 0)
2005 		/* The eraseblock is already unmapped, nothing to do */
2006 		return 0;
2007 
2008 	err = ebh_erase_leb(ebh, lnr);
2009 
2010 	return err;
2011 }
2012 
2013 /**
2014  * ebh_is_mapped - check if a PEB is mapped to @lnr
2015  * @ebh: eraseblock handler
2016  * @lnr: leb number
2017  *
2018  * Returns the mapped PEB number, EBH_LEB_UNMAPPED if not mapped, or an error code if locking fails.
2019  */
2020 int
2021 ebh_is_mapped(struct chfs_ebh *ebh, int lnr)
2022 {
2023 	int err, result;
2024 	err = leb_read_lock(ebh, lnr);
2025 	if (err)
2026 		return err;
2027 
2028 	result = ebh->lmap[lnr];
2029 	leb_read_unlock(ebh, lnr);
2030 
2031 	return result;
2032 }
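
/*
 * Since the return value is the PEB number itself, callers in this file
 * test the mapping with a sign check rather than comparing against
 * zero, for example (sketch):
 *
 *	if (ebh_is_mapped(ebh, lnr) < 0)
 *		err = ebh_map_leb(ebh, lnr);
 */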
2033 
2034 /**
2035  * ebh_change_leb - rewrite the contents of a LEB onto another PEB
2036  * @ebh: eraseblock handler
2037  * @lnr: leb number
2038  * @buf: data to write
2039  * @len: length of data
2040  * Returns zero in case of success, error code in case of fail.
2041  */
2042 int
2043 ebh_change_leb(struct chfs_ebh *ebh, int lnr, char *buf, size_t len,
2044     size_t *retlen)
2045 {
2046 	int err, pebnr, pebnr_old, retries = 0;
2047 	off_t data_offset;
2048 
2049 	struct chfs_peb *peb = NULL;
2050 	struct chfs_eb_hdr *ebhdr;
2051 
2052 	if (ebh_is_mapped(ebh, lnr) < 0)
2053 		return EBH_LEB_UNMAPPED;
2054 
2055 	if (len == 0) {
2056 		err = ebh_unmap_leb(ebh, lnr);
2057 		if (err)
2058 			return err;
2059 		return ebh_map_leb(ebh, lnr);
2060 	}
2061 
2062 	ebhdr = kmem_alloc(sizeof(struct chfs_eb_hdr), KM_SLEEP);
2063 
2064 	pebnr_old = ebh->lmap[lnr];
2065 
2066 	mutex_enter(&ebh->alc_mutex);
2067 	err = leb_write_lock(ebh, lnr);
2068 	if (err)
2069 		goto out_mutex;
2070 
2071 	if (ebh->ops->mark_eb_hdr_dirty_flash) {
2072 		err = ebh->ops->mark_eb_hdr_dirty_flash(ebh, pebnr_old, lnr);
2073 		if (err)
2074 			goto out_unlock;
2075 	}
2076 
2077 	/* Setting up eraseblock header properties */
2078 	ebh->ops->create_eb_hdr(ebhdr, lnr);
2079 
2080 retry:
2081 	/* Getting a physical eraseblock from the wear leveling system */
2082 	pebnr = get_peb(ebh);
2083 	if (pebnr < 0) {
2084 		leb_write_unlock(ebh, lnr);
2085 		mutex_exit(&ebh->alc_mutex);
2086 		kmem_free(ebhdr, sizeof(struct chfs_eb_hdr));
2087 		return pebnr;
2088 	}
2089 
2090 	err = ebh->ops->write_eb_hdr(ebh, pebnr, ebhdr);
2091 	if (err) {
2092 		chfs_warn(
2093 			"error writing eraseblock header: LEB %d , PEB %d\n",
2094 			lnr, pebnr);
2095 		goto write_error;
2096 	}
2097 
2098 	/* Write out data */
2099 	data_offset = ebh->ops->calc_data_offs(ebh, pebnr, 0);
2100 	err = flash_write(ebh->flash_dev, data_offset, len, retlen,
2101 	    (unsigned char *) buf);
2102 	if (err) {
2103 		chfs_err("error %d while writing %zu bytes to PEB %d:%ju,"
2104 		    " written %zu bytes\n",
2105 		    err, len, pebnr, (uintmax_t)data_offset, *retlen);
2106 		goto write_error;
2107 	}
2108 
2109 	ebh->lmap[lnr] = pebnr;
2110 
2111 	if (ebh->ops->invalidate_eb_hdr) {
2112 		err = ebh->ops->invalidate_eb_hdr(ebh, pebnr_old);
2113 		if (err)
2114 			goto out_unlock;
2115 	}
2116 	peb = find_peb_in_use(ebh, pebnr_old);
2117 	err = release_peb(ebh, peb->pebnr);
2118 
2119 out_unlock:
2120 	leb_write_unlock(ebh, lnr);
2121 
2122 out_mutex:
2123 	mutex_exit(&ebh->alc_mutex);
2124 	kmem_free(ebhdr, sizeof(struct chfs_eb_hdr));
2125 	/* peb is only looked up on the success path, so it may be NULL here */
	if (peb != NULL)
		kmem_free(peb, sizeof(struct chfs_peb));
2126 	return err;
2127 
2128 write_error:
2129 	err = release_peb(ebh, pebnr);
2130 	/* give up after CHFS_MAX_GET_PEB_RETRIES (currently 2) retries */
2131 	if (err || CHFS_MAX_GET_PEB_RETRIES < ++retries) {
2132 		leb_write_unlock(ebh, lnr);
2133 		mutex_exit(&ebh->alc_mutex);
2134 		kmem_free(ebhdr, sizeof(struct chfs_eb_hdr));
2135 		return err;
2136 	}
2137 	goto retry;
2138 }
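
/*
 * Example use of ebh_change_leb() (a sketch; "buf" and "len" are
 * hypothetical caller state).  The whole LEB content is rewritten onto
 * a fresh PEB and the old PEB is released to the erase machinery; a
 * zero length degenerates to unmap followed by map:
 *
 *	size_t retlen;
 *	err = ebh_change_leb(ebh, lnr, buf, len, &retlen);
 *	if (err)
 *		return err;
 */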
2139 
2140