1 /*	$NetBSD: ebh.c,v 1.11 2025/01/08 11:39:50 andvar Exp $	*/
2 
3 /*-
4  * Copyright (c) 2010 Department of Software Engineering,
5  *		      University of Szeged, Hungary
6  * Copyright (C) 2009 Ferenc Havasi <havasi@inf.u-szeged.hu>
7  * Copyright (C) 2009 Zoltan Sogor <weth@inf.u-szeged.hu>
8  * Copyright (C) 2009 David Tengeri <dtengeri@inf.u-szeged.hu>
9  * Copyright (C) 2009 Tamas Toth <ttoth@inf.u-szeged.hu>
10  * Copyright (C) 2010 Adam Hoka <ahoka@NetBSD.org>
11  * All rights reserved.
12  *
13  * This code is derived from software contributed to The NetBSD Foundation
14  * by the Department of Software Engineering, University of Szeged, Hungary
15  *
16  * Redistribution and use in source and binary forms, with or without
17  * modification, are permitted provided that the following conditions
18  * are met:
19  * 1. Redistributions of source code must retain the above copyright
20  *    notice, this list of conditions and the following disclaimer.
21  * 2. Redistributions in binary form must reproduce the above copyright
22  *    notice, this list of conditions and the following disclaimer in the
23  *    documentation and/or other materials provided with the distribution.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
26  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
27  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
28  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
29  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
30  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
31  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
32  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
33  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  */
37 
38 #include "ebh.h"
39 
40 /*****************************************************************************/
41 /* Flash specific operations						     */
42 /*****************************************************************************/
43 int nor_create_eb_hdr(struct chfs_eb_hdr *ebhdr, int lnr);
44 int nand_create_eb_hdr(struct chfs_eb_hdr *ebhdr, int lnr);
45 int nor_calc_data_offs(struct chfs_ebh *ebh, int pebnr, int offset);
46 int nand_calc_data_offs(struct chfs_ebh *ebh, int pebnr, int offset);
47 int nor_read_eb_hdr(struct chfs_ebh *ebh, int pebnr, struct chfs_eb_hdr *ebhdr);
48 int nand_read_eb_hdr(struct chfs_ebh *ebh, int pebnr, struct chfs_eb_hdr *ebhdr);
49 int nor_write_eb_hdr(struct chfs_ebh *ebh, int pebnr, struct chfs_eb_hdr *ebhdr);
50 int nand_write_eb_hdr(struct chfs_ebh *ebh, int pebnr, struct chfs_eb_hdr *ebhdr);
51 int nor_check_eb_hdr(struct chfs_ebh *ebh, void *buf);
52 int nand_check_eb_hdr(struct chfs_ebh *ebh, void *buf);
53 int nor_mark_eb_hdr_dirty_flash(struct chfs_ebh *ebh, int pebnr, int lid);
54 int nor_invalidate_eb_hdr(struct chfs_ebh *ebh, int pebnr);
55 int mark_eb_hdr_free(struct chfs_ebh *ebh, int pebnr, int ec);
56 
57 int ltree_entry_cmp(struct chfs_ltree_entry *le1, struct chfs_ltree_entry *le2);
58 int peb_in_use_cmp(struct chfs_peb *peb1, struct chfs_peb *peb2);
59 int peb_free_cmp(struct chfs_peb *peb1, struct chfs_peb *peb2);
60 int add_peb_to_erase_queue(struct chfs_ebh *ebh, int pebnr, int ec, struct peb_queue *queue);
61 struct chfs_peb * find_peb_in_use(struct chfs_ebh *ebh, int pebnr);
62 int add_peb_to_free(struct chfs_ebh *ebh, int pebnr, int ec);
63 int add_peb_to_in_use(struct chfs_ebh *ebh, int pebnr, int ec);
64 void erase_callback(struct flash_erase_instruction *ei);
65 int free_peb(struct chfs_ebh *ebh);
66 int release_peb(struct chfs_ebh *ebh, int pebnr);
67 void erase_thread(void *data);
68 static void erase_thread_start(struct chfs_ebh *ebh);
69 static void erase_thread_stop(struct chfs_ebh *ebh);
70 int scan_leb_used_cmp(struct chfs_scan_leb *sleb1, struct chfs_scan_leb *sleb2);
71 int nor_scan_add_to_used(struct chfs_ebh *ebh, struct chfs_scan_info *si, struct chfs_eb_hdr *ebhdr, int pebnr, int leb_status);
72 int nor_process_eb(struct chfs_ebh *ebh, struct chfs_scan_info *si,
73     int pebnr, struct chfs_eb_hdr *ebhdr);
74 int nand_scan_add_to_used(struct chfs_ebh *ebh, struct chfs_scan_info *si, struct chfs_eb_hdr *ebhdr, int pebnr);
75 int nand_process_eb(struct chfs_ebh *ebh, struct chfs_scan_info *si,
76     int pebnr, struct chfs_eb_hdr *ebhdr);
77 struct chfs_scan_info *chfs_scan(struct chfs_ebh *ebh);
78 void scan_info_destroy(struct chfs_scan_info *si);
79 int scan_media(struct chfs_ebh *ebh);
80 int get_peb(struct chfs_ebh *ebh);
81 /**
82  * nor_create_eb_hdr - creates an eraseblock header for NOR flash
83  * @ebhdr: ebhdr to set
84  * @lnr: LEB number
85  */
86 int
87 nor_create_eb_hdr(struct chfs_eb_hdr *ebhdr, int lnr)
88 {
89 	ebhdr->u.nor_hdr.lid = htole32(lnr);
90 	return 0;
91 }
92 
93 /**
94  * nand_create_eb_hdr - creates an eraseblock header for NAND flash
95  * @ebhdr: ebhdr to set
96  * @lnr: LEB number
97  */
98 int
99 nand_create_eb_hdr(struct chfs_eb_hdr *ebhdr, int lnr)
100 {
101 	ebhdr->u.nand_hdr.lid = htole32(lnr);
102 	return 0;
103 }
104 
105 /**
106  * nor_calc_data_offs - calculates data offset on NOR flash
107  * @ebh: chfs eraseblock handler
108  * @pebnr: eraseblock number
109  * @offset: offset within the eraseblock
110  */
111 int
112 nor_calc_data_offs(struct chfs_ebh *ebh, int pebnr, int offset)
113 {
114 	return pebnr * ebh->flash_if->erasesize + offset +
115 	    CHFS_EB_EC_HDR_SIZE + CHFS_EB_HDR_NOR_SIZE;
116 }
117 
118 /**
119  * nand_calc_data_offs - calculates data offset on NAND flash
120  * @ebh: chfs eraseblock handler
121  * @pebnr: eraseblock number
122  * @offset: offset within the eraseblock
123  */
124 int
125 nand_calc_data_offs(struct chfs_ebh *ebh, int pebnr, int offset)
126 {
127 	return pebnr * ebh->flash_if->erasesize + offset +
128 	    2 * ebh->flash_if->page_size;
129 }
130 
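/*
 * Worked example (illustrative only, not used by the code): assuming a
 * NAND erasesize of 128 KiB and a page size of 2 KiB, the data of PEB 3
 * at in-block offset 100 starts at
 *
 *	3 * 131072 + 100 + 2 * 2048 = 397412
 *
 * i.e. the first two pages of every NAND eraseblock are reserved for the
 * erase counter header and the eraseblock header.
 */
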
131 /**
132  * nor_read_eb_hdr - read eraseblock header from NOR flash
133  *
134  * @ebh: chfs eraseblock handler
135  * @pebnr: eraseblock number
136  * @ebhdr: where to store the data
137  *
138  * Reads the eraseblock header from the media.
139  * Returns zero in case of success, error code in case of failure.
140  */
141 int
142 nor_read_eb_hdr(struct chfs_ebh *ebh,
143     int pebnr, struct chfs_eb_hdr *ebhdr)
144 {
145 	int ret;
146 	size_t retlen;
147 	off_t ofs = pebnr * ebh->flash_if->erasesize;
148 
149 	KASSERT(pebnr >= 0 && pebnr < ebh->peb_nr);
150 
151 	ret = flash_read(ebh->flash_dev,
152 	    ofs, CHFS_EB_EC_HDR_SIZE,
153 	    &retlen, (unsigned char *) &ebhdr->ec_hdr);
154 
155 	if (ret || retlen != CHFS_EB_EC_HDR_SIZE)
156 		return ret;
157 
158 	ofs += CHFS_EB_EC_HDR_SIZE;
159 	ret = flash_read(ebh->flash_dev,
160 	    ofs, CHFS_EB_HDR_NOR_SIZE,
161 	    &retlen, (unsigned char *) &ebhdr->u.nor_hdr);
162 
163 	if (ret || retlen != CHFS_EB_HDR_NOR_SIZE)
164 		return ret;
165 
166 	return 0;
167 }
168 
169 /**
170  * nand_read_eb_hdr - read eraseblock header from NAND flash
171  *
172  * @ebh: chfs eraseblock handler
173  * @pebnr: eraseblock number
174  * @ebhdr: where to store the data
175  *
176  * Reads the eraseblock header from the media. It is stored in the first two pages.
177  * Returns zero in case of success, error code in case of failure.
178  */
179 int
180 nand_read_eb_hdr(struct chfs_ebh *ebh, int pebnr,
181     struct chfs_eb_hdr *ebhdr)
182 {
183 	int ret;
184 	size_t retlen;
185 	off_t ofs;
186 
187 	KASSERT(pebnr >= 0 && pebnr < ebh->peb_nr);
188 
189 	/* Read erase counter header from the first page. */
190 	ofs = pebnr * ebh->flash_if->erasesize;
191 	ret = flash_read(ebh->flash_dev,
192 	    ofs, CHFS_EB_EC_HDR_SIZE, &retlen,
193 	    (unsigned char *) &ebhdr->ec_hdr);
194 	if (ret || retlen != CHFS_EB_EC_HDR_SIZE)
195 		return ret;
196 
197 	/* Read NAND eraseblock header from the second page */
198 	ofs += ebh->flash_if->page_size;
199 	ret = flash_read(ebh->flash_dev,
200 	    ofs, CHFS_EB_HDR_NAND_SIZE, &retlen,
201 	    (unsigned char *) &ebhdr->u.nand_hdr);
202 	if (ret || retlen != CHFS_EB_HDR_NAND_SIZE)
203 		return ret;
204 
205 	return 0;
206 }
207 
208 /**
209  * nor_write_eb_hdr - write eraseblock header to NOR flash
210  *
211  * @ebh: chfs eraseblock handler
212  * @pebnr: eraseblock number to write to
213  * @ebhdr: eraseblock header to write
214  *
215  * Writes the eraseblock header to the media.
216  * Returns zero in case of success, error code in case of failure.
217  */
218 int
219 nor_write_eb_hdr(struct chfs_ebh *ebh, int pebnr, struct chfs_eb_hdr *ebhdr)
220 {
221 	int ret, crc;
222 	size_t retlen;
223 
224 	off_t ofs = pebnr * ebh->flash_if->erasesize + CHFS_EB_EC_HDR_SIZE;
225 
226 	ebhdr->u.nor_hdr.lid = ebhdr->u.nor_hdr.lid
227 	    | htole32(CHFS_LID_NOT_DIRTY_BIT);
228 
229 	crc = crc32(0, (uint8_t *)&ebhdr->u.nor_hdr + 4,
230 	    CHFS_EB_HDR_NOR_SIZE - 4);
231 	ebhdr->u.nor_hdr.crc = htole32(crc);
232 
233 	KASSERT(pebnr >= 0 && pebnr < ebh->peb_nr);
234 
235 	ret = flash_write(ebh->flash_dev,
236 	    ofs, CHFS_EB_HDR_NOR_SIZE, &retlen,
237 	    (unsigned char *) &ebhdr->u.nor_hdr);
238 
239 	if (ret || retlen != CHFS_EB_HDR_NOR_SIZE)
240 		return ret;
241 
242 	return 0;
243 }
244 
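/*
 * Layout note (a sketch inferred from the "+ 4" offsets above, not taken
 * from the header definitions): the 32-bit CRC is assumed to be the first
 * word of the on-flash eraseblock header, so it is computed over the
 * remaining CHFS_EB_HDR_NOR_SIZE - 4 bytes with the lid already carrying
 * CHFS_LID_NOT_DIRTY_BIT, roughly:
 *
 *	hdr.lid |= htole32(CHFS_LID_NOT_DIRTY_BIT);
 *	hdr.crc  = htole32(crc32(0, (uint8_t *)&hdr + 4,
 *	    CHFS_EB_HDR_NOR_SIZE - 4));
 */
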
245 /**
246  * nand_write_eb_hdr - write eraseblock header to NAND flash
247  *
248  * @ebh: chfs eraseblock handler
249  * @pebnr: eraseblock number to write to
250  * @ebhdr: eraseblock header to write
251  *
252  * Writes the eraseblock header to the media.
253  * Returns zero in case of success, error code in case of failure.
254  */
255 int
256 nand_write_eb_hdr(struct chfs_ebh *ebh, int pebnr,
257     struct chfs_eb_hdr *ebhdr)
258 {
259 	int ret, crc;
260 	size_t retlen;
261 	flash_off_t ofs;
262 
263 	KASSERT(pebnr >= 0 && pebnr < ebh->peb_nr);
264 
265 	ofs = pebnr * ebh->flash_if->erasesize +
266 	    ebh->flash_if->page_size;
267 
268 	ebhdr->u.nand_hdr.serial = htole64(++(*ebh->max_serial));
269 
270 	crc = crc32(0, (uint8_t *)&ebhdr->u.nand_hdr + 4,
271 	    CHFS_EB_HDR_NAND_SIZE - 4);
272 	ebhdr->u.nand_hdr.crc = htole32(crc);
273 
274 	ret = flash_write(ebh->flash_dev, ofs,
275 	    CHFS_EB_HDR_NAND_SIZE, &retlen,
276 	    (unsigned char *) &ebhdr->u.nand_hdr);
277 
278 	if (ret || retlen != CHFS_EB_HDR_NAND_SIZE)
279 		return ret;
280 
281 	return 0;
282 }
283 
284 /**
285  * nor_check_eb_hdr - check eraseblock header read from NOR flash
286  *
287  * @ebh: chfs eraseblock handler
288  * @buf: eraseblock header to check
289  *
290  * Returns eraseblock header status.
291  */
292 int
293 nor_check_eb_hdr(struct chfs_ebh *ebh, void *buf)
294 {
295 	uint32_t magic, crc, hdr_crc;
296 	struct chfs_eb_hdr *ebhdr = buf;
297 	le32 lid_save;
298 
299 	// check whether there is a header
300 	if (check_pattern((void *) &ebhdr->ec_hdr,
301 		0xFF, 0, CHFS_EB_EC_HDR_SIZE)) {
302 		dbg_ebh("no header found\n");
303 		return EBHDR_LEB_NO_HDR;
304 	}
305 
306 	// check magic
307 	magic = le32toh(ebhdr->ec_hdr.magic);
308 	if (magic != CHFS_MAGIC_BITMASK) {
309 		dbg_ebh("bad magic bitmask(exp: %x found %x)\n",
310 		    CHFS_MAGIC_BITMASK, magic);
311 		return EBHDR_LEB_BADMAGIC;
312 	}
313 
314 	// check CRC_EC
315 	hdr_crc = le32toh(ebhdr->ec_hdr.crc_ec);
316 	crc = crc32(0, (uint8_t *) &ebhdr->ec_hdr + 8, 4);
317 	if (hdr_crc != crc) {
318 		dbg_ebh("bad crc_ec found\n");
319 		return EBHDR_LEB_BADCRC;
320 	}
321 
322 	/* check if the PEB is free: magic, crc_ec and erase_cnt are good and
323 	 * everything else is 0xFF
324 	 */
325 	if (check_pattern((void *) &ebhdr->u.nor_hdr, 0xFF, 0,
326 		CHFS_EB_HDR_NOR_SIZE)) {
327 		dbg_ebh("free peb found\n");
328 		return EBHDR_LEB_FREE;
329 	}
330 
331 	// check invalidated (CRC == LID == 0)
332 	if (ebhdr->u.nor_hdr.crc == 0 && ebhdr->u.nor_hdr.lid == 0) {
333 		dbg_ebh("invalidated ebhdr found\n");
334 		return EBHDR_LEB_INVALIDATED;
335 	}
336 
337 	// check CRC
338 	hdr_crc = le32toh(ebhdr->u.nor_hdr.crc);
339 	lid_save = ebhdr->u.nor_hdr.lid;
340 
341 	// mark lid as not dirty for crc calc
342 	ebhdr->u.nor_hdr.lid = ebhdr->u.nor_hdr.lid | htole32(
343 		CHFS_LID_NOT_DIRTY_BIT);
344 	crc = crc32(0, (uint8_t *) &ebhdr->u.nor_hdr + 4,
345 	    CHFS_EB_HDR_NOR_SIZE - 4);
346 	// restore the original lid value in ebh
347 	ebhdr->u.nor_hdr.lid = lid_save;
348 
349 	if (crc != hdr_crc) {
350 		dbg_ebh("bad crc found\n");
351 		return EBHDR_LEB_BADCRC;
352 	}
353 
354 	// check dirty
355 	if (!(le32toh(lid_save) & CHFS_LID_NOT_DIRTY_BIT)) {
356 		dbg_ebh("dirty ebhdr found\n");
357 		return EBHDR_LEB_DIRTY;
358 	}
359 
360 	return EBHDR_LEB_OK;
361 }
362 
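/*
 * Note: check_pattern(buf, 0xFF, offset, len) is assumed to return
 * nonzero when all @len bytes match 0xFF, i.e. when that part of the
 * header is still in its erased state; that is how the "no header" and
 * "free PEB" cases above are detected.
 */
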
363 /**
364  * nand_check_eb_hdr - check eraseblock header read from NAND flash
365  *
366  * @ebh: chfs eraseblock handler
367  * @buf: eraseblock header to check
368  *
369  * Returns eraseblock header status.
370  */
371 int
372 nand_check_eb_hdr(struct chfs_ebh *ebh, void *buf)
373 {
374 	uint32_t magic, crc, hdr_crc;
375 	struct chfs_eb_hdr *ebhdr = buf;
376 
377 	// check whether there is a header
378 	if (check_pattern((void *) &ebhdr->ec_hdr,
379 		0xFF, 0, CHFS_EB_EC_HDR_SIZE)) {
380 		dbg_ebh("no header found\n");
381 		return EBHDR_LEB_NO_HDR;
382 	}
383 
384 	// check magic
385 	magic = le32toh(ebhdr->ec_hdr.magic);
386 	if (magic != CHFS_MAGIC_BITMASK) {
387 		dbg_ebh("bad magic bitmask(exp: %x found %x)\n",
388 		    CHFS_MAGIC_BITMASK, magic);
389 		return EBHDR_LEB_BADMAGIC;
390 	}
391 
392 	// check CRC_EC
393 	hdr_crc = le32toh(ebhdr->ec_hdr.crc_ec);
394 	crc = crc32(0, (uint8_t *) &ebhdr->ec_hdr + 8, 4);
395 	if (hdr_crc != crc) {
396 		dbg_ebh("bad crc_ec found\n");
397 		return EBHDR_LEB_BADCRC;
398 	}
399 
400 	/* check if the PEB is free: magic, crc_ec and erase_cnt are good and
401 	 * everything else is 0xFF
402 	 */
403 	if (check_pattern((void *) &ebhdr->u.nand_hdr, 0xFF, 0,
404 		CHFS_EB_HDR_NAND_SIZE)) {
405 		dbg_ebh("free peb found\n");
406 		return EBHDR_LEB_FREE;
407 	}
408 
409 	// check CRC
410 	hdr_crc = le32toh(ebhdr->u.nand_hdr.crc);
411 
412 	crc = crc32(0, (uint8_t *) &ebhdr->u.nand_hdr + 4,
413 	    CHFS_EB_HDR_NAND_SIZE - 4);
414 
415 	if (crc != hdr_crc) {
416 		dbg_ebh("bad crc found\n");
417 		return EBHDR_LEB_BADCRC;
418 	}
419 
420 	return EBHDR_LEB_OK;
421 }
422 
423 /**
424  * nor_mark_eb_hdr_dirty_flash- mark eraseblock header dirty on NOR flash
425  *
426  * @ebh: chfs eraseblock handler
427  * @pebnr: eraseblock number
428  * @lid: leb id (its bit number 31 will be set to 0)
429  *
430  * It pulls the CHFS_LID_NOT_DIRTY_BIT to zero on flash.
431  *
432  * Returns zero in case of success, error code in case of failure.
433  */
434 int
435 nor_mark_eb_hdr_dirty_flash(struct chfs_ebh *ebh, int pebnr, int lid)
436 {
437 	int ret;
438 	size_t retlen;
439 	off_t ofs;
440 
441 	/* mark leb id dirty */
442 	lid = htole32(lid & CHFS_LID_DIRTY_BIT_MASK);
443 
444 	/* calculate position */
445 	ofs = pebnr * ebh->flash_if->erasesize + CHFS_EB_EC_HDR_SIZE
446 	    + CHFS_GET_MEMBER_POS(struct chfs_nor_eb_hdr , lid);
447 
448 	ret = flash_write(ebh->flash_dev, ofs, sizeof(lid), &retlen,
449 	    (unsigned char *) &lid);
450 	if (ret || retlen != sizeof(lid)) {
451 		chfs_err("can't mark peb dirty");
452 		return ret;
453 	}
454 
455 	return 0;
456 }
457 
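/*
 * Illustrative note: this in-place update only works because NOR flash
 * can clear individual bits without an erase cycle, so rewriting the lid
 * word with CHFS_LID_NOT_DIRTY_BIT masked off merely pulls that one bit
 * to 0. CHFS_GET_MEMBER_POS() is presumably an offsetof()-style macro,
 * i.e. the write lands roughly at
 *
 *	pebnr * erasesize + CHFS_EB_EC_HDR_SIZE
 *	    + offsetof(struct chfs_nor_eb_hdr, lid)
 */
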
458 /**
459  * nor_invalidate_eb_hdr - invalidate eraseblock header on NOR flash
460  *
461  * @ebh: chfs eraseblock handler
462  * @pebnr: eraseblock number
463  *
464  * Sets the crc and lid fields to zero.
465  * Returns zero in case of success, error code in case of failure.
466  */
467 int
468 nor_invalidate_eb_hdr(struct chfs_ebh *ebh, int pebnr)
469 {
470 	int ret;
471 	size_t retlen;
472 	off_t ofs;
473 	char zero_buf[CHFS_INVALIDATE_SIZE];
474 
475 	/* fill with zero */
476 	memset(zero_buf, 0x0, CHFS_INVALIDATE_SIZE);
477 
478 	/* calculate position (!!! lid is directly behind crc !!!) */
479 	ofs = pebnr * ebh->flash_if->erasesize + CHFS_EB_EC_HDR_SIZE
480 	    + CHFS_GET_MEMBER_POS(struct chfs_nor_eb_hdr, crc);
481 
482 	ret = flash_write(ebh->flash_dev,
483 	    ofs, CHFS_INVALIDATE_SIZE, &retlen,
484 	    (unsigned char *) &zero_buf);
485 	if (ret || retlen != CHFS_INVALIDATE_SIZE) {
486 		chfs_err("can't invalidate peb");
487 		return ret;
488 	}
489 
490 	return 0;
491 }
492 
493 /**
494  * mark_eb_hdr_free - free eraseblock header on NOR or NAND flash
495  *
496  * @ebh: chfs eraseblock handler
497  * @pebnr: eraseblock number
498  * @ec: erase counter of PEB
499  *
500  * Writes out the magic and erase counter to the physical eraseblock.
501  * Returns zero in case of success, error code in case of failure.
502  */
503 int
504 mark_eb_hdr_free(struct chfs_ebh *ebh, int pebnr, int ec)
505 {
506 	int ret, crc;
507 	size_t retlen;
508 	off_t ofs;
509 	struct chfs_eb_hdr *ebhdr;
510 	ebhdr = kmem_alloc(sizeof(struct chfs_eb_hdr), KM_SLEEP);
511 
512 	ebhdr->ec_hdr.magic = htole32(CHFS_MAGIC_BITMASK);
513 	ebhdr->ec_hdr.erase_cnt = htole32(ec);
514 	crc = crc32(0, (uint8_t *) &ebhdr->ec_hdr + 8, 4);
515 	ebhdr->ec_hdr.crc_ec = htole32(crc);
516 
517 	ofs = pebnr * ebh->flash_if->erasesize;
518 
519 	KASSERT(sizeof(ebhdr->ec_hdr) == CHFS_EB_EC_HDR_SIZE);
520 
521 	ret = flash_write(ebh->flash_dev,
522 	    ofs, CHFS_EB_EC_HDR_SIZE, &retlen,
523 	    (unsigned char *) &ebhdr->ec_hdr);
524 
525 	if (ret || retlen != CHFS_EB_EC_HDR_SIZE) {
526 		chfs_err("can't mark peb as free: %d\n", pebnr);
527 		kmem_free(ebhdr, sizeof(struct chfs_eb_hdr));
528 		return ret;
529 	}
530 
531 	kmem_free(ebhdr, sizeof(struct chfs_eb_hdr));
532 	return 0;
533 }
534 
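/*
 * Sketch of the resulting erase counter header (field offsets are an
 * assumption inferred from the fixed "+ 8, 4" CRC window above):
 *
 *	offset 0: magic     = CHFS_MAGIC_BITMASK
 *	offset 4: crc_ec    = crc32 over the 4 bytes at offset 8
 *	offset 8: erase_cnt = number of times this PEB has been erased
 */
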
535 /*****************************************************************************/
536 /* End of Flash specific operations					     */
537 /*****************************************************************************/
538 
539 /*****************************************************************************/
540 /* Lock Tree								     */
541 /*****************************************************************************/
542 
543 int
544 ltree_entry_cmp(struct chfs_ltree_entry *le1,
545     struct chfs_ltree_entry *le2)
546 {
547 	return (le1->lnr - le2->lnr);
548 }
549 
550 /* Generate functions for Lock tree's red-black tree */
551 RB_PROTOTYPE( ltree_rbtree, chfs_ltree_entry, rb, ltree_entry_cmp);
552 RB_GENERATE( ltree_rbtree, chfs_ltree_entry, rb, ltree_entry_cmp);
553 
554 
555 /**
556  * ltree_lookup - looks up a logical eraseblock in the lock tree
557  * @ebh: chfs eraseblock handler
558  * @lid: identifier of the logical eraseblock
559  *
560  * This function returns a pointer to the wanted &struct chfs_ltree_entry
561  * if the logical eraseblock is in the lock tree, so it is locked, NULL
562  * otherwise.
563  * @ebh->ltree_lock has to be locked!
564  */
565 static struct chfs_ltree_entry *
566 ltree_lookup(struct chfs_ebh *ebh, int lnr)
567 {
568 	struct chfs_ltree_entry le, *result;
569 	le.lnr = lnr;
570 	result = RB_FIND(ltree_rbtree, &ebh->ltree, &le);
571 	return result;
572 }
573 
574 /**
575  * ltree_add_entry - add an entry to the lock tree
576  * @ebh: chfs eraseblock handler
577  * @lnr: identifier of the logical eraseblock
578  *
579  * This function adds a new logical eraseblock entry identified with @lnr to the
580  * lock tree. If the entry is already in the tree, it increases the user
581  * counter.
582  * Returns NULL if it cannot allocate memory for the lock tree entry, or a pointer
583  * to the inserted entry otherwise.
584  */
585 static struct chfs_ltree_entry *
586 ltree_add_entry(struct chfs_ebh *ebh, int lnr)
587 {
588 	struct chfs_ltree_entry *le, *result;
589 
590 	le = kmem_alloc(sizeof(struct chfs_ltree_entry), KM_SLEEP);
591 
592 	le->lnr = lnr;
593 	le->users = 1;
594 	rw_init(&le->mutex);
595 
596 	//dbg_ebh("enter ltree lock\n");
597 	mutex_enter(&ebh->ltree_lock);
598 	//dbg_ebh("insert\n");
599 	result = RB_INSERT(ltree_rbtree, &ebh->ltree, le);
600 	//dbg_ebh("inserted\n");
601 	if (result) {
602 		//The entry is already in the tree
603 		result->users++;
604 		kmem_free(le, sizeof(struct chfs_ltree_entry));
605 	}
606 	else {
607 		result = le;
608 	}
609 	mutex_exit(&ebh->ltree_lock);
610 
611 	return result;
612 }
613 
614 /**
615  * leb_read_lock - lock a logical eraseblock for read
616  * @ebh: chfs eraseblock handler
617  * @lnr: identifier of the logical eraseblock
618  *
619  * Returns zero in case of success, error code in case of failure.
620  */
621 static int
622 leb_read_lock(struct chfs_ebh *ebh, int lnr)
623 {
624 	struct chfs_ltree_entry *le;
625 
626 	le = ltree_add_entry(ebh, lnr);
627 	if (!le)
628 		return ENOMEM;
629 
630 	rw_enter(&le->mutex, RW_READER);
631 	return 0;
632 }
633 
634 /**
635  * leb_read_unlock - unlock a logical eraseblock from read
636  * @ebh: chfs eraseblock handler
637  * @lnr: identifier of the logical eraseblock
638  *
639  * This function unlocks a logical eraseblock from read and deletes it from the
640  * lock tree if there are no more users of it.
641  */
642 static void
643 leb_read_unlock(struct chfs_ebh *ebh, int lnr)
644 {
645 	struct chfs_ltree_entry *le;
646 
647 	mutex_enter(&ebh->ltree_lock);
648 	//dbg_ebh("LOCK: ebh->ltree_lock spin locked in leb_read_unlock()\n");
649 	le = ltree_lookup(ebh, lnr);
650 	if (!le)
651 		goto out;
652 
653 	le->users -= 1;
654 	KASSERT(le->users >= 0);
655 	rw_exit(&le->mutex);
656 	if (le->users == 0) {
657 		le = RB_REMOVE(ltree_rbtree, &ebh->ltree, le);
658 		if (le) {
659 			rw_destroy(&le->mutex);
660 
661 			kmem_free(le, sizeof(struct chfs_ltree_entry));
662 		}
663 	}
664 
665 out:
666 	mutex_exit(&ebh->ltree_lock);
667 	//dbg_ebh("UNLOCK: ebh->ltree_lock spin unlocked in leb_read_unlock()\n");
668 }
669 
670 /**
671  * leb_write_lock - lock a logical eraseblock for write
672  * @ebh: chfs eraseblock handler
673  * @lnr: identifier of the logical eraseblock
674  *
675  * Returns zero in case of success, error code in case of failure.
676  */
677 static int
678 leb_write_lock(struct chfs_ebh *ebh, int lnr)
679 {
680 	struct chfs_ltree_entry *le;
681 
682 	le = ltree_add_entry(ebh, lnr);
683 	if (!le)
684 		return ENOMEM;
685 
686 	rw_enter(&le->mutex, RW_WRITER);
687 	return 0;
688 }
689 
690 /**
691  * leb_write_unlock - unlock a logical eraseblock from write
692  * @ebh: chfs eraseblock handler
693  * @lnr: identifier of the logical eraseblock
694  *
695  * This function unlocks a logical eraseblock from write and deletes it from the
696  * lock tree if there are no more users of it.
697  */
698 static void
699 leb_write_unlock(struct chfs_ebh *ebh, int lnr)
700 {
701 	struct chfs_ltree_entry *le;
702 
703 	mutex_enter(&ebh->ltree_lock);
704 	//dbg_ebh("LOCK: ebh->ltree_lock spin locked in leb_write_unlock()\n");
705 	le = ltree_lookup(ebh, lnr);
706 	if (!le)
707 		goto out;
708 
709 	le->users -= 1;
710 	KASSERT(le->users >= 0);
711 	rw_exit(&le->mutex);
712 	if (le->users == 0) {
713 		RB_REMOVE(ltree_rbtree, &ebh->ltree, le);
714 
715 		rw_destroy(&le->mutex);
716 
717 		kmem_free(le, sizeof(struct chfs_ltree_entry));
718 	}
719 
720 out:
721 	mutex_exit(&ebh->ltree_lock);
722 	//dbg_ebh("UNLOCK: ebh->ltree_lock spin unlocked in leb_write_unlock()\n");
723 }
724 
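/*
 * Typical pairing (illustrative sketch): every LEB access brackets the
 * flash operation with the matching lock/unlock, e.g.
 *
 *	err = leb_read_lock(ebh, lnr);
 *	if (err)
 *		return err;
 *	... access ebh->lmap[lnr] and the flash ...
 *	leb_read_unlock(ebh, lnr);
 *
 * Writers use leb_write_lock()/leb_write_unlock() the same way; the lock
 * tree entry disappears once its user count drops to zero.
 */
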
725 /*****************************************************************************/
726 /* End of Lock Tree							     */
727 /*****************************************************************************/
728 
729 /*****************************************************************************/
730 /* Erase related operations						     */
731 /*****************************************************************************/
732 
733 /**
734  * If the first argument is smaller than the second, the function
735  * returns a value smaller than zero. If they are equal, the function
736  * returns zero. Otherwise, it should return a value greater than zero.
737  */
738 int
739 peb_in_use_cmp(struct chfs_peb *peb1, struct chfs_peb *peb2)
740 {
741 	return (peb1->pebnr - peb2->pebnr);
742 }
743 
744 int
745 peb_free_cmp(struct chfs_peb *peb1, struct chfs_peb *peb2)
746 {
747 	int comp;
748 
749 	comp = peb1->erase_cnt - peb2->erase_cnt;
750 	if (0 == comp)
751 		comp = peb1->pebnr - peb2->pebnr;
752 
753 	return comp;
754 }
755 
756 /* Generate functions for in use PEB's red-black tree */
757 RB_PROTOTYPE(peb_in_use_rbtree, chfs_peb, u.rb, peb_in_use_cmp);
758 RB_GENERATE(peb_in_use_rbtree, chfs_peb, u.rb, peb_in_use_cmp);
759 RB_PROTOTYPE(peb_free_rbtree, chfs_peb, u.rb, peb_free_cmp);
760 RB_GENERATE(peb_free_rbtree, chfs_peb, u.rb, peb_free_cmp);
761 
762 /**
763  * add_peb_to_erase_queue: adds a PEB to to_erase/fully_erased queue
764  * @ebh - chfs eraseblock handler
765  * @pebnr - physical eraseblock's number
766  * @ec - erase counter of PEB
767  * @queue: the queue to add to
768  *
769  * This function adds a PEB to the erase queue specified by @queue.
770  * The @ebh->erase_lock must be locked before using this.
771  * Returns zero in case of success, error code in case of failure.
772  */
773 int
774 add_peb_to_erase_queue(struct chfs_ebh *ebh, int pebnr, int ec,
775     struct peb_queue *queue)
776 {
777 	struct chfs_peb *peb;
778 
779 	peb = kmem_alloc(sizeof(struct chfs_peb), KM_SLEEP);
780 
781 	peb->erase_cnt = ec;
782 	peb->pebnr = pebnr;
783 
784 	TAILQ_INSERT_TAIL(queue, peb, u.queue);
785 
786 	return 0;
787 
788 }
789 //TODO
790 /**
791  * find_peb_in_use - looks up a PEB in the RB-tree of used blocks
792  * @ebh - chfs eraseblock handler
793  *
794  * This function returns a pointer to the PEB found in the tree,
795  * NULL otherwise.
796  * The @ebh->erase_lock must be locked before using this.
797  */
798 struct chfs_peb *
799 find_peb_in_use(struct chfs_ebh *ebh, int pebnr)
800 {
801 	struct chfs_peb peb, *result;
802 	peb.pebnr = pebnr;
803 	result = RB_FIND(peb_in_use_rbtree, &ebh->in_use, &peb);
804 	return result;
805 }
806 
807 /**
808  * add_peb_to_free - adds a PEB to the RB-tree of free PEBs
809  * @ebh - chfs eraseblock handler
810  * @pebnr - physical eraseblock's number
811  * @ec - erase counter of PEB
812  *
813  *
814  * This function adds a physical eraseblock to the RB-tree of free PEBs
815  * stored in the @ebh. The key is the erase counter and pebnr.
816  * The @ebh->erase_lock must be locked before using this.
817  * Returns zero in case of success, error code in case of failure.
818  */
819 int
820 add_peb_to_free(struct chfs_ebh *ebh, int pebnr, int ec)
821 {
822 	struct chfs_peb *peb, *result;
823 
824 	peb = kmem_alloc(sizeof(struct chfs_peb), KM_SLEEP);
825 
826 	peb->erase_cnt = ec;
827 	peb->pebnr = pebnr;
828 	result = RB_INSERT(peb_free_rbtree, &ebh->free, peb);
829 	if (result) {
830 		kmem_free(peb, sizeof(struct chfs_peb));
831 		return 1;
832 	}
833 
834 	return 0;
835 }
836 
837 /**
838  * add_peb_to_in_use - adds a PEB to the RB-tree of used PEBs
839  * @ebh - chfs eraseblock handler
840  * @pebnr - physical eraseblock's number
841  * @ec - erase counter of PEB
842  *
843  *
844  * This function adds a physical eraseblock to the RB-tree of used PEBs
845  * stored in the @ebh. The key is pebnr.
846  * The @ebh->erase_lock must be locked before using this.
847  * Returns zero in case of success, error code in case of failure.
848  */
849 int
850 add_peb_to_in_use(struct chfs_ebh *ebh, int pebnr, int ec)
851 {
852 	struct chfs_peb *peb, *result;
853 
854 	peb = kmem_alloc(sizeof(struct chfs_peb), KM_SLEEP);
855 
856 	peb->erase_cnt = ec;
857 	peb->pebnr = pebnr;
858 	result = RB_INSERT(peb_in_use_rbtree, &ebh->in_use, peb);
859 	if (result) {
860 		kmem_free(peb, sizeof(struct chfs_peb));
861 		return 1;
862 	}
863 
864 	return 0;
865 }
866 
867 /**
868  * erase_callback - callback function for flash erase
869  * @ei: erase information
870  */
871 void
872 erase_callback(struct flash_erase_instruction *ei)
873 {
874 	int err;
875 	struct chfs_erase_info_priv *priv = (void *) ei->ei_priv;
876 	//dbg_ebh("ERASE_CALLBACK() CALLED\n");
877 	struct chfs_ebh *ebh = priv->ebh;
878 	struct chfs_peb *peb = priv->peb;
879 
880 	peb->erase_cnt += 1;
881 
882 	if (ei->ei_state == FLASH_ERASE_DONE) {
883 
884 		/* Write out erase counter */
885 		err = ebh->ops->mark_eb_hdr_free(ebh,
886 		    peb->pebnr, peb->erase_cnt);
887 		if (err) {
888 			/* cannot mark PEB as free, so erase it again */
889 			chfs_err(
890 				"cannot mark eraseblock as free, PEB: %d\n",
891 				peb->pebnr);
892 			mutex_enter(&ebh->erase_lock);
893 			/*dbg_ebh("LOCK: ebh->erase_lock spin locked in erase_callback() "
894 			  "after mark ebhdr free\n");*/
895 			add_peb_to_erase_queue(ebh, peb->pebnr, peb->erase_cnt,
896 			    &ebh->to_erase);
897 			mutex_exit(&ebh->erase_lock);
898 			/*dbg_ebh("UNLOCK: ebh->erase_lock spin unlocked in erase_callback() "
899 			  "after mark ebhdr free\n");*/
900 			kmem_free(peb, sizeof(struct chfs_peb));
901 			return;
902 		}
903 
904 		mutex_enter(&ebh->erase_lock);
905 		/*dbg_ebh("LOCK: ebh->erase_lock spin locked in erase_callback()\n");*/
906 		err = add_peb_to_free(ebh, peb->pebnr, peb->erase_cnt);
907 		mutex_exit(&ebh->erase_lock);
908 		/*dbg_ebh("UNLOCK: ebh->erase_lock spin unlocked in erase_callback()\n");*/
909 		kmem_free(peb, sizeof(struct chfs_peb));
910 	} else {
911 		/*
912 		 * Erase is finished, but there was a problem,
913 		 * so erase PEB again
914 		 */
915 		chfs_err("erase failed, state is: 0x%x\n", ei->ei_state);
916 		add_peb_to_erase_queue(ebh, peb->pebnr, peb->erase_cnt, &ebh->to_erase);
917 		kmem_free(peb, sizeof(struct chfs_peb));
918 	}
919 }
920 
921 /**
922  * free_peb: free a PEB
923  * @ebh: chfs eraseblock handler
924  *
925  * This function erases the first physical eraseblock from one of the erase
926  * lists and adds it to the RB-tree of free PEBs.
927  * Returns zero in case of success, error code in case of failure.
928  */
929 int
930 free_peb(struct chfs_ebh *ebh)
931 {
932 	int err, retries = 0;
933 	off_t ofs;
934 	struct chfs_peb *peb = NULL;
935 	struct flash_erase_instruction *ei;
936 
937 	KASSERT(mutex_owned(&ebh->erase_lock));
938 
939 	if (!TAILQ_EMPTY(&ebh->fully_erased)) {
940 		//dbg_ebh("[FREE PEB] got a fully erased block\n");
941 		peb = TAILQ_FIRST(&ebh->fully_erased);
942 		TAILQ_REMOVE(&ebh->fully_erased, peb, u.queue);
943 		err = ebh->ops->mark_eb_hdr_free(ebh,
944 		    peb->pebnr, peb->erase_cnt);
945 		if (err) {
946 			goto out_free;
947 		}
948 		err = add_peb_to_free(ebh, peb->pebnr, peb->erase_cnt);
949 		goto out_free;
950 	}
951 	/* Erase PEB */
952 	//dbg_ebh("[FREE PEB] erasing a block\n");
953 	peb = TAILQ_FIRST(&ebh->to_erase);
954 	TAILQ_REMOVE(&ebh->to_erase, peb, u.queue);
955 	mutex_exit(&ebh->erase_lock);
956 	//dbg_ebh("UNLOCK: ebh->erase_lock spin unlocked in free_peb()\n");
957 	ofs = peb->pebnr * ebh->flash_if->erasesize;
958 
959 	/* XXX where do we free this? */
960 	ei = kmem_alloc(sizeof(struct flash_erase_instruction)
961 	    + sizeof(struct chfs_erase_info_priv), KM_SLEEP);
962 retry:
963 	memset(ei, 0, sizeof(*ei));
964 
965 //	ei->ei_if = ebh->flash_if;
966 	ei->ei_addr = ofs;
967 	ei->ei_len = ebh->flash_if->erasesize;
968 	ei->ei_callback = erase_callback;
969 	ei->ei_priv = (unsigned long) (&ei[1]);
970 
971 	((struct chfs_erase_info_priv *) ei->ei_priv)->ebh = ebh;
972 	((struct chfs_erase_info_priv *) ei->ei_priv)->peb = peb;
973 
974 	err = flash_erase(ebh->flash_dev, ei);
975 	dbg_ebh("erased peb: %d\n", peb->pebnr);
976 
977 	/* EINVAL would mean we did something wrong */
978 	KASSERT(err != EINVAL);
979 
980 	if (err) {
981 		dbg_ebh("errno: %d, ei->ei_state: %d\n", err, ei->ei_state);
982 		if (CHFS_MAX_GET_PEB_RETRIES < ++retries &&
983 		    ei->ei_state == FLASH_ERASE_FAILED) {
984 			/* The block went bad, mark it */
985 			dbg_ebh("ebh markbad! 0x%jx\n", (uintmax_t )ofs);
986 			err = flash_block_markbad(ebh->flash_dev, ofs);
987 			if (!err) {
988 				ebh->peb_nr--;
989 			}
990 
991 			goto out;
992 		}
993 		chfs_err("can not erase PEB: %d, try again\n", peb->pebnr);
994 		goto retry;
995 	}
996 
997 out:
998 	/* lock the erase_lock, because it was locked
999 	 * when the function was called */
1000 	mutex_enter(&ebh->erase_lock);
1001 	return err;
1002 
1003 out_free:
1004 	kmem_free(peb, sizeof(struct chfs_peb));
1005 	return err;
1006 }
1007 
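/*
 * Allocation note (descriptive sketch): the erase instruction and its
 * private data are carved out of a single allocation, with &ei[1] being
 * the first byte after the flash_erase_instruction, so the assumed
 * in-memory layout is
 *
 *	[ struct flash_erase_instruction ][ struct chfs_erase_info_priv ]
 *	  ^ ei                             ^ (void *)ei->ei_priv
 *
 * which is how erase_callback() recovers both ebh and peb from
 * ei->ei_priv.
 */
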
1008 /**
1009  * release_peb - schedule an erase for the PEB
1010  * @ebh: chfs eraseblock handler
1011  * @pebnr: physical eraseblock number
1012  *
1013  * This function gets the PEB identified by @pebnr from the in_use RB-tree of
1014  * @ebh, removes it and schedules an erase for it.
1015  *
1016  * Returns zero on success, error code in case of failure.
1017  */
1018 int
1019 release_peb(struct chfs_ebh *ebh, int pebnr)
1020 {
1021 	int err = 0;
1022 	struct chfs_peb *peb;
1023 
1024 	mutex_enter(&ebh->erase_lock);
1025 
1026 	//dbg_ebh("LOCK: ebh->erase_lock spin locked in release_peb()\n");
1027 	peb = find_peb_in_use(ebh, pebnr);
1028 	if (!peb) {
1029 		chfs_err("LEB is mapped, but is not in the 'in_use' "
1030 		    "tree of ebh\n");
1031 		goto out_unlock;
1032 	}
1033 	err = add_peb_to_erase_queue(ebh, peb->pebnr, peb->erase_cnt,
1034 	    &ebh->to_erase);
1035 
1036 	if (err)
1037 		goto out_unlock;
1038 
1039 	RB_REMOVE(peb_in_use_rbtree, &ebh->in_use, peb);
1040 out_unlock:
1041 	mutex_exit(&ebh->erase_lock);
1042 	//dbg_ebh("UNLOCK: ebh->erase_lock spin unlocked in release_peb()"
1043 	//		" at out_unlock\n");
1044 	return err;
1045 }
1046 
1047 /**
1048  * erase_thread - background thread for erasing PEBs
1049  * @data: pointer to the eraseblock handler
1050  */
1051 /*void
1052   erase_thread(void *data)
1053   {
1054   struct chfs_ebh *ebh = data;
1055 
1056   dbg_ebh("erase thread started\n");
1057   while (ebh->bg_erase.eth_running) {
1058   int err;
1059 
1060   mutex_enter(&ebh->erase_lock);
1061   dbg_ebh("LOCK: ebh->erase_lock spin locked in erase_thread()\n");
1062   if (TAILQ_EMPTY(&ebh->to_erase) && TAILQ_EMPTY(&ebh->fully_erased)) {
1063   dbg_ebh("thread has nothing to do\n");
1064   mutex_exit(&ebh->erase_lock);
1065   mutex_enter(&ebh->bg_erase.eth_thread_mtx);
1066   cv_timedwait_sig(&ebh->bg_erase.eth_wakeup,
1067   &ebh->bg_erase.eth_thread_mtx, mstohz(100));
1068   mutex_exit(&ebh->bg_erase.eth_thread_mtx);
1069 
1070   dbg_ebh("UNLOCK: ebh->erase_lock spin unlocked in erase_thread()\n");
1071   continue;
1072   }
1073   mutex_exit(&ebh->erase_lock);
1074   dbg_ebh("UNLOCK: ebh->erase_lock spin unlocked in erase_thread()\n");
1075 
1076   err = free_peb(ebh);
1077   if (err)
1078   chfs_err("freeing PEB failed in the background thread: %d\n", err);
1079 
1080   }
1081   dbg_ebh("erase thread stopped\n");
1082   kthread_exit(0);
1083   }*/
1084 
1085 /**
1086  * erase_thread - background thread for erasing PEBs
1087  * @data: pointer to the eraseblock handler
1088  */
1089 void
1090 erase_thread(void *data) {
1091 	dbg_ebh("[EBH THREAD] erase thread started\n");
1092 
1093 	struct chfs_ebh *ebh = data;
1094 	int err;
1095 
1096 	mutex_enter(&ebh->erase_lock);
1097 	while (ebh->bg_erase.eth_running) {
1098 		if (TAILQ_EMPTY(&ebh->to_erase) &&
1099 		    TAILQ_EMPTY(&ebh->fully_erased)) {
1100 			cv_timedwait_sig(&ebh->bg_erase.eth_wakeup,
1101 			    &ebh->erase_lock, mstohz(100));
1102 		} else {
1103 			/* XXX exiting this mutex is a bit odd here as
1104 			 * free_peb instantly reenters it...
1105 			 */
1106 			err = free_peb(ebh);
1107 			mutex_exit(&ebh->erase_lock);
1108 			if (err) {
1109 				chfs_err("freeing PEB failed in the"
1110 				    " background thread: %d\n", err);
1111 			}
1112 			mutex_enter(&ebh->erase_lock);
1113 		}
1114 	}
1115 	mutex_exit(&ebh->erase_lock);
1116 
1117 	dbg_ebh("[EBH THREAD] erase thread stopped\n");
1118 	kthread_exit(0);
1119 }
1120 
1121 /**
1122  * erase_thread_start - init and start erase thread
1123  * @ebh: eraseblock handler
1124  */
1125 static void
1126 erase_thread_start(struct chfs_ebh *ebh)
1127 {
1128 	cv_init(&ebh->bg_erase.eth_wakeup, "ebheracv");
1129 
1130 	ebh->bg_erase.eth_running = true;
1131 	kthread_create(PRI_NONE, KTHREAD_MPSAFE | KTHREAD_MUSTJOIN, NULL,
1132 	    erase_thread, ebh, &ebh->bg_erase.eth_thread, "ebherase");
1133 }
1134 
1135 /**
1136  * erase_thread_stop - stop background erase thread
1137  * @ebh: eraseblock handler
1138  */
1139 static void
1140 erase_thread_stop(struct chfs_ebh *ebh)
1141 {
1142 	ebh->bg_erase.eth_running = false;
1143 	cv_signal(&ebh->bg_erase.eth_wakeup);
1144 	dbg_ebh("[EBH THREAD STOP] signaled\n");
1145 
1146 	kthread_join(ebh->bg_erase.eth_thread);
1147 #ifdef BROKEN_KTH_JOIN
1148 	kpause("chfsebhjointh", false, mstohz(1000), NULL);
1149 #endif
1150 
1151 	cv_destroy(&ebh->bg_erase.eth_wakeup);
1152 }
1153 
1154 /*****************************************************************************/
1155 /* End of Erase related operations					     */
1156 /*****************************************************************************/
1157 
1158 /*****************************************************************************/
1159 /* Scan related operations						     */
1160 /*****************************************************************************/
1161 int
1162 scan_leb_used_cmp(struct chfs_scan_leb *sleb1, struct chfs_scan_leb *sleb2)
1163 {
1164 	return (sleb1->lnr - sleb2->lnr);
1165 }
1166 
1167 RB_PROTOTYPE(scan_leb_used_rbtree, chfs_scan_leb, u.rb, scan_leb_used_cmp);
1168 RB_GENERATE(scan_leb_used_rbtree, chfs_scan_leb, u.rb, scan_leb_used_cmp);
1169 
1170 /**
1171  * scan_add_to_queue - adds a physical eraseblock to one of the
1172  *                     eraseblock queues
1173  * @si: chfs scanning information
1174  * @pebnr: physical eraseblock number
1175  * @erase_cnt: erase counter of the physical eraseblock
1176  * @queue: the queue to add to
1177  *
1178  * This function adds a physical eraseblock to one of the lists in the scanning
1179  * information.
1180  * Returns zero in case of success, negative error code in case of failure.
1181  */
1182 static int
1183 scan_add_to_queue(struct chfs_scan_info *si, int pebnr, int erase_cnt,
1184     struct scan_leb_queue *queue)
1185 {
1186 	struct chfs_scan_leb *sleb;
1187 
1188 	sleb = kmem_alloc(sizeof(struct chfs_scan_leb), KM_SLEEP);
1189 
1190 	sleb->pebnr = pebnr;
1191 	sleb->erase_cnt = erase_cnt;
1192 	TAILQ_INSERT_TAIL(queue, sleb, u.queue);
1193 	return 0;
1194 }
1195 
1196 /*
1197  * nor_scan_add_to_used - add a physical eraseblock to the
1198  *                        used tree of scan info
1199  * @ebh: chfs eraseblock handler
1200  * @si: chfs scanning information
1201  * @ebhdr: eraseblock header
1202  * @pebnr: physical eraseblock number
1203  * @leb_status: the status of the PEB's eraseblock header
1204  *
1205  * This function adds a PEB to the used tree of the scanning information.
1206  * It handles the situations if there are more physical eraseblock referencing
1207  * to the same logical eraseblock.
1208  * Returns zero in case of success, error code in case of fail.
1209  */
1210 int
1211 nor_scan_add_to_used(struct chfs_ebh *ebh, struct chfs_scan_info *si,
1212     struct chfs_eb_hdr *ebhdr, int pebnr, int leb_status)
1213 {
1214 	int err, lnr, ec;
1215 	struct chfs_scan_leb *sleb, *old;
1216 
1217 	lnr = CHFS_GET_LID(ebhdr->u.nor_hdr.lid);
1218 	ec = le32toh(ebhdr->ec_hdr.erase_cnt);
1219 
1220 	sleb = kmem_alloc(sizeof(struct chfs_scan_leb), KM_SLEEP);
1221 
1222 	sleb->erase_cnt = ec;
1223 	sleb->lnr = lnr;
1224 	sleb->pebnr = pebnr;
1225 	sleb->info = leb_status;
1226 
1227 	old = RB_INSERT(scan_leb_used_rbtree, &si->used, sleb);
1228 	if (old) {
1229 		kmem_free(sleb, sizeof(struct chfs_scan_leb));
1230 		/* There is already an eraseblock in the used tree */
1231 		/* If the new one is bad */
1232 		if (EBHDR_LEB_DIRTY == leb_status &&
1233 		    EBHDR_LEB_OK == old->info) {
1234 			return scan_add_to_queue(si, pebnr, ec, &si->erase);
1235 		} else {
1236 			err = scan_add_to_queue(si, old->pebnr,
1237 			    old->erase_cnt, &si->erase);
1238 			if (err) {
1239 				return err;
1240 			}
1241 
1242 			old->erase_cnt = ec;
1243 			old->lnr = lnr;
1244 			old->pebnr = pebnr;
1245 			old->info = leb_status;
1246 			return 0;
1247 		}
1248 	}
1249 	return 0;
1250 }
1251 
1252 /**
1253  * nor_process_eb - read the headers from NOR flash, check them and add them
1254  * 				   to the scanning information
1255  * @ebh: chfs eraseblock handler
1256  * @si: chfs scanning information
1257  * @pebnr: physical eraseblock number
1258  *
1259  * Returns zero in case of success, error code in case of failure.
1260  */
1261 int
1262 nor_process_eb(struct chfs_ebh *ebh, struct chfs_scan_info *si,
1263     int pebnr, struct chfs_eb_hdr *ebhdr)
1264 {
1265 	int err, erase_cnt, leb_status;
1266 
1267 	err = ebh->ops->read_eb_hdr(ebh, pebnr, ebhdr);
1268 	if (err)
1269 		return err;
1270 
1271 	erase_cnt = le32toh(ebhdr->ec_hdr.erase_cnt);
1272 	dbg_ebh("erase_cnt: %d\n", erase_cnt);
1273 	leb_status = ebh->ops->check_eb_hdr(ebh, ebhdr);
1274 	if (EBHDR_LEB_BADMAGIC == leb_status ||
1275 	    EBHDR_LEB_BADCRC == leb_status) {
1276 		err = scan_add_to_queue(si, pebnr, erase_cnt, &si->corrupted);
1277 		return err;
1278 	}
1279 	else if (EBHDR_LEB_FREE == leb_status) {
1280 		err = scan_add_to_queue(si, pebnr, erase_cnt, &si->free);
1281 		goto count_mean;
1282 	}
1283 	else if (EBHDR_LEB_NO_HDR == leb_status) {
1284 		err = scan_add_to_queue(si, pebnr, erase_cnt, &si->erased);
1285 		return err;
1286 	}
1287 	else if (EBHDR_LEB_INVALIDATED == leb_status) {
1288 		err = scan_add_to_queue(si, pebnr, erase_cnt, &si->erase);
1289 		return err;
1290 	}
1291 
1292 	err = nor_scan_add_to_used(ebh, si, ebhdr, pebnr, leb_status);
1293 	if (err)
1294 		return err;
1295 
1296 
1297 count_mean:
1298 	si->sum_of_ec += erase_cnt;
1299 	si->num_of_eb++;
1300 
1301 	return err;
1302 }
1303 
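/*
 * Summary of the NOR classification above (descriptive only):
 *
 *	EBHDR_LEB_BADMAGIC / BADCRC -> si->corrupted
 *	EBHDR_LEB_FREE              -> si->free
 *	EBHDR_LEB_NO_HDR            -> si->erased
 *	EBHDR_LEB_INVALIDATED       -> si->erase
 *	EBHDR_LEB_OK / DIRTY        -> si->used (via nor_scan_add_to_used)
 *
 * Free and used PEBs also feed the erase counter average through
 * count_mean.
 */
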
1304 /*
1305  * nand_scan_add_to_used - add a physical eraseblock to the
1306  *                         used tree of scan info
1307  * @ebh: chfs eraseblock handler
1308  * @si: chfs scanning information
1309  * @ebhdr: eraseblock header
1310  * @pebnr: physical eraseblock number
1311  * @leb_status: the status of the PEB's eraseblock header
1312  *
1313  * This function adds a PEB to the used tree of the scanning information.
1314  * It handles the situation where more than one physical eraseblock references
1315  * the same logical eraseblock.
1316  * Returns zero in case of success, error code in case of failure.
1317  */
1318 int
1319 nand_scan_add_to_used(struct chfs_ebh *ebh, struct chfs_scan_info *si,
1320     struct chfs_eb_hdr *ebhdr, int pebnr)
1321 {
1322 	int err, lnr, ec;
1323 	struct chfs_scan_leb *sleb, *old;
1324 	uint64_t serial = le64toh(ebhdr->u.nand_hdr.serial);
1325 
1326 	lnr = CHFS_GET_LID(ebhdr->u.nor_hdr.lid);
1327 	ec = le32toh(ebhdr->ec_hdr.erase_cnt);
1328 
1329 	sleb = kmem_alloc(sizeof(struct chfs_scan_leb), KM_SLEEP);
1330 
1331 	sleb->erase_cnt = ec;
1332 	sleb->lnr = lnr;
1333 	sleb->pebnr = pebnr;
1334 	sleb->info = serial;
1335 
1336 	old = RB_INSERT(scan_leb_used_rbtree, &si->used, sleb);
1337 	if (old) {
1338 		kmem_free(sleb, sizeof(struct chfs_scan_leb));
1339 		/* There is already an eraseblock in the used tree */
1340 		/* If the new one is bad */
1341 		if (serial < old->info)
1342 			return scan_add_to_queue(si, pebnr, ec, &si->erase);
1343 		else {
1344 			err = scan_add_to_queue(si,
1345 			    old->pebnr, old->erase_cnt, &si->erase);
1346 			if (err)
1347 				return err;
1348 
1349 			old->erase_cnt = ec;
1350 			old->lnr = lnr;
1351 			old->pebnr = pebnr;
1352 			old->info = serial;
1353 			return 0;
1354 		}
1355 	}
1356 	return 0;
1357 }
1358 
1359 /**
1360  * nand_process_eb - read the headers from NAND flash, check them and add them
1361  * 					to the scanning information
1362  * @ebh: chfs eraseblock handler
1363  * @si: chfs scanning information
1364  * @pebnr: physical eraseblock number
1365  *
1366  * Returns zero in case of success, error code in case of failure.
1367  */
1368 int
1369 nand_process_eb(struct chfs_ebh *ebh, struct chfs_scan_info *si,
1370     int pebnr, struct chfs_eb_hdr *ebhdr)
1371 {
1372 	int err, erase_cnt, leb_status;
1373 	uint64_t max_serial;
1374 	/* isbad() is defined on some ancient platforms, heh */
1375 	bool is_bad;
1376 
1377 	/* Check whether the block is bad */
1378 	err = flash_block_isbad(ebh->flash_dev,
1379 	    pebnr * ebh->flash_if->erasesize, &is_bad);
1380 	if (err) {
1381 		chfs_err("checking block is bad failed\n");
1382 		return err;
1383 	}
1384 	if (is_bad) {
1385 		si->bad_peb_cnt++;
1386 		return 0;
1387 	}
1388 
1389 	err = ebh->ops->read_eb_hdr(ebh, pebnr, ebhdr);
1390 	if (err)
1391 		return err;
1392 
1393 	erase_cnt = le32toh(ebhdr->ec_hdr.erase_cnt);
1394 	leb_status = ebh->ops->check_eb_hdr(ebh, ebhdr);
1395 	if (EBHDR_LEB_BADMAGIC == leb_status ||
1396 	    EBHDR_LEB_BADCRC == leb_status) {
1397 		err = scan_add_to_queue(si, pebnr, erase_cnt, &si->corrupted);
1398 		return err;
1399 	}
1400 	else if (EBHDR_LEB_FREE == leb_status) {
1401 		err = scan_add_to_queue(si, pebnr, erase_cnt, &si->free);
1402 		goto count_mean;
1403 	}
1404 	else if (EBHDR_LEB_NO_HDR == leb_status) {
1405 		err = scan_add_to_queue(si, pebnr, erase_cnt, &si->erased);
1406 		return err;
1407 	}
1408 
1409 	err = nand_scan_add_to_used(ebh, si, ebhdr, pebnr);
1410 	if (err)
1411 		return err;
1412 
1413 	max_serial = le64toh(ebhdr->u.nand_hdr.serial);
1414 	if (max_serial > *ebh->max_serial) {
1415 		*ebh->max_serial = max_serial;
1416 	}
1417 
1418 count_mean:
1419 	si->sum_of_ec += erase_cnt;
1420 	si->num_of_eb++;
1421 
1422 	return err;
1423 }
1424 
1425 /**
1426  * chfs_scan - scans the media and returns information about it
1427  * @ebh: chfs eraseblock handler
1428  *
1429  * This function scans through the media and returns information about it,
1430  * or NULL if it fails.
1431  */
1432 struct chfs_scan_info *
1433 chfs_scan(struct chfs_ebh *ebh)
1434 {
1435 	struct chfs_scan_info *si;
1436 	struct chfs_eb_hdr *ebhdr;
1437 	int pebnr, err;
1438 
1439 	si = kmem_alloc(sizeof(*si), KM_SLEEP);
1440 
1441 	TAILQ_INIT(&si->corrupted);
1442 	TAILQ_INIT(&si->free);
1443 	TAILQ_INIT(&si->erase);
1444 	TAILQ_INIT(&si->erased);
1445 	RB_INIT(&si->used);
1446 	si->bad_peb_cnt = 0;
1447 	si->num_of_eb = 0;
1448 	si->sum_of_ec = 0;
1449 
1450 	ebhdr = kmem_alloc(sizeof(*ebhdr), KM_SLEEP);
1451 
1452 	for (pebnr = 0; pebnr < ebh->peb_nr; pebnr++) {
1453 		dbg_ebh("processing PEB %d\n", pebnr);
1454 		err = ebh->ops->process_eb(ebh, si, pebnr, ebhdr);
1455 		if (err < 0)
1456 			goto out_ebhdr;
1457 	}
1458 	kmem_free(ebhdr, sizeof(*ebhdr));
1459 	dbg_ebh("[CHFS_SCAN] scanning information collected\n");
1460 	return si;
1461 
1462 out_ebhdr:
1463 	kmem_free(ebhdr, sizeof(*ebhdr));
1464 	kmem_free(si, sizeof(*si));
1465 	return NULL;
1466 }
1467 
1468 /**
1469  * scan_info_destroy - frees all lists and trees in the scanning information
1470  * @si: the scanning information
1471  */
1472 void
1473 scan_info_destroy(struct chfs_scan_info *si)
1474 {
1475 	EBH_QUEUE_DESTROY(&si->corrupted,
1476 	    struct chfs_scan_leb, u.queue);
1477 
1478 	EBH_QUEUE_DESTROY(&si->erase,
1479 	    struct chfs_scan_leb, u.queue);
1480 
1481 	EBH_QUEUE_DESTROY(&si->erased,
1482 	    struct chfs_scan_leb, u.queue);
1483 
1484 	EBH_QUEUE_DESTROY(&si->free,
1485 	    struct chfs_scan_leb, u.queue);
1486 
1487 	EBH_TREE_DESTROY(scan_leb_used_rbtree,
1488 	    &si->used, struct chfs_scan_leb);
1489 
1490 	kmem_free(si, sizeof(*si));
1491 	dbg_ebh("[SCAN_INFO_DESTROY] scanning information destroyed\n");
1492 }
1493 
1494 /**
1495  * scan_media - scan media
1496  *
1497  * @ebh - chfs eraseblock handler
1498  *
1499  * Returns zero in case of success, error code in case of failure.
1500  */
1501 
1502 int
1503 scan_media(struct chfs_ebh *ebh)
1504 {
1505 	int err, i, avg_ec;
1506 	struct chfs_scan_info *si;
1507 	struct chfs_scan_leb *sleb;
1508 
1509 	si = chfs_scan(ebh);
1510 	/*
1511 	 * Process the scan info, manage the eraseblock lists
1512 	 */
1513 	mutex_init(&ebh->ltree_lock, MUTEX_DEFAULT, IPL_NONE);
1514 	mutex_init(&ebh->erase_lock, MUTEX_DEFAULT, IPL_NONE);
1515 	RB_INIT(&ebh->ltree);
1516 	RB_INIT(&ebh->free);
1517 	RB_INIT(&ebh->in_use);
1518 	TAILQ_INIT(&ebh->to_erase);
1519 	TAILQ_INIT(&ebh->fully_erased);
1520 	mutex_init(&ebh->alc_mutex, MUTEX_DEFAULT, IPL_NONE);
1521 
1522 	ebh->peb_nr -= si->bad_peb_cnt;
1523 
1524 	/*
1525 	 * Create background thread for erasing
1526 	 */
1527 	erase_thread_start(ebh);
1528 
1529 	ebh->lmap = kmem_alloc(ebh->peb_nr * sizeof(int), KM_SLEEP);
1530 
1531 	for (i = 0; i < ebh->peb_nr; i++) {
1532 		ebh->lmap[i] = EBH_LEB_UNMAPPED;
1533 	}
1534 
1535 	if (si->num_of_eb == 0) {
1536 		/* The flash contains no data. */
1537 		avg_ec = 0;
1538 	}
1539 	else {
1540 		avg_ec = (int) (si->sum_of_ec / si->num_of_eb);
1541 	}
1542 	dbg_ebh("num_of_eb: %d\n", si->num_of_eb);
1543 
1544 	mutex_enter(&ebh->erase_lock);
1545 
1546 	RB_FOREACH(sleb, scan_leb_used_rbtree, &si->used) {
1547 		ebh->lmap[sleb->lnr] = sleb->pebnr;
1548 		err = add_peb_to_in_use(ebh, sleb->pebnr, sleb->erase_cnt);
1549 		if (err)
1550 			goto out_free;
1551 	}
1552 
1553 	TAILQ_FOREACH(sleb, &si->erased, u.queue) {
1554 		err = add_peb_to_erase_queue(ebh, sleb->pebnr, avg_ec,
1555 		    &ebh->fully_erased);
1556 		if (err)
1557 			goto out_free;
1558 	}
1559 
1560 	TAILQ_FOREACH(sleb, &si->erase, u.queue) {
1561 		err = add_peb_to_erase_queue(ebh, sleb->pebnr, avg_ec,
1562 		    &ebh->to_erase);
1563 		if (err)
1564 			goto out_free;
1565 	}
1566 
1567 	TAILQ_FOREACH(sleb, &si->free, u.queue) {
1568 		err = add_peb_to_free(ebh, sleb->pebnr, sleb->erase_cnt);
1569 		if (err)
1570 			goto out_free;
1571 	}
1572 
1573 	TAILQ_FOREACH(sleb, &si->corrupted, u.queue) {
1574 		err = add_peb_to_erase_queue(ebh, sleb->pebnr, avg_ec,
1575 		    &ebh->to_erase);
1576 		if (err)
1577 			goto out_free;
1578 	}
1579 	mutex_exit(&ebh->erase_lock);
1580 	scan_info_destroy(si);
1581 	return 0;
1582 
1583 out_free:
1584 	mutex_exit(&ebh->erase_lock);
1585 	kmem_free(ebh->lmap, ebh->peb_nr * sizeof(int));
1586 	scan_info_destroy(si);
1587 	dbg_ebh("[SCAN_MEDIA] returning with error: %d\n", err);
1588 	return err;
1589 }
1590 
1591 /*****************************************************************************/
1592 /* End of Scan related operations					     */
1593 /*****************************************************************************/
1594 
1595 /**
1596  * ebh_open - opens the flash device and initializes the eraseblock handler
1597  * @ebh: eraseblock handler
1598  * @dev: flash device to use
1599  *
1600  * Returns zero in case of success, error code in case of failure.
1601  */
1602 int
1603 ebh_open(struct chfs_ebh *ebh, dev_t dev)
1604 {
1605 	int err;
1606 
1607 	ebh->flash_dev = flash_get_device(dev);
1608 	if (!ebh->flash_dev) {
1609 		aprint_error("ebh_open: can't get flash device\n");
1610 		return ENODEV;
1611 	}
1612 
1613 	ebh->flash_if = flash_get_interface(dev);
1614 	if (!ebh->flash_if) {
1615 		aprint_error("ebh_open: can't get flash interface\n");
1616 		return ENODEV;
1617 	}
1618 
1619 	ebh->flash_size = flash_get_size(dev);
1620 	ebh->peb_nr = ebh->flash_size / ebh->flash_if->erasesize;
1621 //	ebh->peb_nr = ebh->flash_if->size / ebh->flash_if->erasesize;
1622 	/* Set up flash operations based on flash type */
1623 	ebh->ops = kmem_alloc(sizeof(struct chfs_ebh_ops), KM_SLEEP);
1624 
1625 	switch (ebh->flash_if->type) {
1626 	case FLASH_TYPE_NOR:
1627 		ebh->eb_size = ebh->flash_if->erasesize -
1628 		    CHFS_EB_EC_HDR_SIZE - CHFS_EB_HDR_NOR_SIZE;
1629 
1630 		ebh->ops->read_eb_hdr = nor_read_eb_hdr;
1631 		ebh->ops->write_eb_hdr = nor_write_eb_hdr;
1632 		ebh->ops->check_eb_hdr = nor_check_eb_hdr;
1633 		ebh->ops->mark_eb_hdr_dirty_flash =
1634 		    nor_mark_eb_hdr_dirty_flash;
1635 		ebh->ops->invalidate_eb_hdr = nor_invalidate_eb_hdr;
1636 		ebh->ops->mark_eb_hdr_free = mark_eb_hdr_free;
1637 
1638 		ebh->ops->process_eb = nor_process_eb;
1639 
1640 		ebh->ops->create_eb_hdr = nor_create_eb_hdr;
1641 		ebh->ops->calc_data_offs = nor_calc_data_offs;
1642 
1643 		ebh->max_serial = NULL;
1644 		break;
1645 	case FLASH_TYPE_NAND:
1646 		ebh->eb_size = ebh->flash_if->erasesize -
1647 		    2 * ebh->flash_if->page_size;
1648 
1649 		ebh->ops->read_eb_hdr = nand_read_eb_hdr;
1650 		ebh->ops->write_eb_hdr = nand_write_eb_hdr;
1651 		ebh->ops->check_eb_hdr = nand_check_eb_hdr;
1652 		ebh->ops->mark_eb_hdr_free = mark_eb_hdr_free;
1653 		ebh->ops->mark_eb_hdr_dirty_flash = NULL;
1654 		ebh->ops->invalidate_eb_hdr = NULL;
1655 
1656 		ebh->ops->process_eb = nand_process_eb;
1657 
1658 		ebh->ops->create_eb_hdr = nand_create_eb_hdr;
1659 		ebh->ops->calc_data_offs = nand_calc_data_offs;
1660 
1661 		ebh->max_serial = kmem_alloc(sizeof(uint64_t), KM_SLEEP);
1662 
1663 		*ebh->max_serial = 0;
1664 		break;
1665 	default:
1666 		return 1;
1667 	}
1668 	printf("opening ebh: eb_size: %zu\n", ebh->eb_size);
1669 	err = scan_media(ebh);
1670 	if (err) {
1671 		dbg_ebh("Scan failed.");
1672 		kmem_free(ebh->ops, sizeof(struct chfs_ebh_ops));
1673 		kmem_free(ebh, sizeof(struct chfs_ebh));
1674 		return err;
1675 	}
1676 	return 0;
1677 }
1678 
1679 /**
1680  * ebh_close - close ebh
1681  * @ebh: eraseblock handler
1682  * Returns zero in case of success, error code in case of failure.
1683  */
1684 int
1685 ebh_close(struct chfs_ebh *ebh)
1686 {
1687 	erase_thread_stop(ebh);
1688 
1689 	EBH_TREE_DESTROY(peb_free_rbtree, &ebh->free, struct chfs_peb);
1690 	EBH_TREE_DESTROY(peb_in_use_rbtree, &ebh->in_use, struct chfs_peb);
1691 
1692 	EBH_QUEUE_DESTROY(&ebh->fully_erased, struct chfs_peb, u.queue);
1693 	EBH_QUEUE_DESTROY(&ebh->to_erase, struct chfs_peb, u.queue);
1694 
1695 	/* XXX HACK, see ebh.h */
1696 	EBH_TREE_DESTROY_MUTEX(ltree_rbtree, &ebh->ltree,
1697 	    struct chfs_ltree_entry);
1698 
1699 	KASSERT(!mutex_owned(&ebh->ltree_lock));
1700 	KASSERT(!mutex_owned(&ebh->alc_mutex));
1701 	KASSERT(!mutex_owned(&ebh->erase_lock));
1702 
1703 	mutex_destroy(&ebh->ltree_lock);
1704 	mutex_destroy(&ebh->alc_mutex);
1705 	mutex_destroy(&ebh->erase_lock);
1706 
1707 	kmem_free(ebh->ops, sizeof(struct chfs_ebh_ops));
1708 	kmem_free(ebh, sizeof(struct chfs_ebh));
1709 
1710 	return 0;
1711 }
1712 
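/*
 * Usage sketch (illustrative; the chfs mount code is assumed to drive
 * this sequence):
 *
 *	struct chfs_ebh *ebh = kmem_zalloc(sizeof(*ebh), KM_SLEEP);
 *	err = ebh_open(ebh, dev);
 *	if (err)
 *		return err;
 *	... ebh_read_leb() / ebh_write_leb() / ebh_erase_leb() ...
 *	ebh_close(ebh);
 *
 * Note that ebh_close() (and ebh_open() on a failed media scan) frees
 * @ebh itself, so the caller must not free it again.
 */
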
1713 /**
1714  * ebh_read_leb - read data from leb
1715  * @ebh: eraseblock handler
1716  * @lnr: logical eraseblock number
1717  * @buf: buffer to read to
1718  * @offset: offset from where to read
1719  * @len: number of bytes to read
1720  *
1721  * Returns zero in case of success, error code in case of failure.
1722  */
1723 int
1724 ebh_read_leb(struct chfs_ebh *ebh, int lnr, char *buf, uint32_t offset,
1725     size_t len, size_t *retlen)
1726 {
1727 	int err, pebnr;
1728 	off_t data_offset;
1729 
1730 	KASSERT(offset + len <= ebh->eb_size);
1731 
1732 	err = leb_read_lock(ebh, lnr);
1733 	if (err)
1734 		return err;
1735 
1736 	pebnr = ebh->lmap[lnr];
1737 	/* If the PEB is not mapped, the buffer is filled with 0xFF */
1738 	if (EBH_LEB_UNMAPPED == pebnr) {
1739 		leb_read_unlock(ebh, lnr);
1740 		memset(buf, 0xFF, len);
1741 		return 0;
1742 	}
1743 
1744 	/* Read data */
1745 	data_offset = ebh->ops->calc_data_offs(ebh, pebnr, offset);
1746 	err = flash_read(ebh->flash_dev, data_offset, len, retlen,
1747 	    (unsigned char *) buf);
1748 	if (err)
1749 		goto out_free;
1750 
1751 	KASSERT(len == *retlen);
1752 
1753 out_free:
1754 	leb_read_unlock(ebh, lnr);
1755 	return err;
1756 }
1757 
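/*
 * Read example (illustrative sketch): reading the first 512 bytes of
 * LEB 5 into a caller-supplied buffer; an unmapped LEB simply comes back
 * filled with 0xFF.
 *
 *	char buf[512];
 *	size_t retlen;
 *	int err = ebh_read_leb(ebh, 5, buf, 0, sizeof(buf), &retlen);
 */
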
1758 /**
1759  * get_peb: get a free physical eraseblock
1760  * @ebh - chfs eraseblock handler
1761  *
1762  * This function gets a free eraseblock from the ebh->free RB-tree.
1763  * The first entry will be returned and deleted from the tree.
1764  * The entries are sorted by the erase counter, so the PEB with the smallest
1765  * erase counter will be handed out first.
1766  * If something goes bad a negative value will be returned.
1767  */
1768 int
1769 get_peb(struct chfs_ebh *ebh)
1770 {
1771 	int err, pebnr;
1772 	struct chfs_peb *peb;
1773 
1774 retry:
1775 	mutex_enter(&ebh->erase_lock);
1776 	//dbg_ebh("LOCK: ebh->erase_lock spin locked in get_peb()\n");
1777 	if (RB_EMPTY(&ebh->free)) {
1778 		/*There is no more free PEBs in the tree*/
1779 		if (TAILQ_EMPTY(&ebh->to_erase) &&
1780 		    TAILQ_EMPTY(&ebh->fully_erased)) {
1781 			mutex_exit(&ebh->erase_lock);
1782 			//dbg_ebh("UNLOCK: ebh->erase_lock spin unlocked in get_peb()\n");
1783 			return ENOSPC;
1784 		}
1785 		err = free_peb(ebh);
1786 
1787 		mutex_exit(&ebh->erase_lock);
1788 		//dbg_ebh("UNLOCK: ebh->erase_lock spin unlocked in get_peb()\n");
1789 
1790 		if (err)
1791 			return err;
1792 		goto retry;
1793 	}
1794 	peb = RB_MIN(peb_free_rbtree, &ebh->free);
1795 	pebnr = peb->pebnr;
1796 	RB_REMOVE(peb_free_rbtree, &ebh->free, peb);
1797 	err = add_peb_to_in_use(ebh, peb->pebnr, peb->erase_cnt);
1798 	if (err)
1799 		pebnr = err;
1800 
1801 	kmem_free(peb, sizeof(struct chfs_peb));
1802 
1803 	mutex_exit(&ebh->erase_lock);
1804 	//dbg_ebh("UNLOCK: ebh->erase_lock spin unlocked in get_peb()\n");
1805 
1806 	return pebnr;
1807 }
1808 
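/*
 * Usage sketch (illustrative only, compiled out): how the callers in this
 * file consume get_peb().  The fragment assumes it sits in a function that
 * already declares "pebnr" and "err", holds the LEB write lock for "lnr"
 * and has an "out_unlock" label, as ebh_map_leb() does.
 */
#if 0
	pebnr = get_peb(ebh);
	if (pebnr < 0) {
		/* No free PEB could be produced; propagate the failure. */
		err = pebnr;
		goto out_unlock;
	}
	ebh->lmap[lnr] = pebnr;
#endif
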
1809 /**
1810  * ebh_write_leb - write data to leb
1811  * @ebh: eraseblock handler
1812  * @lnr: logical eraseblock number
1813  * @buf: data to write
1814  * @offset: offset where to write
1815  * @len: number of bytes to write
1816  *
1817  * Returns zero in case of success, error code in case of failure.
1818  */
1819 int
1820 ebh_write_leb(struct chfs_ebh *ebh, int lnr, char *buf, uint32_t offset,
1821     size_t len, size_t *retlen)
1822 {
1823 	int err, pebnr, retries = 0;
1824 	off_t data_offset;
1825 	struct chfs_eb_hdr *ebhdr;
1826 
1827 	dbg("offset: %u | len: %zu | (offset+len): %zu"
1828 	    " | ebsize: %zu\n", offset, len, (offset+len), ebh->eb_size);
1829 
1830 	KASSERT(offset + len <= ebh->eb_size);
1831 
1832 	err = leb_write_lock(ebh, lnr);
1833 	if (err)
1834 		return err;
1835 
1836 	pebnr = ebh->lmap[lnr];
1837 	/* If the LEB is mapped write out data */
1838 	if (pebnr != EBH_LEB_UNMAPPED) {
1839 		data_offset = ebh->ops->calc_data_offs(ebh, pebnr, offset);
1840 		err = flash_write(ebh->flash_dev, data_offset, len, retlen,
1841 		    (unsigned char *) buf);
1842 
1843 		if (err) {
1844 			chfs_err("error %d while writing %zu bytes to PEB "
1845 			    "%d:%ju, written %zu bytes\n",
1846 			    err, len, pebnr, (uintmax_t)offset, *retlen);
1847 		} else {
1848 			KASSERT(len == *retlen);
1849 		}
1850 
1851 		leb_write_unlock(ebh, lnr);
1852 		return err;
1853 	}
1854 
1855 	/*
1856 	 * If the LEB is unmapped, get a free PEB and write the
1857 	 * eraseblock header first
1858 	 */
1859 	ebhdr = kmem_alloc(sizeof(struct chfs_eb_hdr), KM_SLEEP);
1860 
1861 	/* Setting up eraseblock header properties */
1862 	ebh->ops->create_eb_hdr(ebhdr, lnr);
1863 
1864 retry:
1865 	/* Getting a physical eraseblock from the wear leveling system */
1866 	pebnr = get_peb(ebh);
1867 	if (pebnr < 0) {
1868 		leb_write_unlock(ebh, lnr);
1869 		kmem_free(ebhdr, sizeof(struct chfs_eb_hdr));
1870 		return pebnr;
1871 	}
1872 
1873 	/* Write the eraseblock header to the media */
1874 	err = ebh->ops->write_eb_hdr(ebh, pebnr, ebhdr);
1875 	if (err) {
1876 		chfs_warn(
1877 			"error writing eraseblock header: LEB %d, PEB %d\n",
1878 			lnr, pebnr);
1879 		goto write_error;
1880 	}
1881 
1882 	/* Write out data */
1883 	if (len) {
1884 		data_offset = ebh->ops->calc_data_offs(ebh, pebnr, offset);
1885 		err = flash_write(ebh->flash_dev,
1886 		    data_offset, len, retlen, (unsigned char *) buf);
1887 		if (err) {
1888 			chfs_err("error %d while writing %zu bytes to PEB "
1889 			    "%d:%ju, written %zu bytes\n",
1890 			    err, len, pebnr, (uintmax_t)offset, *retlen);
1891 			goto write_error;
1892 		}
1893 	}
1894 
1895 	ebh->lmap[lnr] = pebnr;
1896 	leb_write_unlock(ebh, lnr);
1897 	kmem_free(ebhdr, sizeof(struct chfs_eb_hdr));
1898 
1899 	return 0;
1900 
1901 write_error:
	err = release_peb(ebh, pebnr);
1902 	// max retries (NOW: 2)
1903 	if (err || CHFS_MAX_GET_PEB_RETRIES < ++retries) {
1904 		leb_write_unlock(ebh, lnr);
1905 		kmem_free(ebhdr, sizeof(struct chfs_eb_hdr));
1906 		return err;
1907 	}
1908 	goto retry;
1909 }
1910 
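/*
 * Usage sketch (illustrative only, compiled out): writing a buffer to the
 * start of a LEB.  If the LEB is still unmapped, ebh_write_leb() fetches a
 * free PEB and writes its eraseblock header before the data.  "ebh", "lnr",
 * "data" and "len" are assumed to be supplied by the caller.
 */
#if 0
	{
		size_t retlen;
		int err;

		err = ebh_write_leb(ebh, lnr, data, 0, len, &retlen);
		if (err)
			chfs_err("write to LEB %d failed with error %d\n",
			    lnr, err);
		else
			KASSERT(retlen == len);
	}
#endif
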
1911 /**
1912  * ebh_erase_leb - erase a leb
1913  * @ebh: eraseblock handler
1914  * @lnr: leb number
1915  *
1916  * Returns zero in case of success, error code in case of failure.
1917  */
1918 int
1919 ebh_erase_leb(struct chfs_ebh *ebh, int lnr)
1920 {
1921 	int err, pebnr;
1922 
1923 	err = leb_write_lock(ebh, lnr);
	if (err)
		return err;
1924 
1925 	pebnr = ebh->lmap[lnr];
1926 	if (pebnr < 0) {
1927 		leb_write_unlock(ebh, lnr);
1928 		return EBH_LEB_UNMAPPED;
1929 	}
1930 	err = release_peb(ebh, pebnr);
1931 	if (err)
1932 		goto out_unlock;
1933 
1934 	ebh->lmap[lnr] = EBH_LEB_UNMAPPED;
1935 	cv_signal(&ebh->bg_erase.eth_wakeup);
1936 out_unlock:
1937 	leb_write_unlock(ebh, lnr);
1938 	return err;
1939 }
1940 
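/*
 * Usage sketch (illustrative only, compiled out): erasing a LEB.  The PEB
 * behind it is queued for erasure and the background erase thread is woken
 * up; EBH_LEB_UNMAPPED means there was nothing to erase.
 */
#if 0
	{
		int err;

		err = ebh_erase_leb(ebh, lnr);
		if (err && err != EBH_LEB_UNMAPPED)
			chfs_err("cannot erase LEB %d, error %d\n", lnr, err);
	}
#endif
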
1941 /**
1942  * ebh_map_leb - maps a PEB to LEB
1943  * @ebh: eraseblock handler
1944  * @lnr: leb number
1945  *
1946  * Returns zero on success, error code in case of failure.
1947  */
1948 int
1949 ebh_map_leb(struct chfs_ebh *ebh, int lnr)
1950 {
1951 	int err, pebnr, retries = 0;
1952 	struct chfs_eb_hdr *ebhdr;
1953 
1954 	ebhdr = kmem_alloc(sizeof(struct chfs_eb_hdr), KM_SLEEP);
1955 
1956 	err = leb_write_lock(ebh, lnr);
1957 	if (err) {
1958 		kmem_free(ebhdr, sizeof(struct chfs_eb_hdr));
1959 		return err;
1960 	}
1961 
1962 retry:
1963 	pebnr = get_peb(ebh);
1964 	if (pebnr < 0) {
1965 		err = pebnr;
1966 		goto out_unlock;
1967 	}
1968 
1969 	ebh->ops->create_eb_hdr(ebhdr, lnr);
1970 
1971 	err = ebh->ops->write_eb_hdr(ebh, pebnr, ebhdr);
1972 	if (err) {
1973 		chfs_warn(
1974 			"error writing eraseblock header: LEB %d, PEB %d\n",
1975 			lnr, pebnr);
1976 		goto write_error;
1977 	}
1978 
1979 	ebh->lmap[lnr] = pebnr;
1980 
1981 out_unlock:
1982 	leb_write_unlock(ebh, lnr);
1983 	return err;
1984 
1985 write_error:
1986 	err = release_peb(ebh, pebnr);
1987 	// max retries (NOW: 2)
1988 	if (err || CHFS_MAX_GET_PEB_RETRIES < ++retries) {
1989 		leb_write_unlock(ebh, lnr);
1990 		kmem_free(ebhdr, sizeof(struct chfs_eb_hdr));
1991 		return err;
1992 	}
1993 	goto retry;
1994 }
1995 
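/*
 * Usage sketch (illustrative only, compiled out): mapping a LEB up front so
 * that only the eraseblock header is written; the data can follow later via
 * ebh_write_leb().
 */
#if 0
	{
		int err;

		err = ebh_map_leb(ebh, lnr);
		if (err)
			chfs_warn("cannot map LEB %d, error %d\n", lnr, err);
	}
#endif
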
1996 /**
1997  * ebh_unmap_leb - unmap a leb
1998  * @ebh: eraseblock handler
1999  * @lnr: leb number
2000  *
2001  * Returns zero on success, error code in case of failure.
2002  */
2003 int
2004 ebh_unmap_leb(struct chfs_ebh *ebh, int lnr)
2005 {
2006 	int err;
2007 
2008 	if (ebh_is_mapped(ebh, lnr) < 0)
2009 		/* The eraseblock is already unmapped */
2010 		return 0;
2011 
2012 	err = ebh_erase_leb(ebh, lnr);
2013 
2014 	return err;
2015 }
2016 
2017 /**
2018  * ebh_is_mapped - check if a PEB is mapped to @lnr
2019  * @ebh: eraseblock handler
2020  * @lnr: leb number
2021  *
2022  * Returns the mapped PEB number if the LEB is mapped, a negative value otherwise.
2023  */
2024 int
2025 ebh_is_mapped(struct chfs_ebh *ebh, int lnr)
2026 {
2027 	int err, result;
2028 	err = leb_read_lock(ebh, lnr);
2029 	if (err)
2030 		return err;
2031 
2032 	result = ebh->lmap[lnr];
2033 	leb_read_unlock(ebh, lnr);
2034 
2035 	return result;
2036 }
2037 
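/*
 * Usage sketch (illustrative only, compiled out): the mapped/unmapped test
 * used elsewhere in this file -- a negative result means the LEB currently
 * has no PEB behind it.
 */
#if 0
	{
		int err;

		if (ebh_is_mapped(ebh, lnr) < 0) {
			/* Unmapped: map it before the first write. */
			err = ebh_map_leb(ebh, lnr);
			if (err)
				chfs_warn("cannot map LEB %d, error %d\n",
				    lnr, err);
		}
	}
#endif
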
2038 /**
2039  * ebh_change_leb - write the LEB to another PEB
2040  * @ebh: eraseblock handler
2041  * @lnr: leb number
2042  * @buf: data to write
2043  * @len: length of data
2044  * Returns zero in case of success, error code in case of failure.
2045  */
2046 int
2047 ebh_change_leb(struct chfs_ebh *ebh, int lnr, char *buf, size_t len,
2048     size_t *retlen)
2049 {
2050 	int err, pebnr, pebnr_old, retries = 0;
2051 	off_t data_offset;
2052 
2053 	struct chfs_peb *peb = NULL;
2054 	struct chfs_eb_hdr *ebhdr;
2055 
2056 	if (ebh_is_mapped(ebh, lnr) < 0)
2057 		return EBH_LEB_UNMAPPED;
2058 
2059 	if (len == 0) {
2060 		err = ebh_unmap_leb(ebh, lnr);
2061 		if (err)
2062 			return err;
2063 		return ebh_map_leb(ebh, lnr);
2064 	}
2065 
2066 	ebhdr = kmem_alloc(sizeof(struct chfs_eb_hdr), KM_SLEEP);
2067 
2068 	pebnr_old = ebh->lmap[lnr];
2069 
2070 	mutex_enter(&ebh->alc_mutex);
2071 	err = leb_write_lock(ebh, lnr);
2072 	if (err)
2073 		goto out_mutex;
2074 
2075 	if (ebh->ops->mark_eb_hdr_dirty_flash) {
2076 		err = ebh->ops->mark_eb_hdr_dirty_flash(ebh, pebnr_old, lnr);
2077 		if (err)
2078 			goto out_unlock;
2079 	}
2080 
2081 	/* Setting up eraseblock header properties */
2082 	ebh->ops->create_eb_hdr(ebhdr, lnr);
2083 
2084 retry:
2085 	/* Getting a physical eraseblock from the wear leveling system */
2086 	pebnr = get_peb(ebh);
2087 	if (pebnr < 0) {
2088 		leb_write_unlock(ebh, lnr);
2089 		mutex_exit(&ebh->alc_mutex);
2090 		kmem_free(ebhdr, sizeof(struct chfs_eb_hdr));
2091 		return pebnr;
2092 	}
2093 
2094 	err = ebh->ops->write_eb_hdr(ebh, pebnr, ebhdr);
2095 	if (err) {
2096 		chfs_warn(
2097 			"error writing eraseblock header: LEB %d, PEB %d",
2098 			lnr, pebnr);
2099 		goto write_error;
2100 	}
2101 
2102 	/* Write out data */
2103 	data_offset = ebh->ops->calc_data_offs(ebh, pebnr, 0);
2104 	err = flash_write(ebh->flash_dev, data_offset, len, retlen,
2105 	    (unsigned char *) buf);
2106 	if (err) {
2107 		chfs_err("error %d while writing %zu bytes to PEB %d:%ju,"
2108 		    " written %zu bytes",
2109 		    err, len, pebnr, (uintmax_t)data_offset, *retlen);
2110 		goto write_error;
2111 	}
2112 
2113 	ebh->lmap[lnr] = pebnr;
2114 
2115 	if (ebh->ops->invalidate_eb_hdr) {
2116 		err = ebh->ops->invalidate_eb_hdr(ebh, pebnr_old);
2117 		if (err)
2118 			goto out_unlock;
2119 	}
2120 	peb = find_peb_in_use(ebh, pebnr_old);
2121 	err = release_peb(ebh, peb->pebnr);
2122 
2123 out_unlock:
2124 	leb_write_unlock(ebh, lnr);
2125 
2126 out_mutex:
2127 	mutex_exit(&ebh->alc_mutex);
2128 	kmem_free(ebhdr, sizeof(struct chfs_eb_hdr));
2129 	if (peb != NULL)
		kmem_free(peb, sizeof(struct chfs_peb));
2130 	return err;
2131 
2132 write_error:
2133 	err = release_peb(ebh, pebnr);
2134 	// max retries (NOW: 2)
2135 	if (err || CHFS_MAX_GET_PEB_RETRIES < ++retries) {
2136 		leb_write_unlock(ebh, lnr);
2137 		mutex_exit(&ebh->alc_mutex);
2138 		kmem_free(ebhdr, sizeof(struct chfs_eb_hdr));
2139 		return err;
2140 	}
2141 	goto retry;
2142 }
2143 
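/*
 * Usage sketch (illustrative only, compiled out): replacing the contents of
 * a mapped LEB in one step.  The old PEB is handed back to the erase queues
 * once the new PEB carries the header and the data.  "ebh", "lnr", "newbuf"
 * and "newlen" are assumed to be supplied by the caller.
 */
#if 0
	{
		size_t retlen;
		int err;

		err = ebh_change_leb(ebh, lnr, newbuf, newlen, &retlen);
		if (err)
			chfs_err("cannot change LEB %d, error %d\n", lnr, err);
	}
#endif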
2144