/*	$NetBSD: chfs_nodeops.c,v 1.2 2012/08/10 09:26:58 ttoth Exp $	*/

/*-
 * Copyright (c) 2010 Department of Software Engineering,
 *		      University of Szeged, Hungary
 * Copyright (C) 2010 David Tengeri <dtengeri@inf.u-szeged.hu>
 * Copyright (C) 2010 Tamas Toth <ttoth@inf.u-szeged.hu>
 * Copyright (C) 2010 Adam Hoka <ahoka@NetBSD.org>
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by the Department of Software Engineering, University of Szeged, Hungary
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "chfs.h"

/**
 * chfs_update_eb_dirty - converts free space to dirty space on an eraseblock
 * @chmp: CHFS main descriptor structure
 * @cheb: eraseblock to update
 * @size: size to move from the free space to the dirty space
 * Returns zero in case of success, %1 in case of failure.
 */
int
chfs_update_eb_dirty(struct chfs_mount *chmp,
    struct chfs_eraseblock *cheb, uint32_t size)
{
	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
	KASSERT(!mutex_owned(&chmp->chm_lock_sizes));

	if (!size)
		return 0;

	if (size > cheb->free_size) {
		chfs_err("free_size (%u) is less than dirty space (%u) "
		    "on block (%d)\n", cheb->free_size, size, cheb->lnr);
		return 1;
	}
	mutex_enter(&chmp->chm_lock_sizes);
	//dbg("BEFORE: free_size: %d\n", cheb->free_size);
	chfs_change_size_free(chmp, cheb, -size);
	chfs_change_size_dirty(chmp, cheb, size);
	//dbg(" AFTER: free_size: %d\n", cheb->free_size);
	mutex_exit(&chmp->chm_lock_sizes);
	return 0;
}

/**
 * chfs_add_node_to_list - adds a data node ref to a vnode cache's node list
 * @chmp: CHFS main descriptor structure
 * @vc: vnode cache whose list is updated
 * @new: node ref to insert
 * @list: head of the list
 * This function inserts a data node ref into a list of the vnode cache.
 * The list is kept sorted by the node's lnr and offset and is terminated
 * by a pointer back to the vnode cache itself. If a node with the same
 * lnr and offset is already on the list, the old one is marked obsolete
 * and replaced.
 */
void
chfs_add_node_to_list(struct chfs_mount *chmp,
    struct chfs_vnode_cache *vc,
    struct chfs_node_ref *new, struct chfs_node_ref **list)
{
	KASSERT(mutex_owned(&chmp->chm_lock_vnocache));

	struct chfs_node_ref *nextref = *list;
	struct chfs_node_ref *prevref = NULL;

	/* find the place where the new node ref should be inserted */
	while (nextref && nextref != (struct chfs_node_ref *)vc &&
	    (nextref->nref_lnr <= new->nref_lnr)) {
		if (nextref->nref_lnr == new->nref_lnr) {
			while (nextref && nextref !=
			    (struct chfs_node_ref *)vc &&
			    (CHFS_GET_OFS(nextref->nref_offset) <
				CHFS_GET_OFS(new->nref_offset))) {
				prevref = nextref;
				nextref = nextref->nref_next;
			}
			break;
		}
		prevref = nextref;
		nextref = nextref->nref_next;
	}

	/* a node with the same lnr and offset is replaced, not duplicated */
	if (nextref && nextref != (struct chfs_node_ref *)vc &&
	    nextref->nref_lnr == new->nref_lnr &&
	    CHFS_GET_OFS(nextref->nref_offset) ==
	    CHFS_GET_OFS(new->nref_offset)) {
		new->nref_next = nextref->nref_next;
		chfs_mark_node_obsolete(chmp, nextref);
	} else {
		new->nref_next = nextref;
	}

	KASSERT(new->nref_next != NULL);

	if (prevref) {
		prevref->nref_next = new;
	} else {
		*list = new;
	}
}

/*
 * Removes a node from a list without obsoleting it.
 * Usually used for removing data nodes.
 */
void
chfs_remove_node_from_list(struct chfs_mount *chmp,
	struct chfs_vnode_cache *vc,
	struct chfs_node_ref *old_nref, struct chfs_node_ref **list)
{
	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
	KASSERT(mutex_owned(&chmp->chm_lock_vnocache));

	struct chfs_node_ref *tmpnref;

	if (*list == (struct chfs_node_ref *)vc) {
		/* the list is empty */
		return;
	}

	KASSERT(old_nref->nref_next != NULL);

	if (*list == old_nref) {
		*list = old_nref->nref_next;
	} else {
		tmpnref = *list;
		while (tmpnref->nref_next &&
			tmpnref->nref_next != (struct chfs_node_ref *)vc) {
			if (tmpnref->nref_next == old_nref) {
				tmpnref->nref_next = old_nref->nref_next;
				break;
			}
			tmpnref = tmpnref->nref_next;
		}
	}
}

/*
 * Removes a node from a list and marks the nref obsolete.
 * Use this function carefully on data nodes, because
 * removing a frag will already obsolete the node ref.
 */
void
chfs_remove_and_obsolete(struct chfs_mount *chmp,
	struct chfs_vnode_cache *vc,
	struct chfs_node_ref *old_nref, struct chfs_node_ref **list)
{
	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
	KASSERT(mutex_owned(&chmp->chm_lock_vnocache));

	chfs_remove_node_from_list(chmp, vc, old_nref, list);

	dbg("[MARK] vno: %llu lnr: %u ofs: %u\n", vc->vno, old_nref->nref_lnr,
		old_nref->nref_offset);
	chfs_mark_node_obsolete(chmp, old_nref);
}

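/**
 * chfs_add_fd_to_inode - adds a directory entry to a parent inode
 * @chmp: CHFS main descriptor structure
 * @parent: parent inode
 * @new: directory entry to insert
 * Inserts the dirent into the parent's dents list, which is kept sorted
 * by name hash. If an entry with the same name already exists, only the
 * one with the higher version is kept; the other is obsoleted and freed.
 */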
void
chfs_add_fd_to_inode(struct chfs_mount *chmp,
    struct chfs_inode *parent, struct chfs_dirent *new)
{
//	struct chfs_dirent **prev = &parent->dents;
	struct chfs_dirent *fd, *tmpfd;

	if (new->version > parent->chvc->highest_version) {
		parent->chvc->highest_version = new->version;
	}

	//mutex_enter(&parent->inode_lock);
	TAILQ_FOREACH_SAFE(fd, &parent->dents, fds, tmpfd) {
		if (fd->nhash > new->nhash) {
			/* insert new before fd */
			TAILQ_INSERT_BEFORE(fd, new, fds);
			return;
		} else if (fd->nhash == new->nhash &&
		    !strcmp(fd->name, new->name)) {
			if (new->version > fd->version) {
//				new->next = fd->next;
				/* replace fd with new */
				TAILQ_INSERT_BEFORE(fd, new, fds);
				TAILQ_REMOVE(&parent->dents, fd, fds);
				if (fd->nref) {
					mutex_enter(&chmp->chm_lock_vnocache);
					chfs_remove_and_obsolete(chmp, parent->chvc, fd->nref,
						&parent->chvc->dirents);
					mutex_exit(&chmp->chm_lock_vnocache);
				}
				chfs_free_dirent(fd);
//				*prev = new;//XXX
			} else {
				/* the existing dirent is at least as new, drop the new one */
				chfs_mark_node_obsolete(chmp, new->nref);
				chfs_free_dirent(new);
			}
			return;
		}
	}
	/* if we couldn't fit it elsewhere, let's add it to the end */
	/* FIXME insert tail or insert head? */
	TAILQ_INSERT_HEAD(&parent->dents, new, fds);
	//mutex_exit(&parent->inode_lock);
#if 0
	while ((*prev) && (*prev)->nhash <= new->nhash) {
		if ((*prev)->nhash == new->nhash &&
		    !strcmp((*prev)->name, new->name)) {
			if (new->version > (*prev)->version) {
				new->next = (*prev)->next;
				if ((*prev)->nref) {
					chfs_mark_node_obsolete(chmp,
					    (*prev)->nref);
				}
				chfs_free_dirent(*prev);
				*prev = new;
			} else {
				chfs_mark_node_obsolete(chmp, new->nref);
				chfs_free_dirent(new);
			}
			return;
		}
		prev = &((*prev)->next);
	}

	new->next = *prev;
	*prev = new;
#endif
}

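/**
 * chfs_add_vnode_ref_to_vc - adds a vnode information node ref to a vnode cache
 * @chmp: CHFS main descriptor structure
 * @vc: vnode cache to update
 * @new: node ref to insert
 * Only the newest vnode information node is kept: every node ref already
 * on the vnode chain is removed and obsoleted, then @new becomes the head
 * of the chain, terminated by a pointer back to the vnode cache.
 */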
void
chfs_add_vnode_ref_to_vc(struct chfs_mount *chmp,
    struct chfs_vnode_cache *vc, struct chfs_node_ref *new)
{
	KASSERT(mutex_owned(&chmp->chm_lock_vnocache));
	struct chfs_node_ref *nref;

	while (vc->v != (struct chfs_node_ref *)vc) {
		nref = vc->v;
		chfs_remove_and_obsolete(chmp, vc, nref, &vc->v);
	}

	new->nref_next = (struct chfs_node_ref *)vc;
	vc->v = new;
}

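/*
 * chfs_nref_next - steps to the next node ref in the physical chain
 * Node refs are allocated in arrays, so most of the time the next ref
 * is simply the next element of the array. An element whose lnr is
 * REF_LINK_TO_NEXT links to the next array, and REF_EMPTY_NODE marks
 * the end of the chain.
 */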
struct chfs_node_ref *
chfs_nref_next(struct chfs_node_ref *nref)
{
//	dbg("check nref: %u - %u\n", nref->nref_lnr, nref->nref_offset);
	nref++;
//	dbg("next nref: %u - %u\n", nref->nref_lnr, nref->nref_offset);
	if (nref->nref_lnr == REF_LINK_TO_NEXT) {
		/* end of chain */
		if (!nref->nref_next)
			return NULL;

		nref = nref->nref_next;
	}
	/* end of chain */
	if (nref->nref_lnr == REF_EMPTY_NODE)
		return NULL;

	return nref;
}

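/*
 * chfs_nref_len - calculates the length of the node referenced by nref
 * The length is the distance between the node's offset and the offset
 * of the next node on the eraseblock; for the last node it is the
 * distance to the end of the block's used space.
 */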
int
chfs_nref_len(struct chfs_mount *chmp,
    struct chfs_eraseblock *cheb, struct chfs_node_ref *nref)
{
	struct chfs_node_ref *next;

	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));

	if (!cheb)
		cheb = &chmp->chm_blocks[nref->nref_lnr];

	next = chfs_nref_next(nref);

	if (!next) {
		//dbg("next null\n");
		return chmp->chm_ebh->eb_size - cheb->free_size -
		    CHFS_GET_OFS(nref->nref_offset);
	}
	//dbg("size: %d\n", CHFS_GET_OFS(next->nref_offset) - CHFS_GET_OFS(nref->nref_offset));
	return CHFS_GET_OFS(next->nref_offset) -
	    CHFS_GET_OFS(nref->nref_offset);
}

/**
 * chfs_mark_node_obsolete - marks a node obsolete
 * @chmp: CHFS main descriptor structure
 * @nref: node ref to mark
 * Moves the node's space to the dirty counters of its eraseblock and
 * requeues the block according to its new state.
 */
void
chfs_mark_node_obsolete(struct chfs_mount *chmp,
    struct chfs_node_ref *nref)
{
	int len;
	struct chfs_eraseblock *cheb;

	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));

	KASSERT(!CHFS_REF_OBSOLETE(nref));

	KASSERT(nref->nref_lnr <= chmp->chm_ebh->peb_nr);
	cheb = &chmp->chm_blocks[nref->nref_lnr];

#ifdef DIAGNOSTIC
	if (cheb->used_size + cheb->free_size + cheb->dirty_size +
	    cheb->unchecked_size + cheb->wasted_size != chmp->chm_ebh->eb_size) {
		dbg("eraseblock leak detected!\nused: %u\nfree: %u\n"
		    "dirty: %u\nunchecked: %u\nwasted: %u\ntotal: %u\nshould be: %zu\n",
		    cheb->used_size, cheb->free_size, cheb->dirty_size,
		    cheb->unchecked_size, cheb->wasted_size, cheb->used_size + cheb->free_size +
		    cheb->dirty_size + cheb->unchecked_size + cheb->wasted_size,
		    chmp->chm_ebh->eb_size);
	}
#endif

	len = chfs_nref_len(chmp, cheb, nref);
	//dbg("len: %u\n", len);
	//dbg("1. used: %u\n", cheb->used_size);

	mutex_enter(&chmp->chm_lock_sizes);

	if (CHFS_REF_FLAGS(nref) == CHFS_UNCHECKED_NODE_MASK) {
		//dbg("UNCHECKED mark an unchecked node\n");
		chfs_change_size_unchecked(chmp, cheb, -len);
		//dbg("unchecked: %u\n", chmp->chm_unchecked_size);
	} else {
		chfs_change_size_used(chmp, cheb, -len);

		//dbg("2. used: %u\n", cheb->used_size);
		KASSERT(cheb->used_size <= chmp->chm_ebh->eb_size);
	}
	chfs_change_size_dirty(chmp, cheb, len);

#ifdef DIAGNOSTIC
	if (cheb->used_size + cheb->free_size + cheb->dirty_size +
	    cheb->unchecked_size + cheb->wasted_size != chmp->chm_ebh->eb_size) {
		panic("eraseblock leak detected!\nused: %u\nfree: %u\n"
		    "dirty: %u\nunchecked: %u\nwasted: %u\ntotal: %u\nshould be: %zu\n",
		    cheb->used_size, cheb->free_size, cheb->dirty_size,
		    cheb->unchecked_size, cheb->wasted_size, cheb->used_size + cheb->free_size +
		    cheb->dirty_size + cheb->unchecked_size + cheb->wasted_size,
		    chmp->chm_ebh->eb_size);
	}
#endif
	nref->nref_offset = CHFS_GET_OFS(nref->nref_offset) |
	    CHFS_OBSOLETE_NODE_MASK;

	if (chmp->chm_flags & CHFS_MP_FLAG_SCANNING) {
		/* Scan is in progress, do nothing now */
		mutex_exit(&chmp->chm_lock_sizes);
		return;
	}

	if (cheb == chmp->chm_nextblock) {
		dbg("Not moving nextblock to dirty/erase_pending list\n");
	} else if (!cheb->used_size && !cheb->unchecked_size) {
		if (cheb == chmp->chm_gcblock) {
			dbg("gcblock is completely dirtied\n");
			chmp->chm_gcblock = NULL;
		} else {
			/*
			 * The block must be removed from whichever queue
			 * currently holds it, but we don't know which one
			 * that is, so search all of them.
			 */
			int removed = 0;
			struct chfs_eraseblock *eb, *tmpeb;
			//XXX ugly code
			TAILQ_FOREACH_SAFE(eb, &chmp->chm_free_queue, queue, tmpeb) {
				if (eb == cheb) {
					TAILQ_REMOVE(&chmp->chm_free_queue, cheb, queue);
					removed = 1;
					break;
				}
			}
			if (removed == 0) {
				TAILQ_FOREACH_SAFE(eb, &chmp->chm_dirty_queue, queue, tmpeb) {
					if (eb == cheb) {
						TAILQ_REMOVE(&chmp->chm_dirty_queue, cheb, queue);
						removed = 1;
						break;
					}
				}
			}
			if (removed == 0) {
				TAILQ_FOREACH_SAFE(eb, &chmp->chm_very_dirty_queue, queue, tmpeb) {
					if (eb == cheb) {
						TAILQ_REMOVE(&chmp->chm_very_dirty_queue, cheb, queue);
						removed = 1;
						break;
					}
				}
			}
			if (removed == 0) {
				TAILQ_FOREACH_SAFE(eb, &chmp->chm_clean_queue, queue, tmpeb) {
					if (eb == cheb) {
						TAILQ_REMOVE(&chmp->chm_clean_queue, cheb, queue);
						removed = 1;
						break;
					}
				}
			}
		}
		if (chmp->chm_wbuf_len) {
			dbg("Adding block to erasable pending wbuf queue\n");
			TAILQ_INSERT_TAIL(&chmp->chm_erasable_pending_wbuf_queue,
			    cheb, queue);
		} else {
			TAILQ_INSERT_TAIL(&chmp->chm_erase_pending_queue,
			    cheb, queue);
			chmp->chm_nr_erasable_blocks++;
		}
		chfs_remap_leb(chmp);
	} else if (cheb == chmp->chm_gcblock) {
		dbg("Not moving gcblock to dirty list\n");
	} else if (cheb->dirty_size > MAX_DIRTY_TO_CLEAN &&
	    cheb->dirty_size - len <= MAX_DIRTY_TO_CLEAN) {
		dbg("Freshly dirtied, remove it from clean queue and "
		    "add it to dirty\n");
		TAILQ_REMOVE(&chmp->chm_clean_queue, cheb, queue);
		TAILQ_INSERT_TAIL(&chmp->chm_dirty_queue, cheb, queue);
	} else if (VERY_DIRTY(chmp, cheb->dirty_size) &&
	    !VERY_DIRTY(chmp, cheb->dirty_size - len)) {
		dbg("Becomes now very dirty, remove it from dirty "
		    "queue and add it to very dirty\n");
		TAILQ_REMOVE(&chmp->chm_dirty_queue, cheb, queue);
		TAILQ_INSERT_TAIL(&chmp->chm_very_dirty_queue, cheb, queue);
	} else {
		dbg("Leave cheb where it is\n");
	}
	mutex_exit(&chmp->chm_lock_sizes);
	return;
}

/**
 * chfs_close_eraseblock - closes an eraseblock
 * @chmp: chfs mount structure
 * @cheb: eraseblock information
 *
 * This function closes the physical chain of the nodes on the eraseblock,
 * converts its free space to dirty and adds the block to the clean, dirty
 * or very dirty queue.
 */
int
chfs_close_eraseblock(struct chfs_mount *chmp,
    struct chfs_eraseblock *cheb)
{
	uint32_t offset;
	struct chfs_node_ref *nref;

	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));

	offset = chmp->chm_ebh->eb_size - cheb->free_size;

	// Close the chain
	nref = chfs_alloc_node_ref(cheb);
	if (!nref)
		return ENOMEM;

	nref->nref_next = NULL;
	nref->nref_offset = offset;

	// Mark space as dirty
	chfs_update_eb_dirty(chmp, cheb, cheb->free_size);

	if (cheb->dirty_size < MAX_DIRTY_TO_CLEAN) {
		TAILQ_INSERT_TAIL(&chmp->chm_clean_queue, cheb, queue);
	} else if (VERY_DIRTY(chmp, cheb->dirty_size)) {
		TAILQ_INSERT_TAIL(&chmp->chm_very_dirty_queue, cheb, queue);
	} else {
		TAILQ_INSERT_TAIL(&chmp->chm_dirty_queue, cheb, queue);
	}
	return 0;
}

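/**
 * chfs_reserve_space_normal - reserves space for a normal write
 * @chmp: CHFS main descriptor structure
 * @size: size to reserve
 * @prio: priority of the allocation; ALLOC_DELETION requests may dip
 *	  into the blocks reserved for deletion
 * Runs the garbage collector and remaps erasable blocks until the
 * write reserve is satisfied, then reserves the space itself with
 * chfs_reserve_space().
 */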
int
chfs_reserve_space_normal(struct chfs_mount *chmp, uint32_t size, int prio)
{
	int ret;

	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));

	mutex_enter(&chmp->chm_lock_sizes);
	while (chmp->chm_nr_free_blocks + chmp->chm_nr_erasable_blocks < chmp->chm_resv_blocks_write) {
		dbg("free: %d, erasable: %d, resv: %d\n", chmp->chm_nr_free_blocks, chmp->chm_nr_erasable_blocks, chmp->chm_resv_blocks_write);
		uint32_t avail, dirty;
		if (prio == ALLOC_DELETION && chmp->chm_nr_free_blocks + chmp->chm_nr_erasable_blocks >= chmp->chm_resv_blocks_deletion)
			break;

		dirty = chmp->chm_dirty_size - chmp->chm_nr_erasable_blocks * chmp->chm_ebh->eb_size + chmp->chm_unchecked_size;
		if (dirty < chmp->chm_nospc_dirty) {
			dbg("dirty: %u < nospc_dirty: %u\n", dirty, chmp->chm_nospc_dirty);
			ret = ENOSPC;
			mutex_exit(&chmp->chm_lock_sizes);
			goto out;
		}

		avail = chmp->chm_free_size - (chmp->chm_resv_blocks_write * chmp->chm_ebh->eb_size);
		if (size > avail) {
			dbg("size: %u > avail: %u\n", size, avail);
			ret = ENOSPC;
			mutex_exit(&chmp->chm_lock_sizes);
			goto out;
		}

		mutex_exit(&chmp->chm_lock_sizes);
		ret = chfs_gcollect_pass(chmp);
		mutex_enter(&chmp->chm_lock_sizes);

		if (chmp->chm_nr_erasable_blocks ||
		    !TAILQ_EMPTY(&chmp->chm_erasable_pending_wbuf_queue) ||
		    ret == EAGAIN) {
			ret = chfs_remap_leb(chmp);
		}

		if (ret) {
			mutex_exit(&chmp->chm_lock_sizes);
			goto out;
		}
	}

	mutex_exit(&chmp->chm_lock_sizes);
	ret = chfs_reserve_space(chmp, size);
out:
	return ret;
}

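/**
 * chfs_reserve_space_gc - reserves space for the garbage collector
 * @chmp: CHFS main descriptor structure
 * @size: size to reserve
 * Unlike normal reservations, only the total free size is checked;
 * GC does not have to respect the reserved block counts.
 */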
int
chfs_reserve_space_gc(struct chfs_mount *chmp, uint32_t size)
{
	int ret;

	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));

	mutex_enter(&chmp->chm_lock_sizes);
	chfs_remap_leb(chmp);

	if (size > chmp->chm_free_size) {
		dbg("size: %u\n", size);
		mutex_exit(&chmp->chm_lock_sizes);
		return ENOSPC;
	}

	mutex_exit(&chmp->chm_lock_sizes);
	ret = chfs_reserve_space(chmp, size);
	return ret;
}

/**
 * chfs_reserve_space - finds a block whose free size is >= the requested size
 * @chmp: chfs mount point
 * @size: requested size
 * Returns zero in case of success, an error code in case of failure.
 */
int
chfs_reserve_space(struct chfs_mount *chmp, uint32_t size)
{
	//TODO define minimum reserved blocks, which is needed for writing
	//TODO check we have enough free blocks to write
	//TODO if no: need erase and GC

	int err;
	struct chfs_eraseblock *cheb;

	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
	KASSERT(!mutex_owned(&chmp->chm_lock_sizes));

	cheb = chmp->chm_nextblock;
	//if (cheb)
	    //dbg("cheb->free_size %u\n", cheb->free_size);
	if (cheb && size > cheb->free_size) {
		dbg("size: %u > free_size: %u\n", size, cheb->free_size);
		/*
		 * There isn't enough space on this eraseblock, so we mark it
		 * dirty and close the physical chain of the node refs.
		 */
		//Write out pending data if any
		if (chmp->chm_wbuf_len) {
			chfs_flush_pending_wbuf(chmp);
			//FIXME need goto restart here?
		}

		while (chmp->chm_wbuf_ofs < chmp->chm_ebh->eb_size) {
			dbg("wbuf ofs: %zu - eb_size: %zu\n",
			    chmp->chm_wbuf_ofs, chmp->chm_ebh->eb_size);
			chfs_flush_pending_wbuf(chmp);
		}

		if (!(chmp->chm_wbuf_ofs % chmp->chm_ebh->eb_size) && !chmp->chm_wbuf_len)
			chmp->chm_wbuf_ofs = 0xffffffff;

		err = chfs_close_eraseblock(chmp, cheb);
		if (err)
			return err;

		cheb = NULL;
	}
	if (!cheb) {
		//get a block for nextblock
		if (TAILQ_EMPTY(&chmp->chm_free_queue)) {
			// If this succeeds there will be a block on free_queue
			dbg("cheb remap (free: %d)\n", chmp->chm_nr_free_blocks);
			err = chfs_remap_leb(chmp);
			if (err)
				return err;
		}
		cheb = TAILQ_FIRST(&chmp->chm_free_queue);
		TAILQ_REMOVE(&chmp->chm_free_queue, cheb, queue);
		chmp->chm_nextblock = cheb;
		chmp->chm_nr_free_blocks--;
	}

	return 0;
}
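
/*
 * Typical reservation pattern (a sketch, not taken from this file): a
 * writer holds chm_lock_mountfields, reserves room for the node it is
 * about to write, then writes into chm_nextblock through the write
 * buffer. ALLOC_NORMAL is assumed here to be the default allocation
 * priority; the names totlen and err are illustrative only.
 *
 *	mutex_enter(&chmp->chm_lock_mountfields);
 *	err = chfs_reserve_space_normal(chmp, totlen, ALLOC_NORMAL);
 *	if (!err) {
 *		... build the node and write it out via the wbuf ...
 *	}
 *	mutex_exit(&chmp->chm_lock_mountfields);
 */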
633