xref: /minix3/sys/ufs/chfs/chfs_malloc.c (revision f14fb602092e015ff630df58e17c2a9cd57d29b3)
1 /*	$NetBSD: chfs_malloc.c,v 1.1 2011/11/24 15:51:31 ahoka Exp $	*/
2 
3 /*-
4  * Copyright (c) 2010 Department of Software Engineering,
5  *		      University of Szeged, Hungary
6  * Copyright (C) 2010 Tamas Toth <ttoth@inf.u-szeged.hu>
7  * Copyright (C) 2010 Adam Hoka <ahoka@NetBSD.org>
8  * All rights reserved.
9  *
10  * This code is derived from software contributed to The NetBSD Foundation
11  * by the Department of Software Engineering, University of Szeged, Hungary
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  * 1. Redistributions of source code must retain the above copyright
17  *    notice, this list of conditions and the following disclaimer.
18  * 2. Redistributions in binary form must reproduce the above copyright
19  *    notice, this list of conditions and the following disclaimer in the
20  *    documentation and/or other materials provided with the distribution.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  */
34 
35 #include "chfs.h"
36 #include <sys/pool.h>
37 
38 pool_cache_t chfs_vnode_cache;
39 pool_cache_t chfs_nrefs_cache;
40 pool_cache_t chfs_flash_vnode_cache;
41 pool_cache_t chfs_flash_dirent_cache;
42 pool_cache_t chfs_flash_dnode_cache;
43 pool_cache_t chfs_node_frag_cache;
44 pool_cache_t chfs_tmp_dnode_cache;
45 pool_cache_t chfs_tmp_dnode_info_cache;
46 
47 int
48 chfs_alloc_pool_caches()
49 {
50 	chfs_vnode_cache = pool_cache_init(
51 		sizeof(struct chfs_vnode_cache),
52 		0, 0, 0, "chfs_vnode_cache", NULL, IPL_NONE, NULL, NULL,
53 		NULL);
54 	if (!chfs_vnode_cache)
55 		goto err_vnode;
56 
57 	chfs_nrefs_cache = pool_cache_init(
58 		(REFS_BLOCK_LEN + 1) * sizeof(struct chfs_node_ref), 0, 0,
59 		0, "chfs_nrefs_pool", NULL, IPL_NONE, NULL, NULL, NULL);
60 	if (!chfs_nrefs_cache)
61 		goto err_nrefs;
62 
63 	chfs_flash_vnode_cache = pool_cache_init(
64 		sizeof(struct chfs_flash_vnode), 0, 0, 0,
65 		"chfs_flash_vnode_pool", NULL, IPL_NONE, NULL, NULL, NULL);
66 	if (!chfs_flash_vnode_cache)
67 		goto err_flash_vnode;
68 
69 	chfs_flash_dirent_cache = pool_cache_init(
70 		sizeof(struct chfs_flash_dirent_node), 0, 0, 0,
71 		"chfs_flash_dirent_pool", NULL, IPL_NONE, NULL, NULL, NULL);
72 	if (!chfs_flash_dirent_cache)
73 		goto err_flash_dirent;
74 
75 	chfs_flash_dnode_cache = pool_cache_init(
76 		sizeof(struct chfs_flash_data_node), 0, 0, 0,
77 		"chfs_flash_dnode_pool", NULL, IPL_NONE, NULL, NULL, NULL);
78 	if (!chfs_flash_dnode_cache)
79 		goto err_flash_dnode;
80 
81 	chfs_node_frag_cache = pool_cache_init(
82 		sizeof(struct chfs_node_frag), 0, 0, 0,
83 		"chfs_node_frag_pool", NULL, IPL_NONE, NULL, NULL, NULL);
84 	if (!chfs_node_frag_cache)
85 		goto err_node_frag;
86 
87 	chfs_tmp_dnode_cache = pool_cache_init(
88 		sizeof(struct chfs_tmp_dnode), 0, 0, 0,
89 		"chfs_tmp_dnode_pool", NULL, IPL_NONE, NULL, NULL, NULL);
90 	if (!chfs_tmp_dnode_cache)
91 		goto err_tmp_dnode;
92 
93 	chfs_tmp_dnode_info_cache = pool_cache_init(
94 		sizeof(struct chfs_tmp_dnode_info), 0, 0, 0,
95 		"chfs_tmp_dnode_info_pool", NULL, IPL_NONE, NULL, NULL, NULL);
96 	if (!chfs_tmp_dnode_info_cache)
97 		goto err_tmp_dnode_info;
98 
99 	return 0;
100 
101 err_tmp_dnode_info:
102 	pool_cache_destroy(chfs_tmp_dnode_cache);
103 err_tmp_dnode:
104 	pool_cache_destroy(chfs_node_frag_cache);
105 err_node_frag:
106 	pool_cache_destroy(chfs_flash_dnode_cache);
107 err_flash_dnode:
108 	pool_cache_destroy(chfs_flash_dirent_cache);
109 err_flash_dirent:
110 	pool_cache_destroy(chfs_flash_vnode_cache);
111 err_flash_vnode:
112 	pool_cache_destroy(chfs_nrefs_cache);
113 err_nrefs:
114 	pool_cache_destroy(chfs_vnode_cache);
115 err_vnode:
116 
117 	return ENOMEM;
118 }
119 
120 void
121 chfs_destroy_pool_caches()
122 {
123 	if (chfs_vnode_cache)
124 		pool_cache_destroy(chfs_vnode_cache);
125 
126 	if (chfs_nrefs_cache)
127 		pool_cache_destroy(chfs_nrefs_cache);
128 
129 	if (chfs_flash_vnode_cache)
130 		pool_cache_destroy(chfs_flash_vnode_cache);
131 
132 	if (chfs_flash_dirent_cache)
133 		pool_cache_destroy(chfs_flash_dirent_cache);
134 
135 	if (chfs_flash_dnode_cache)
136 		pool_cache_destroy(chfs_flash_dnode_cache);
137 
138 	if (chfs_node_frag_cache)
139 		pool_cache_destroy(chfs_node_frag_cache);
140 
141 	if (chfs_tmp_dnode_cache)
142 		pool_cache_destroy(chfs_tmp_dnode_cache);
143 
144 	if (chfs_tmp_dnode_info_cache)
145 		pool_cache_destroy(chfs_tmp_dnode_info_cache);
146 }
147 
148 struct chfs_vnode_cache *
149 chfs_vnode_cache_alloc(ino_t vno)
150 {
151 	struct chfs_vnode_cache* vc;
152 	vc = pool_cache_get(chfs_vnode_cache, PR_WAITOK);
153 
154 	memset(vc, 0, sizeof(*vc));
155 	vc->vno = vno;
156 	vc->v = (void *)vc;
157 	vc->dirents = (void *)vc;
158 	vc->dnode = (void *)vc;
159 	TAILQ_INIT(&vc->scan_dirents);
160 	vc->highest_version = 0;
161 
162 	return vc;
163 }
164 
165 void
166 chfs_vnode_cache_free(struct chfs_vnode_cache *vc)
167 {
168 	//kmem_free(vc->vno_version, sizeof(uint64_t));
169 	pool_cache_put(chfs_vnode_cache, vc);
170 }
171 
172 /**
173  * chfs_alloc_refblock - allocating a refblock
174  *
175  * Returns a pointer of the first element in the block.
176  *
177  * We are not allocating just one node ref, instead we allocating REFS_BLOCK_LEN
178  * number of node refs, the last element will be a pointer to the next block.
179  * We do this, because we need a chain of nodes which have been ordered by the
180  * physical address of them.
181  *
182  */
183 struct chfs_node_ref*
184 chfs_alloc_refblock(void)
185 {
186 	int i;
187 	struct chfs_node_ref *nref;
188 	nref = pool_cache_get(chfs_nrefs_cache, PR_WAITOK);
189 
190 	for (i = 0; i < REFS_BLOCK_LEN; i++) {
191 		nref[i].nref_lnr = REF_EMPTY_NODE;
192 		nref[i].nref_next = NULL;
193 	}
194 	i = REFS_BLOCK_LEN;
195 	nref[i].nref_lnr = REF_LINK_TO_NEXT;
196 	nref[i].nref_next = NULL;
197 
198 	return nref;
199 }
200 
201 /**
202  * chfs_free_refblock - freeing a refblock
203  */
204 void
205 chfs_free_refblock(struct chfs_node_ref *nref)
206 {
207 	pool_cache_put(chfs_nrefs_cache, nref);
208 }
209 
210 /**
211  * chfs_alloc_node_ref - allocating a node ref from a refblock
212  * @cheb: eraseblock information structure
213  *
214  * Allocating a node ref from a refblock, it there isn't any free element in the
215  * block, a new block will be allocated and be linked to the current block.
216  */
217 struct chfs_node_ref*
218 chfs_alloc_node_ref(struct chfs_eraseblock *cheb)
219 {
220 	struct chfs_node_ref *nref, *new, *old;
221 	old = cheb->last_node;
222 	nref = cheb->last_node;
223 
224 	if (!nref) {
225 		//There haven't been any nref allocated for this block yet
226 		nref = chfs_alloc_refblock();
227 
228 		cheb->first_node = nref;
229 		cheb->last_node = nref;
230 		nref->nref_lnr = cheb->lnr;
231 		KASSERT(cheb->lnr == nref->nref_lnr);
232 
233 		return nref;
234 	}
235 
236 	nref++;
237 	if (nref->nref_lnr == REF_LINK_TO_NEXT) {
238 		new = chfs_alloc_refblock();
239 		nref->nref_next = new;
240 		nref = new;
241 	}
242 
243 	cheb->last_node = nref;
244 	nref->nref_lnr = cheb->lnr;
245 
246 	KASSERT(old->nref_lnr == nref->nref_lnr &&
247 	    nref->nref_lnr == cheb->lnr);
248 
249 	return nref;
250 }
251 
252 /**
253  * chfs_free_node_refs - freeing an eraseblock's node refs
254  * @cheb: eraseblock information structure
255  */
256 void
257 chfs_free_node_refs(struct chfs_eraseblock *cheb)
258 {
259 	struct chfs_node_ref *nref, *block;
260 
261 	block = nref = cheb->first_node;
262 
263 	while (nref) {
264 		if (nref->nref_lnr == REF_LINK_TO_NEXT) {
265 			nref = nref->nref_next;
266 			chfs_free_refblock(block);
267 			block = nref;
268 			continue;
269 		}
270 		nref++;
271 	}
272 }
273 
/*
 * chfs_alloc_dirent - allocate an in-memory directory entry
 * @namesize: number of bytes reserved for the name after the struct
 *
 * Sleeps until memory is available (KM_SLEEP), so it never returns NULL.
 *
 * NOTE(review): chfs_free_dirent() frees sizeof(struct chfs_dirent) +
 * nsize + 1 bytes, so callers presumably pass namesize == name length + 1
 * (including the terminating NUL) — verify against the call sites; a
 * size mismatch between kmem_alloc() and kmem_free() is an error.
 */
struct chfs_dirent*
chfs_alloc_dirent(int namesize)
{
	struct chfs_dirent *ret;
	size_t size = sizeof(struct chfs_dirent) + namesize;

	ret = kmem_alloc(size, KM_SLEEP);
	//ret->alloc_size = size;

	return ret;
}
285 
/*
 * chfs_free_dirent - free a directory entry allocated by chfs_alloc_dirent()
 * @dirent: entry to free; its nsize field must still be valid
 *
 * NOTE(review): the freed size is reconstructed as nsize + 1, assuming
 * the original allocation included the terminating NUL in its namesize
 * argument — confirm against the callers of chfs_alloc_dirent().
 */
void
chfs_free_dirent(struct chfs_dirent *dirent)
{
	//size_t size = dirent->alloc_size;
	size_t size = sizeof(struct chfs_dirent) + dirent->nsize + 1;

	kmem_free(dirent, size);
}
294 
295 struct chfs_full_dnode*
296 chfs_alloc_full_dnode()
297 {
298 	struct chfs_full_dnode *ret;
299 	ret = kmem_alloc(sizeof(struct chfs_full_dnode), KM_SLEEP);
300 	return ret;
301 }
302 
303 void
304 chfs_free_full_dnode(struct chfs_full_dnode *fd)
305 {
306 	kmem_free(fd,(sizeof(struct chfs_full_dnode)));
307 }
308 
309 struct chfs_flash_vnode*
310 chfs_alloc_flash_vnode()
311 {
312 	struct chfs_flash_vnode *ret;
313 	ret = pool_cache_get(chfs_flash_vnode_cache, 0);
314 	return ret;
315 }
316 
317 void
318 chfs_free_flash_vnode(struct chfs_flash_vnode *fvnode)
319 {
320 	pool_cache_put(chfs_flash_vnode_cache, fvnode);
321 }
322 
323 struct chfs_flash_dirent_node*
324 chfs_alloc_flash_dirent()
325 {
326 	struct chfs_flash_dirent_node *ret;
327 	ret = pool_cache_get(chfs_flash_dirent_cache, 0);
328 	return ret;
329 }
330 
331 void
332 chfs_free_flash_dirent(struct chfs_flash_dirent_node *fdnode)
333 {
334 	pool_cache_put(chfs_flash_dirent_cache, fdnode);
335 }
336 
337 struct chfs_flash_data_node*
338 chfs_alloc_flash_dnode()
339 {
340 	struct chfs_flash_data_node *ret;
341 	ret = pool_cache_get(chfs_flash_dnode_cache, 0);
342 	return ret;
343 }
344 
345 void
346 chfs_free_flash_dnode(struct chfs_flash_data_node *fdnode)
347 {
348 	pool_cache_put(chfs_flash_dnode_cache, fdnode);
349 }
350 
351 
352 struct chfs_node_frag*
353 chfs_alloc_node_frag()
354 {
355 	struct chfs_node_frag *ret;
356 	ret = pool_cache_get(chfs_node_frag_cache, 0);
357 	return ret;
358 
359 }
360 
361 void
362 chfs_free_node_frag(struct chfs_node_frag *frag)
363 {
364 	pool_cache_put(chfs_node_frag_cache, frag);
365 }
366 
367 struct chfs_tmp_dnode *
368 chfs_alloc_tmp_dnode()
369 {
370 	struct chfs_tmp_dnode *ret;
371 	ret = pool_cache_get(chfs_tmp_dnode_cache, 0);
372 	ret->next = NULL;
373 	return ret;
374 }
375 
376 void
377 chfs_free_tmp_dnode(struct chfs_tmp_dnode *td)
378 {
379 	pool_cache_put(chfs_tmp_dnode_cache, td);
380 }
381 
382 struct chfs_tmp_dnode_info *
383 chfs_alloc_tmp_dnode_info()
384 {
385 	struct chfs_tmp_dnode_info *ret;
386 	ret = pool_cache_get(chfs_tmp_dnode_info_cache, 0);
387 	ret->tmpnode = NULL;
388 	return ret;
389 }
390 
391 void
392 chfs_free_tmp_dnode_info(struct chfs_tmp_dnode_info *di)
393 {
394 	pool_cache_put(chfs_tmp_dnode_info_cache, di);
395 }
396 
397