/*	$NetBSD: chfs_malloc.c,v 1.7 2021/12/07 21:37:37 andvar Exp $	*/

/*-
 * Copyright (c) 2010 Department of Software Engineering,
 *		      University of Szeged, Hungary
 * Copyright (C) 2010 Tamas Toth <ttoth@inf.u-szeged.hu>
 * Copyright (C) 2010 Adam Hoka <ahoka@NetBSD.org>
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by the Department of Software Engineering, University of Szeged, Hungary
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "chfs.h"
#include <sys/pool.h>

pool_cache_t chfs_vnode_cache;
pool_cache_t chfs_nrefs_cache;
pool_cache_t chfs_flash_vnode_cache;
pool_cache_t chfs_flash_dirent_cache;
pool_cache_t chfs_flash_dnode_cache;
pool_cache_t chfs_node_frag_cache;
pool_cache_t chfs_tmp_dnode_cache;
pool_cache_t chfs_tmp_dnode_info_cache;

/* chfs_alloc_pool_caches - allocating pool caches */
int
chfs_alloc_pool_caches(void)
{
	chfs_vnode_cache = pool_cache_init(
	    sizeof(struct chfs_vnode_cache),
	    0, 0, 0, "chfs_vnode_cache", NULL, IPL_NONE, NULL, NULL,
	    NULL);
	if (!chfs_vnode_cache)
		goto err_vnode;

	chfs_nrefs_cache = pool_cache_init(
	    (REFS_BLOCK_LEN + 1) * sizeof(struct chfs_node_ref), 0, 0,
	    0, "chfs_nrefs_pool", NULL, IPL_NONE, NULL, NULL, NULL);
	if (!chfs_nrefs_cache)
		goto err_nrefs;

	chfs_flash_vnode_cache = pool_cache_init(
	    sizeof(struct chfs_flash_vnode), 0, 0, 0,
	    "chfs_flash_vnode_pool", NULL, IPL_NONE, NULL, NULL, NULL);
	if (!chfs_flash_vnode_cache)
		goto err_flash_vnode;

	chfs_flash_dirent_cache = pool_cache_init(
	    sizeof(struct chfs_flash_dirent_node), 0, 0, 0,
	    "chfs_flash_dirent_pool", NULL, IPL_NONE, NULL, NULL, NULL);
	if (!chfs_flash_dirent_cache)
		goto err_flash_dirent;

	chfs_flash_dnode_cache = pool_cache_init(
	    sizeof(struct chfs_flash_data_node), 0, 0, 0,
	    "chfs_flash_dnode_pool", NULL, IPL_NONE, NULL, NULL, NULL);
	if (!chfs_flash_dnode_cache)
		goto err_flash_dnode;

	chfs_node_frag_cache = pool_cache_init(
	    sizeof(struct chfs_node_frag), 0, 0, 0,
	    "chfs_node_frag_pool", NULL, IPL_NONE, NULL, NULL, NULL);
	if (!chfs_node_frag_cache)
		goto err_node_frag;

	chfs_tmp_dnode_cache = pool_cache_init(
	    sizeof(struct chfs_tmp_dnode), 0, 0, 0,
	    "chfs_tmp_dnode_pool", NULL, IPL_NONE, NULL, NULL, NULL);
	if (!chfs_tmp_dnode_cache)
		goto err_tmp_dnode;

	chfs_tmp_dnode_info_cache = pool_cache_init(
	    sizeof(struct chfs_tmp_dnode_info), 0, 0, 0,
	    "chfs_tmp_dnode_info_pool", NULL, IPL_NONE, NULL, NULL, NULL);
	if (!chfs_tmp_dnode_info_cache)
		goto err_tmp_dnode_info;

	return 0;

err_tmp_dnode_info:
	pool_cache_destroy(chfs_tmp_dnode_cache);
err_tmp_dnode:
	pool_cache_destroy(chfs_node_frag_cache);
err_node_frag:
	pool_cache_destroy(chfs_flash_dnode_cache);
err_flash_dnode:
	pool_cache_destroy(chfs_flash_dirent_cache);
err_flash_dirent:
	pool_cache_destroy(chfs_flash_vnode_cache);
err_flash_vnode:
	pool_cache_destroy(chfs_nrefs_cache);
err_nrefs:
	pool_cache_destroy(chfs_vnode_cache);
err_vnode:

	return ENOMEM;
}

/* chfs_destroy_pool_caches - destroying pool caches */
void
chfs_destroy_pool_caches(void)
{
	if (chfs_vnode_cache)
		pool_cache_destroy(chfs_vnode_cache);

	if (chfs_nrefs_cache)
		pool_cache_destroy(chfs_nrefs_cache);

	if (chfs_flash_vnode_cache)
		pool_cache_destroy(chfs_flash_vnode_cache);

	if (chfs_flash_dirent_cache)
		pool_cache_destroy(chfs_flash_dirent_cache);

	if (chfs_flash_dnode_cache)
		pool_cache_destroy(chfs_flash_dnode_cache);

	if (chfs_node_frag_cache)
		pool_cache_destroy(chfs_node_frag_cache);

	if (chfs_tmp_dnode_cache)
		pool_cache_destroy(chfs_tmp_dnode_cache);

	if (chfs_tmp_dnode_info_cache)
		pool_cache_destroy(chfs_tmp_dnode_info_cache);
}

/* chfs_vnode_cache_alloc - allocating and initializing a vnode cache */
struct chfs_vnode_cache *
chfs_vnode_cache_alloc(ino_t vno)
{
	struct chfs_vnode_cache* vc;
	vc = pool_cache_get(chfs_vnode_cache, PR_WAITOK);

	memset(vc, 0, sizeof(*vc));
	vc->vno = vno;
	/* the vnode cache is the last element of each chain */
	vc->v = (void *)vc;
	vc->dirents = (void *)vc;
	vc->dnode = (void *)vc;
	TAILQ_INIT(&vc->scan_dirents);
	vc->highest_version = 0;

	return vc;
}

/* chfs_vnode_cache_free - freeing a vnode cache */
void
chfs_vnode_cache_free(struct chfs_vnode_cache *vc)
{
	pool_cache_put(chfs_vnode_cache, vc);
}

/*
 * chfs_alloc_refblock - allocating a refblock
 *
 * Returns a pointer to the first element of the block.
 *
 * We do not allocate a single node ref; instead we allocate REFS_BLOCK_LEN
 * node refs at once, plus one extra element that links to the next block.
 * We do this because we need a chain of nodes ordered by their physical
 * address.
 */
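/*
 * Illustrative sketch of a refblock's layout as set up below
 * (REFS_BLOCK_LEN is defined elsewhere in CHFS):
 *
 *	nref[0] .. nref[REFS_BLOCK_LEN - 1]	usable node refs, marked
 *						REF_EMPTY_NODE
 *	nref[REFS_BLOCK_LEN]			link element, marked
 *						REF_LINK_TO_NEXT; its
 *						nref_next stays NULL until
 *						chfs_alloc_node_ref() chains
 *						the next refblock to it
 */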
struct chfs_node_ref*
chfs_alloc_refblock(void)
{
	int i;
	struct chfs_node_ref *nref;
	nref = pool_cache_get(chfs_nrefs_cache, PR_WAITOK);

	for (i = 0; i < REFS_BLOCK_LEN; i++) {
		nref[i].nref_lnr = REF_EMPTY_NODE;
		nref[i].nref_next = NULL;
	}
	i = REFS_BLOCK_LEN;
	nref[i].nref_lnr = REF_LINK_TO_NEXT;
	nref[i].nref_next = NULL;

	return nref;
}

/* chfs_free_refblock - freeing a refblock */
void
chfs_free_refblock(struct chfs_node_ref *nref)
{
	pool_cache_put(chfs_nrefs_cache, nref);
}

/*
 * chfs_alloc_node_ref - allocating a node ref from a refblock
 *
 * Allocates a node ref from a refblock. If there is no free element left in
 * the block, a new block is allocated and linked to the current one.
 */
struct chfs_node_ref*
chfs_alloc_node_ref(struct chfs_eraseblock *cheb)
{
	struct chfs_node_ref *nref, *new, *old __diagused;
	old = cheb->last_node;
	nref = cheb->last_node;

	if (!nref) {
		/* No nref has been allocated for this eraseblock yet. */
		nref = chfs_alloc_refblock();

		cheb->first_node = nref;
		cheb->last_node = nref;
		nref->nref_lnr = cheb->lnr;
		KASSERT(cheb->lnr == nref->nref_lnr);

		return nref;
	}

	nref++;
	if (nref->nref_lnr == REF_LINK_TO_NEXT) {
		/* this was the last element, allocate a new block */
		new = chfs_alloc_refblock();
		nref->nref_next = new;
		nref = new;
	}

	cheb->last_node = nref;
	nref->nref_lnr = cheb->lnr;

	KASSERT(old->nref_lnr == nref->nref_lnr &&
	    nref->nref_lnr == cheb->lnr);

	return nref;
}

/* chfs_free_node_refs - freeing an eraseblock's node refs */
void
chfs_free_node_refs(struct chfs_eraseblock *cheb)
{
	struct chfs_node_ref *nref, *block;

	block = nref = cheb->first_node;

	while (nref) {
		if (nref->nref_lnr == REF_LINK_TO_NEXT) {
			nref = nref->nref_next;
			chfs_free_refblock(block);
			block = nref;
			continue;
		}
		nref++;
	}
}

/* chfs_alloc_dirent - allocating a directory entry */
struct chfs_dirent*
chfs_alloc_dirent(int namesize)
{
	struct chfs_dirent *ret;
	size_t size = sizeof(struct chfs_dirent) + namesize;

	ret = kmem_alloc(size, KM_SLEEP);

	return ret;
}

/* chfs_free_dirent - freeing a directory entry */
void
chfs_free_dirent(struct chfs_dirent *dirent)
{
	size_t size = sizeof(struct chfs_dirent) + dirent->nsize + 1;

	kmem_free(dirent, size);
}

/* chfs_alloc_full_dnode - allocating a full data node */
struct chfs_full_dnode*
chfs_alloc_full_dnode(void)
{
	struct chfs_full_dnode *ret;
	ret = kmem_alloc(sizeof(struct chfs_full_dnode), KM_SLEEP);
	ret->nref = NULL;
	ret->frags = 0;
	return ret;
}

/* chfs_free_full_dnode - freeing a full data node */
void
chfs_free_full_dnode(struct chfs_full_dnode *fd)
{
	kmem_free(fd, sizeof(struct chfs_full_dnode));
}

/* chfs_alloc_flash_vnode - allocating vnode info (used on flash) */
struct chfs_flash_vnode*
chfs_alloc_flash_vnode(void)
{
	struct chfs_flash_vnode *ret;
	ret = pool_cache_get(chfs_flash_vnode_cache, PR_WAITOK);
	return ret;
}

/* chfs_free_flash_vnode - freeing vnode info */
void
chfs_free_flash_vnode(struct chfs_flash_vnode *fvnode)
{
	pool_cache_put(chfs_flash_vnode_cache, fvnode);
}
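/*
 * The allocators below and their free counterparts are thin wrappers around
 * the pool caches set up in chfs_alloc_pool_caches(): besides getting or
 * putting the object, they do at most minimal initialization.
 */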
/* chfs_alloc_flash_dirent - allocating a directory entry (used on flash) */
struct chfs_flash_dirent_node*
chfs_alloc_flash_dirent(void)
{
	struct chfs_flash_dirent_node *ret;
	ret = pool_cache_get(chfs_flash_dirent_cache, PR_WAITOK);
	return ret;
}

/* chfs_free_flash_dirent - freeing a (flash) directory entry */
void
chfs_free_flash_dirent(struct chfs_flash_dirent_node *fdnode)
{
	pool_cache_put(chfs_flash_dirent_cache, fdnode);
}

/* chfs_alloc_flash_dnode - allocating a data node (used on flash) */
struct chfs_flash_data_node*
chfs_alloc_flash_dnode(void)
{
	struct chfs_flash_data_node *ret;
	ret = pool_cache_get(chfs_flash_dnode_cache, PR_WAITOK);
	return ret;
}

/* chfs_free_flash_dnode - freeing a (flash) data node */
void
chfs_free_flash_dnode(struct chfs_flash_data_node *fdnode)
{
	pool_cache_put(chfs_flash_dnode_cache, fdnode);
}

/* chfs_alloc_node_frag - allocating a fragment of a node */
struct chfs_node_frag*
chfs_alloc_node_frag(void)
{
	struct chfs_node_frag *ret;
	ret = pool_cache_get(chfs_node_frag_cache, PR_WAITOK);
	return ret;
}

/* chfs_free_node_frag - freeing a fragment of a node */
void
chfs_free_node_frag(struct chfs_node_frag *frag)
{
	pool_cache_put(chfs_node_frag_cache, frag);
}

/* chfs_alloc_tmp_dnode - allocating a temporarily used dnode */
struct chfs_tmp_dnode *
chfs_alloc_tmp_dnode(void)
{
	struct chfs_tmp_dnode *ret;
	ret = pool_cache_get(chfs_tmp_dnode_cache, PR_WAITOK);
	ret->next = NULL;
	return ret;
}

/* chfs_free_tmp_dnode - freeing a temporarily used dnode */
void
chfs_free_tmp_dnode(struct chfs_tmp_dnode *td)
{
	pool_cache_put(chfs_tmp_dnode_cache, td);
}

/* chfs_alloc_tmp_dnode_info - allocating a temporarily used dnode descriptor */
struct chfs_tmp_dnode_info *
chfs_alloc_tmp_dnode_info(void)
{
	struct chfs_tmp_dnode_info *ret;
	ret = pool_cache_get(chfs_tmp_dnode_info_cache, PR_WAITOK);
	ret->tmpnode = NULL;
	return ret;
}

/* chfs_free_tmp_dnode_info - freeing a temporarily used dnode descriptor */
void
chfs_free_tmp_dnode_info(struct chfs_tmp_dnode_info *di)
{
	pool_cache_put(chfs_tmp_dnode_info_cache, di);
}