/*	$NetBSD: chfs_readinode.c,v 1.13 2022/04/08 10:27:04 andvar Exp $	*/

/*-
 * Copyright (c) 2010 Department of Software Engineering,
 *		      University of Szeged, Hungary
 * Copyright (C) 2010 David Tengeri <dtengeri@inf.u-szeged.hu>
 * Copyright (C) 2010 Tamas Toth <ttoth@inf.u-szeged.hu>
 * Copyright (C) 2010 Adam Hoka <ahoka@NetBSD.org>
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by the Department of Software Engineering, University of Szeged, Hungary
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/buf.h>

#include "chfs.h"

/* tmp node operations */
int chfs_check_td_data(struct chfs_mount *,
    struct chfs_tmp_dnode *);
int chfs_check_td_node(struct chfs_mount *,
    struct chfs_tmp_dnode *);
struct chfs_node_ref *chfs_first_valid_data_ref(struct chfs_node_ref *);
int chfs_add_tmp_dnode_to_tree(struct chfs_mount *,
    struct chfs_readinode_info *,
    struct chfs_tmp_dnode *);
void chfs_add_tmp_dnode_to_tdi(struct chfs_tmp_dnode_info *,
    struct chfs_tmp_dnode *);
void chfs_remove_tmp_dnode_from_tdi(struct chfs_tmp_dnode_info *,
    struct chfs_tmp_dnode *);
static void chfs_kill_td(struct chfs_mount *,
    struct chfs_tmp_dnode *);
static void chfs_kill_tdi(struct chfs_mount *,
    struct chfs_tmp_dnode_info *);
/* frag node operations */
struct chfs_node_frag *new_fragment(struct chfs_full_dnode *,
    uint32_t,
    uint32_t);
int no_overlapping_node(struct rb_tree *, struct chfs_node_frag *,
    struct chfs_node_frag *, uint32_t);
int chfs_add_frag_to_fragtree(struct chfs_mount *,
    struct rb_tree *,
    struct chfs_node_frag *);
void chfs_obsolete_node_frag(struct chfs_mount *,
    struct chfs_node_frag *);
/* general node operations */
int chfs_get_data_nodes(struct chfs_mount *,
    struct chfs_inode *,
    struct chfs_readinode_info *);
int chfs_build_fragtree(struct chfs_mount *,
    struct chfs_inode *,
    struct chfs_readinode_info *);


/* tmp node rbtree operations */
static signed int
tmp_node_compare_nodes(void *ctx, const void *n1, const void *n2)
{
	const struct chfs_tmp_dnode_info *tdi1 = n1;
	const struct chfs_tmp_dnode_info *tdi2 = n2;

	return (tdi1->tmpnode->node->ofs - tdi2->tmpnode->node->ofs);
}

static signed int
tmp_node_compare_key(void *ctx, const void *n, const void *key)
{
	const struct chfs_tmp_dnode_info *tdi = n;
	uint64_t ofs = *(const uint64_t *)key;

	return (tdi->tmpnode->node->ofs - ofs);
}

const rb_tree_ops_t tmp_node_rbtree_ops = {
	.rbto_compare_nodes = tmp_node_compare_nodes,
	.rbto_compare_key = tmp_node_compare_key,
	.rbto_node_offset = offsetof(struct chfs_tmp_dnode_info, rb_node),
	.rbto_context = NULL
};


/* frag node rbtree operations */
static signed int
frag_compare_nodes(void *ctx, const void *n1, const void *n2)
{
	const struct chfs_node_frag *frag1 = n1;
	const struct chfs_node_frag *frag2 = n2;

	return (frag1->ofs - frag2->ofs);
}

static signed int
frag_compare_key(void *ctx, const void *n, const void *key)
{
	const struct chfs_node_frag *frag = n;
	uint64_t ofs = *(const uint64_t *)key;

	return (frag->ofs - ofs);
}

const rb_tree_ops_t frag_rbtree_ops = {
	.rbto_compare_nodes = frag_compare_nodes,
	.rbto_compare_key = frag_compare_key,
	.rbto_node_offset = offsetof(struct chfs_node_frag, rb_node),
	.rbto_context = NULL
};


/*
 * chfs_check_td_data - checks the data CRC of the node
 *
 * Returns: 0 - if everything is OK;
 *	    1 - if the CRC is incorrect;
 *	    2 - if the node could not be read back (read error or short read).
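 *
 * The caller must hold chm_lock_mountfields and must not hold
 * chm_lock_sizes (it is taken here while the sizes are corrected).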
 */
int
chfs_check_td_data(struct chfs_mount *chmp,
    struct chfs_tmp_dnode *td)
{
	int err;
	size_t retlen, len, totlen;
	uint32_t crc;
	uint64_t ofs;
	char *buf;
	struct chfs_node_ref *nref = td->node->nref;

	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
	KASSERT(!mutex_owned(&chmp->chm_lock_sizes));

	ofs = CHFS_GET_OFS(nref->nref_offset) + sizeof(struct chfs_flash_data_node);
	len = td->node->size;
	if (!len)
		return 0;

	/* Read data. */
	buf = kmem_alloc(len, KM_SLEEP);
	err = chfs_read_leb(chmp, nref->nref_lnr, buf, ofs, len, &retlen);
	if (err) {
		dbg("error while reading: %d\n", err);
		err = 2;
		goto out;
	}

	/* Check crc. */
	if (len != retlen) {
		dbg("len:%zu, retlen:%zu\n", len, retlen);
		err = 2;
		goto out;
	}
	crc = crc32(0, (uint8_t *)buf, len);

	if (crc != td->data_crc) {
		dbg("crc failed, calculated: 0x%x, orig: 0x%x\n", crc, td->data_crc);
		kmem_free(buf, len);
		return 1;
	}

	/* Correct sizes. */
	CHFS_MARK_REF_NORMAL(nref);
	totlen = CHFS_PAD(sizeof(struct chfs_flash_data_node) + len);

	mutex_enter(&chmp->chm_lock_sizes);
	chfs_change_size_unchecked(chmp, &chmp->chm_blocks[nref->nref_lnr], -totlen);
	chfs_change_size_used(chmp, &chmp->chm_blocks[nref->nref_lnr], totlen);
	mutex_exit(&chmp->chm_lock_sizes);
	KASSERT(chmp->chm_blocks[nref->nref_lnr].used_size <= chmp->chm_ebh->eb_size);

	err = 0;
out:
	kmem_free(buf, len);
	return err;
}

/* chfs_check_td_node - checks a temporary node */
int
chfs_check_td_node(struct chfs_mount *chmp, struct chfs_tmp_dnode *td)
{
	int ret;

	if (CHFS_REF_FLAGS(td->node->nref) != CHFS_UNCHECKED_NODE_MASK)
		return 0;

	ret = chfs_check_td_data(chmp, td);
	return ret;
}

/*
 * chfs_first_valid_data_ref -
 * returns the first valid nref, starting from the given one
 */
struct chfs_node_ref *
chfs_first_valid_data_ref(struct chfs_node_ref *nref)
{
	while (nref) {
		if (!CHFS_REF_OBSOLETE(nref)) {
#ifdef DGB_MSG_GC
			if (nref->nref_lnr == REF_EMPTY_NODE) {
				dbg("FIRST VALID IS EMPTY!\n");
			}
#endif
			return nref;
		}

		if (nref->nref_next) {
			nref = nref->nref_next;
		} else
			break;
	}
	return NULL;
}

/*
 * chfs_add_tmp_dnode_to_tdi -
 * adds a temporary node to a temporary node descriptor
 */
void
chfs_add_tmp_dnode_to_tdi(struct chfs_tmp_dnode_info *tdi,
    struct chfs_tmp_dnode *td)
{
	if (!tdi->tmpnode) {
		/* The chain is empty. */
		tdi->tmpnode = td;
	} else {
		/* Insert into the chain. */
		struct chfs_tmp_dnode *tmp = tdi->tmpnode;
		while (tmp->next) {
			tmp = tmp->next;
		}
		tmp->next = td;
	}
}

/*
 * chfs_remove_tmp_dnode_from_tdi -
 * removes a temporary node from its descriptor
 */
void
chfs_remove_tmp_dnode_from_tdi(struct chfs_tmp_dnode_info *tdi,
    struct chfs_tmp_dnode *td)
{
	if (tdi->tmpnode == td) {
		/* It's the first in the chain. */
		tdi->tmpnode = tdi->tmpnode->next;
	} else {
		/* Remove from the middle of the chain.
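		 * Walk to the element just before td; if td is not in the
		 * chain, nothing is removed.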
		 */
		struct chfs_tmp_dnode *tmp = tdi->tmpnode->next;
		while (tmp->next && tmp->next != td) {
			tmp = tmp->next;
		}
		if (tmp->next) {
			tmp->next = td->next;
		}
	}
}

/* chfs_kill_td - removes all components of a temporary node */
static void
chfs_kill_td(struct chfs_mount *chmp,
    struct chfs_tmp_dnode *td)
{
	struct chfs_vnode_cache *vc;
	if (td->node) {
		mutex_enter(&chmp->chm_lock_vnocache);
		/* Remove the node from the vnode cache's data node chain. */
		vc = chfs_nref_to_vc(td->node->nref);
		chfs_remove_and_obsolete(chmp, vc, td->node->nref, &vc->dnode);
		mutex_exit(&chmp->chm_lock_vnocache);
	}

	chfs_free_tmp_dnode(td);
}

/* chfs_kill_tdi - removes a temporary node descriptor */
static void
chfs_kill_tdi(struct chfs_mount *chmp,
    struct chfs_tmp_dnode_info *tdi)
{
	struct chfs_tmp_dnode *next, *tmp = tdi->tmpnode;

	/* Iterate the chain and remove all temporary nodes from it. */
	while (tmp) {
		next = tmp->next;
		chfs_kill_td(chmp, tmp);
		tmp = next;
	}

	chfs_free_tmp_dnode_info(tdi);
}

/*
 * chfs_add_tmp_dnode_to_tree -
 * adds a temporary node to the temporary tree
 */
int
chfs_add_tmp_dnode_to_tree(struct chfs_mount *chmp,
    struct chfs_readinode_info *rii,
    struct chfs_tmp_dnode *newtd)
{
	uint64_t end_ofs = newtd->node->ofs + newtd->node->size;
	struct chfs_tmp_dnode_info *this;
	struct rb_node *node, *prev_node;
	struct chfs_tmp_dnode_info *newtdi;

	node = rb_tree_find_node(&rii->tdi_root, &newtd->node->ofs);
	if (node) {
		this = (struct chfs_tmp_dnode_info *)node;
		while (this->tmpnode->overlapped) {
			prev_node = rb_tree_iterate(&rii->tdi_root, node, RB_DIR_LEFT);
			if (!prev_node) {
				this->tmpnode->overlapped = 0;
				break;
			}
			node = prev_node;
			this = (struct chfs_tmp_dnode_info *)node;
		}
	}

	while (node) {
		this = (struct chfs_tmp_dnode_info *)node;
		if (this->tmpnode->node->ofs > end_ofs)
			break;

		struct chfs_tmp_dnode *tmp_td = this->tmpnode;
		while (tmp_td) {
			if (tmp_td->version == newtd->version) {
				/* This is a new version of an old node. */
				if (!chfs_check_td_node(chmp, tmp_td)) {
					dbg("calling kill td 0\n");
					chfs_kill_td(chmp, newtd);
					return 0;
				} else {
					chfs_remove_tmp_dnode_from_tdi(this, tmp_td);
					chfs_kill_td(chmp, tmp_td);
					chfs_add_tmp_dnode_to_tdi(this, newtd);
					return 0;
				}
			}
			if (tmp_td->version < newtd->version &&
			    tmp_td->node->ofs >= newtd->node->ofs &&
			    tmp_td->node->ofs + tmp_td->node->size <= end_ofs) {
				/* New node entirely overlaps 'this' */
				if (chfs_check_td_node(chmp, newtd)) {
					dbg("calling kill td 2\n");
					chfs_kill_td(chmp, newtd);
					return 0;
				}
				/* ... and is good.
				 * Kill 'this' and any subsequent nodes which
				 * are also overlapped.
				 */
				while (tmp_td && tmp_td->node->ofs + tmp_td->node->size <= end_ofs) {
					struct rb_node *next = rb_tree_iterate(&rii->tdi_root, this, RB_DIR_RIGHT);
					struct chfs_tmp_dnode_info *next_tdi = (struct chfs_tmp_dnode_info *)next;
					struct chfs_tmp_dnode *next_td = NULL;
					if (tmp_td->next) {
						next_td = tmp_td->next;
					} else if (next_tdi) {
						next_td = next_tdi->tmpnode;
					}
					if (tmp_td->version < newtd->version) {
						chfs_remove_tmp_dnode_from_tdi(this, tmp_td);
						chfs_kill_td(chmp, tmp_td);
						if (!this->tmpnode) {
							rb_tree_remove_node(&rii->tdi_root, this);
							chfs_kill_tdi(chmp, this);
							this = next_tdi;
						}
					}
					tmp_td = next_td;
				}
				continue;
			}
			if (tmp_td->version > newtd->version &&
			    tmp_td->node->ofs <= newtd->node->ofs &&
			    tmp_td->node->ofs + tmp_td->node->size >= end_ofs) {
				/* New node entirely overlapped by 'this' */
				if (!chfs_check_td_node(chmp, tmp_td)) {
					dbg("this version: %llu\n",
					    (unsigned long long)tmp_td->version);
					dbg("this ofs: %llu, size: %u\n",
					    (unsigned long long)tmp_td->node->ofs,
					    tmp_td->node->size);
					dbg("calling kill td 4\n");
					chfs_kill_td(chmp, newtd);
					return 0;
				}
				/* ... but 'this' was bad. Replace it... */
				chfs_remove_tmp_dnode_from_tdi(this, tmp_td);
				chfs_kill_td(chmp, tmp_td);
				if (!this->tmpnode) {
					rb_tree_remove_node(&rii->tdi_root, this);
					chfs_kill_tdi(chmp, this);
				}
				dbg("calling kill td 5\n");
				chfs_kill_td(chmp, newtd);
				break;
			}
			tmp_td = tmp_td->next;
		}
		node = rb_tree_iterate(&rii->tdi_root, node, RB_DIR_RIGHT);
	}

	newtdi = chfs_alloc_tmp_dnode_info();
	chfs_add_tmp_dnode_to_tdi(newtdi, newtd);
	/* We neither completely obsoleted nor were completely
	   obsoleted by an earlier node. Insert into the tree. */
	struct chfs_tmp_dnode_info *tmp_tdi = rb_tree_insert_node(&rii->tdi_root, newtdi);
	if (tmp_tdi != newtdi) {
		chfs_remove_tmp_dnode_from_tdi(newtdi, newtd);
		chfs_add_tmp_dnode_to_tdi(tmp_tdi, newtd);
		chfs_kill_tdi(chmp, newtdi);
	}

	/* If there's anything behind that overlaps us, note it. */
	node = rb_tree_iterate(&rii->tdi_root, node, RB_DIR_LEFT);
	if (node) {
		while (1) {
			this = (struct chfs_tmp_dnode_info *)node;
			if (this->tmpnode->node->ofs + this->tmpnode->node->size > newtd->node->ofs) {
				newtd->overlapped = 1;
			}
			if (!this->tmpnode->overlapped)
				break;

			prev_node = rb_tree_iterate(&rii->tdi_root, node, RB_DIR_LEFT);
			if (!prev_node) {
				this->tmpnode->overlapped = 0;
				break;
			}
			node = prev_node;
		}
	}

	/* If the new node overlaps anything ahead, note it. */
	node = rb_tree_iterate(&rii->tdi_root, node, RB_DIR_RIGHT);
	this = (struct chfs_tmp_dnode_info *)node;
	while (this && this->tmpnode->node->ofs < end_ofs) {
		this->tmpnode->overlapped = 1;
		node = rb_tree_iterate(&rii->tdi_root, node, RB_DIR_RIGHT);
		this = (struct chfs_tmp_dnode_info *)node;
	}
	return 0;
}


/* new_fragment - creates a new fragment for a data node */
struct chfs_node_frag *
new_fragment(struct chfs_full_dnode *fdn, uint32_t ofs, uint32_t size)
{
	struct chfs_node_frag *newfrag;
	newfrag = chfs_alloc_node_frag();
	if (newfrag) {
		/* Initialize fragment.
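		 * A NULL fdn is allowed: it creates a hole fragment that has
		 * no backing data node.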
		 */
		newfrag->ofs = ofs;
		newfrag->size = size;
		newfrag->node = fdn;
		if (newfrag->node) {
			newfrag->node->frags++;
		}
	} else {
		chfs_err("cannot allocate a chfs_node_frag object\n");
	}
	return newfrag;
}

/*
 * no_overlapping_node - inserts a fragment into the fragtree
 * If there is a gap before the new fragment, a hole frag is inserted
 * to fill it.
 */
int
no_overlapping_node(struct rb_tree *fragtree,
    struct chfs_node_frag *newfrag,
    struct chfs_node_frag *this, uint32_t lastend)
{
	if (lastend < newfrag->node->ofs) {
		struct chfs_node_frag *holefrag;

		holefrag = new_fragment(NULL, lastend, newfrag->node->ofs - lastend);
		if (!holefrag) {
			chfs_free_node_frag(newfrag);
			return ENOMEM;
		}

		rb_tree_insert_node(fragtree, holefrag);
	}

	rb_tree_insert_node(fragtree, newfrag);

	return 0;
}

/*
 * chfs_add_frag_to_fragtree -
 * adds a fragment to a data node's fragtree
 */
int
chfs_add_frag_to_fragtree(struct chfs_mount *chmp,
    struct rb_tree *fragtree,
    struct chfs_node_frag *newfrag)
{
	struct chfs_node_frag *this;
	uint32_t lastend;
	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));

	/* Find the fragment that starts at or before the new one. */
	this = (struct chfs_node_frag *)rb_tree_find_node_leq(fragtree, &newfrag->ofs);

	if (this) {
		lastend = this->ofs + this->size;
	} else {
		lastend = 0;
	}

	/* The new fragment is at the end of the file and does not overlap
	 * anything. */
	if (lastend <= newfrag->ofs) {
		if (lastend && (lastend - 1) >> PAGE_SHIFT == newfrag->ofs >> PAGE_SHIFT) {
			if (this->node)
				CHFS_MARK_REF_NORMAL(this->node->nref);
			CHFS_MARK_REF_NORMAL(newfrag->node->nref);
		}
		return no_overlapping_node(fragtree, newfrag, this, lastend);
	}

	if (newfrag->ofs > this->ofs) {
		CHFS_MARK_REF_NORMAL(newfrag->node->nref);
		if (this->node)
			CHFS_MARK_REF_NORMAL(this->node->nref);

		if (this->ofs + this->size > newfrag->ofs + newfrag->size) {
			/* Newfrag is inside of this. */
			struct chfs_node_frag *newfrag2;

			newfrag2 = new_fragment(this->node, newfrag->ofs + newfrag->size,
			    this->ofs + this->size - newfrag->ofs - newfrag->size);
			if (!newfrag2)
				return ENOMEM;

			this->size = newfrag->ofs - this->ofs;

			rb_tree_insert_node(fragtree, newfrag);
			rb_tree_insert_node(fragtree, newfrag2);

			return 0;
		}
		/* Newfrag is bottom of this.
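		 * Trim 'this' so that it ends where newfrag begins, then
		 * insert newfrag after it.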
		 */
		this->size = newfrag->ofs - this->ofs;
		rb_tree_insert_node(fragtree, newfrag);
	} else {
		/* Newfrag starts at the same point. */
		//TODO replace instead of remove and insert
		rb_tree_remove_node(fragtree, this);
		rb_tree_insert_node(fragtree, newfrag);

		if (newfrag->ofs + newfrag->size >= this->ofs + this->size) {
			chfs_obsolete_node_frag(chmp, this);
		} else {
			this->ofs += newfrag->size;
			this->size -= newfrag->size;

			rb_tree_insert_node(fragtree, this);
			return 0;
		}
	}
	/* OK, now we have newfrag added in the correct place in the tree, but
	   frag_next(newfrag) may be a fragment which is overlapped by it
	*/
	while ((this = frag_next(fragtree, newfrag)) && newfrag->ofs + newfrag->size >= this->ofs + this->size) {
		rb_tree_remove_node(fragtree, this);
		chfs_obsolete_node_frag(chmp, this);
	}

	if (!this || newfrag->ofs + newfrag->size == this->ofs)
		return 0;

	this->size = (this->ofs + this->size) - (newfrag->ofs + newfrag->size);
	this->ofs = newfrag->ofs + newfrag->size;

	if (this->node)
		CHFS_MARK_REF_NORMAL(this->node->nref);
	CHFS_MARK_REF_NORMAL(newfrag->node->nref);

	return 0;
}

/*
 * chfs_remove_frags_of_node -
 * removes a node's fragments from a fragtree and DOESN'T OBSOLETE them
 */
void
chfs_remove_frags_of_node(struct chfs_mount *chmp, struct rb_tree *fragtree,
    struct chfs_node_ref *nref)
{
	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
	struct chfs_node_frag *this, *next;

	if (nref == NULL) {
		return;
	}

	/* Iterate the tree and remove every fragment that belongs to nref.
	 * (Hole fragments have no backing node and are skipped.) */
	this = (struct chfs_node_frag *)RB_TREE_MIN(fragtree);
	while (this) {
		next = frag_next(fragtree, this);
		if (this->node && this->node->nref == nref) {
			rb_tree_remove_node(fragtree, this);
			chfs_free_node_frag(this);
		}
		this = next;
	}
}

/*
 * chfs_kill_fragtree -
 * removes all fragments from a fragtree and OBSOLETES them
 */
void
chfs_kill_fragtree(struct chfs_mount *chmp, struct rb_tree *fragtree)
{
	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
	struct chfs_node_frag *this, *next;

	/* Iterate the tree and clean all elements. */
	this = (struct chfs_node_frag *)RB_TREE_MIN(fragtree);
	while (this) {
		next = frag_next(fragtree, this);
		rb_tree_remove_node(fragtree, this);
		chfs_obsolete_node_frag(chmp, this);
		this = next;
	}
}

/* chfs_truncate_fragtree - truncates the tree to a specified size */
uint32_t
chfs_truncate_fragtree(struct chfs_mount *chmp,
    struct rb_tree *fragtree, uint32_t size)
{
	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
	struct chfs_node_frag *frag;

	dbg("truncate to size: %u\n", size);

	frag = (struct chfs_node_frag *)rb_tree_find_node_leq(fragtree, &size);

	/* Find the last frag before size and set its new size. */
	if (frag && frag->ofs != size) {
		if (frag->ofs + frag->size > size) {
			frag->size = size - frag->ofs;
		}
		frag = frag_next(fragtree, frag);
	}

	/* Delete frags after new size.
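	 * Every fragment that starts at or beyond the new size is removed
	 * from the tree and obsoleted.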
	 */
	while (frag && frag->ofs >= size) {
		struct chfs_node_frag *next = frag_next(fragtree, frag);

		rb_tree_remove_node(fragtree, frag);
		chfs_obsolete_node_frag(chmp, frag);
		frag = next;
	}

	if (size == 0) {
		return 0;
	}

	frag = frag_last(fragtree);

	if (!frag) {
		return 0;
	}

	if (frag->ofs + frag->size < size) {
		return frag->ofs + frag->size;
	}

	/* FIXME Should we check the position of the last node? (PAGE_CACHE size, etc.) */
	if (frag->node && (frag->ofs & (PAGE_SIZE - 1)) == 0) {
		frag->node->nref->nref_offset =
		    CHFS_GET_OFS(frag->node->nref->nref_offset) | CHFS_PRISTINE_NODE_MASK;
	}

	return size;
}

/* chfs_obsolete_node_frag - obsoletes a fragment of a node */
void
chfs_obsolete_node_frag(struct chfs_mount *chmp,
    struct chfs_node_frag *this)
{
	struct chfs_vnode_cache *vc;
	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));
	if (this->node) {
		/* The fragment is in a node. */
		KASSERT(this->node->frags != 0);
		this->node->frags--;
		if (this->node->frags == 0) {
			/* This was the node's last fragment. */
			KASSERT(!CHFS_REF_OBSOLETE(this->node->nref));
			mutex_enter(&chmp->chm_lock_vnocache);
			vc = chfs_nref_to_vc(this->node->nref);
			dbg("[MARK] lnr: %u ofs: %u\n", this->node->nref->nref_lnr,
			    this->node->nref->nref_offset);

			chfs_remove_and_obsolete(chmp, vc, this->node->nref, &vc->dnode);
			mutex_exit(&chmp->chm_lock_vnocache);

			chfs_free_full_dnode(this->node);
		} else {
			/* There are more frags in the node. */
			CHFS_MARK_REF_NORMAL(this->node->nref);
		}
	}
	chfs_free_node_frag(this);
}

/* chfs_add_full_dnode_to_inode - adds a data node to an inode */
int
chfs_add_full_dnode_to_inode(struct chfs_mount *chmp,
    struct chfs_inode *ip,
    struct chfs_full_dnode *fd)
{
	int ret;
	struct chfs_node_frag *newfrag;
	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));

	if (unlikely(!fd->size))
		return 0;

	/* Create a new fragment from the data node and add it to the fragtree. */
	newfrag = new_fragment(fd, fd->ofs, fd->size);
	if (unlikely(!newfrag))
		return ENOMEM;

	ret = chfs_add_frag_to_fragtree(chmp, &ip->fragtree, newfrag);
	if (ret)
		return ret;

	/* Check previous fragment. */
	if (newfrag->ofs & (PAGE_SIZE - 1)) {
		struct chfs_node_frag *prev = frag_prev(&ip->fragtree, newfrag);

		CHFS_MARK_REF_NORMAL(fd->nref);
		if (prev->node)
			CHFS_MARK_REF_NORMAL(prev->node->nref);
	}

	/* Check next fragment. */
	if ((newfrag->ofs + newfrag->size) & (PAGE_SIZE - 1)) {
		struct chfs_node_frag *next = frag_next(&ip->fragtree, newfrag);

		if (next) {
			CHFS_MARK_REF_NORMAL(fd->nref);
			if (next->node)
				CHFS_MARK_REF_NORMAL(next->node->nref);
		}
	}

	return 0;
}


/* chfs_get_data_nodes - get temporary nodes of an inode */
int
chfs_get_data_nodes(struct chfs_mount *chmp,
    struct chfs_inode *ip,
    struct chfs_readinode_info *rii)
{
	uint32_t crc;
	int err;
	size_t len, retlen;
	struct chfs_node_ref *nref;
	struct chfs_flash_data_node *dnode;
	struct chfs_tmp_dnode *td;
	char *buf;

	len = sizeof(struct chfs_flash_data_node);
	buf = kmem_alloc(len, KM_SLEEP);
	nref = chfs_first_valid_data_ref(ip->chvc->dnode);

	/* Update highest version.
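	 * Start from the version stored in the vnode cache; it is raised
	 * below as newer data nodes are found.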
	 */
	rii->highest_version = ip->chvc->highest_version;

	while (nref && (struct chfs_vnode_cache *)nref != ip->chvc) {
		err = chfs_read_leb(chmp, nref->nref_lnr, buf, CHFS_GET_OFS(nref->nref_offset), len, &retlen);
		if (err || len != retlen)
			goto out;
		dnode = (struct chfs_flash_data_node *)buf;

		/* Check header crc. */
		crc = crc32(0, (uint8_t *)dnode, CHFS_NODE_HDR_SIZE - 4);
		if (crc != le32toh(dnode->hdr_crc)) {
			chfs_err("CRC check failed. calc: 0x%x orig: 0x%x\n", crc, le32toh(dnode->hdr_crc));
			goto cont;
		}

		/* Check header magic bitmask. */
		if (le16toh(dnode->magic) != CHFS_FS_MAGIC_BITMASK) {
			chfs_err("Wrong magic bitmask.\n");
			goto cont;
		}

		/* Check node crc. */
		crc = crc32(0, (uint8_t *)dnode, sizeof(*dnode) - 4);
		if (crc != le32toh(dnode->node_crc)) {
			chfs_err("Node CRC check failed. calc: 0x%x orig: 0x%x\n", crc, le32toh(dnode->node_crc));
			goto cont;
		}

		td = chfs_alloc_tmp_dnode();
		if (!td) {
			chfs_err("Can't allocate tmp dnode info.\n");
			err = ENOMEM;
			goto out;
		}

		/* We don't check the data CRC here, just add the node to the
		 * temporary node tree, because we don't want to check nodes
		 * which have been overlapped by a new node with a higher
		 * version number.
		 */
		td->node = chfs_alloc_full_dnode();
		if (!td->node) {
			chfs_err("Can't allocate full dnode info.\n");
			err = ENOMEM;
			goto out_tmp_dnode;
		}
		td->version = le64toh(dnode->version);
		td->node->ofs = le64toh(dnode->offset);
		td->data_crc = le32toh(dnode->data_crc);
		td->node->nref = nref;
		td->node->size = le32toh(dnode->data_length);
		td->node->frags = 1;
		td->overlapped = 0;

		if (td->version > rii->highest_version) {
			rii->highest_version = td->version;
		}

		/* Add node to the tree. */
		err = chfs_add_tmp_dnode_to_tree(chmp, rii, td);
		if (err)
			goto out_full_dnode;

cont:
		nref = chfs_first_valid_data_ref(nref->nref_next);
	}

	ip->chvc->highest_version = rii->highest_version;
	kmem_free(buf, len);
	return 0;

out_full_dnode:
	chfs_free_full_dnode(td->node);
out_tmp_dnode:
	chfs_free_tmp_dnode(td);
out:
	kmem_free(buf, len);
	return err;
}


/* chfs_build_fragtree - builds fragtree from temporary tree */
int
chfs_build_fragtree(struct chfs_mount *chmp, struct chfs_inode *ip,
    struct chfs_readinode_info *rii)
{
	struct chfs_tmp_dnode_info *pen, *last, *this;
	struct rb_tree ver_tree;	/* version tree, used only temporarily */
	uint64_t high_ver = 0;
	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));

	rb_tree_init(&ver_tree, &tmp_node_rbtree_ops);

	/* Update highest version and latest node reference. */
	if (rii->mdata_tn) {
		high_ver = rii->mdata_tn->tmpnode->version;
		rii->latest_ref = rii->mdata_tn->tmpnode->node->nref;
	}

	/* Iterate the temporary tree in reverse order. */
	pen = (struct chfs_tmp_dnode_info *)RB_TREE_MAX(&rii->tdi_root);

	while ((last = pen)) {
		pen = (struct chfs_tmp_dnode_info *)rb_tree_iterate(&rii->tdi_root, last, RB_DIR_LEFT);

		/* We build here a version tree from overlapped nodes.
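		 * Overlapping nodes are moved from the temporary tree into
		 * ver_tree until a non-overlapped node is reached, then the
		 * collected group is processed below.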
		 */
		rb_tree_remove_node(&rii->tdi_root, last);
		rb_tree_insert_node(&ver_tree, last);

		if (last->tmpnode->overlapped) {
			if (pen)
				continue;

			last->tmpnode->overlapped = 0;
		}

		this = (struct chfs_tmp_dnode_info *)RB_TREE_MAX(&ver_tree);

		/* Start to build the fragtree. */
		while (this) {
			struct chfs_tmp_dnode_info *vers_next;
			int ret;

			vers_next = (struct chfs_tmp_dnode_info *)rb_tree_iterate(&ver_tree, this, RB_DIR_LEFT);
			rb_tree_remove_node(&ver_tree, this);

			struct chfs_tmp_dnode *tmp_td = this->tmpnode;
			while (tmp_td) {
				struct chfs_tmp_dnode *next_td = tmp_td->next;

				/* Check temporary node. */
				if (chfs_check_td_node(chmp, tmp_td)) {
					if (next_td) {
						chfs_remove_tmp_dnode_from_tdi(this, tmp_td);
						chfs_kill_td(chmp, tmp_td);
					} else {
						break;
					}
				} else {
					if (tmp_td->version > high_ver) {
						high_ver = tmp_td->version;
						dbg("highver: %llu\n", (unsigned long long)high_ver);
						rii->latest_ref = tmp_td->node->nref;
					}

					/* Add node to inode and its fragtree. */
					ret = chfs_add_full_dnode_to_inode(chmp, ip, tmp_td->node);
					if (ret) {
						/* On error, clean the whole version tree. */
						while (1) {
							vers_next = (struct chfs_tmp_dnode_info *)rb_tree_iterate(&ver_tree, this, RB_DIR_LEFT);
							while (tmp_td) {
								next_td = tmp_td->next;

								chfs_free_full_dnode(tmp_td->node);
								chfs_remove_tmp_dnode_from_tdi(this, tmp_td);
								chfs_kill_td(chmp, tmp_td);
								tmp_td = next_td;
							}
							chfs_free_tmp_dnode_info(this);
							this = vers_next;
							if (!this)
								break;
							rb_tree_remove_node(&ver_tree, vers_next);
							chfs_kill_tdi(chmp, vers_next);
						}
						return ret;
					}

					/* Remove temporary node from temporary descriptor.
					 * Shouldn't obsolete tmp_td here, because tmp_td->node
					 * was added to the inode. */
					chfs_remove_tmp_dnode_from_tdi(this, tmp_td);
					chfs_free_tmp_dnode(tmp_td);
				}
				tmp_td = next_td;
			}
			/* Continue with the previous element of the version tree. */
			chfs_kill_tdi(chmp, this);
			this = vers_next;
		}
	}

	return 0;
}

/* chfs_read_inode - checks the state of the inode then reads and builds it */
int
chfs_read_inode(struct chfs_mount *chmp, struct chfs_inode *ip)
{
	struct chfs_vnode_cache *vc = ip->chvc;

	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));

retry:
	mutex_enter(&chmp->chm_lock_vnocache);
	switch (vc->state) {
	case VNO_STATE_UNCHECKED:
		/* FALLTHROUGH */
	case VNO_STATE_CHECKEDABSENT:
		vc->state = VNO_STATE_READING;
		break;
	case VNO_STATE_CHECKING:
		/* FALLTHROUGH */
	case VNO_STATE_GC:
		mutex_exit(&chmp->chm_lock_vnocache);
		goto retry;
		break;
	case VNO_STATE_PRESENT:
		/* FALLTHROUGH */
	case VNO_STATE_READING:
		chfs_err("Reading inode #%llu in state %d!\n",
		    (unsigned long long)vc->vno, vc->state);
		chfs_err("wants to read a nonexistent ino %llu\n",
		    (unsigned long long)vc->vno);
		mutex_exit(&chmp->chm_lock_vnocache);
		return ENOENT;
	default:
		panic("BUG() Bad vno cache state.");
	}
	mutex_exit(&chmp->chm_lock_vnocache);

	return chfs_read_inode_internal(chmp, ip);
}

/*
 * chfs_read_inode_internal - reads and builds an inode
 * First gets the temporary data nodes, then builds the fragtree.
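 * Finally the size stored in the inode's vnode information node is read
 * back and the fragtree is truncated to that size.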
 */
int
chfs_read_inode_internal(struct chfs_mount *chmp, struct chfs_inode *ip)
{
	int err;
	size_t len, retlen;
	char *buf;
	struct chfs_readinode_info rii;
	struct chfs_flash_vnode *fvnode;

	KASSERT(mutex_owned(&chmp->chm_lock_mountfields));

	len = sizeof(*fvnode);

	memset(&rii, 0, sizeof(rii));

	rb_tree_init(&rii.tdi_root, &tmp_node_rbtree_ops);

	/* Build a temporary node tree. */
	err = chfs_get_data_nodes(chmp, ip, &rii);
	if (err) {
		if (ip->chvc->state == VNO_STATE_READING)
			ip->chvc->state = VNO_STATE_CHECKEDABSENT;
		/* FIXME Should we kill fragtree or something here? */
		return err;
	}

	/* Build fragtree from temp nodes. */
	rb_tree_init(&ip->fragtree, &frag_rbtree_ops);

	err = chfs_build_fragtree(chmp, ip, &rii);
	if (err) {
		if (ip->chvc->state == VNO_STATE_READING)
			ip->chvc->state = VNO_STATE_CHECKEDABSENT;
		/* FIXME Should we kill fragtree or something here? */
		return err;
	}

	if (!rii.latest_ref) {
		return 0;
	}

	buf = kmem_alloc(len, KM_SLEEP);

	/* Set inode size from its vnode information node. */
	err = chfs_read_leb(chmp, ip->chvc->v->nref_lnr, buf, CHFS_GET_OFS(ip->chvc->v->nref_offset), len, &retlen);
	if (err || retlen != len) {
		kmem_free(buf, len);
		return err ? err : EIO;
	}

	fvnode = (struct chfs_flash_vnode *)buf;

	dbg("set size from v: %u\n", fvnode->dn_size);
	chfs_set_vnode_size(ITOV(ip), fvnode->dn_size);
	uint32_t retsize = chfs_truncate_fragtree(chmp, &ip->fragtree, fvnode->dn_size);
	if (retsize != fvnode->dn_size) {
		dbg("Truncating failed. It is %u instead of %u\n", retsize, fvnode->dn_size);
	}

	kmem_free(buf, len);

	if (ip->chvc->state == VNO_STATE_READING) {
		ip->chvc->state = VNO_STATE_PRESENT;
	}

	return 0;
}

/* chfs_read_data - reads and checks data of a file */
int
chfs_read_data(struct chfs_mount *chmp, struct vnode *vp,
    struct buf *bp)
{
	off_t ofs;
	struct chfs_node_frag *frag;
	char *buf;
	int err = 0;
	size_t size, retlen;
	uint32_t crc;
	struct chfs_inode *ip = VTOI(vp);
	struct chfs_flash_data_node *dnode;
	struct chfs_node_ref *nref;

	memset(bp->b_data, 0, bp->b_bcount);

	/* Find the fragment that covers the requested offset. */
	ofs = bp->b_blkno * PAGE_SIZE;
	frag = (struct chfs_node_frag *)rb_tree_find_node_leq(&ip->fragtree, &ofs);

	if (!frag || frag->ofs > ofs || frag->ofs + frag->size <= ofs) {
		bp->b_resid = 0;
		dbg("not found in frag tree\n");
		return 0;
	}

	if (!frag->node) {
		dbg("no node in frag\n");
		return 0;
	}

	nref = frag->node->nref;
	size = sizeof(*dnode) + frag->size;

	buf = kmem_alloc(size, KM_SLEEP);

	/* Read node from flash. */
	dbg("reading from lnr: %u, offset: %u, size: %zu\n", nref->nref_lnr, CHFS_GET_OFS(nref->nref_offset), size);
	err = chfs_read_leb(chmp, nref->nref_lnr, buf, CHFS_GET_OFS(nref->nref_offset), size, &retlen);
	if (err) {
		chfs_err("error after reading: %d\n", err);
		goto out;
	}
	if (retlen != size) {
		chfs_err("retlen: %zu != size: %zu\n", retlen, size);
		err = EIO;
		goto out;
	}

	/* Check header crc.
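	 * The header CRC, node CRC and data CRC are all verified below
	 * before the data is copied out to the buffer.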
	 */
	dnode = (struct chfs_flash_data_node *)buf;
	crc = crc32(0, (uint8_t *)dnode, CHFS_NODE_HDR_SIZE - 4);
	if (crc != le32toh(dnode->hdr_crc)) {
		chfs_err("CRC check failed. calc: 0x%x orig: 0x%x\n", crc, le32toh(dnode->hdr_crc));
		err = EIO;
		goto out;
	}

	/* Check header magic bitmask. */
	if (le16toh(dnode->magic) != CHFS_FS_MAGIC_BITMASK) {
		chfs_err("Wrong magic bitmask.\n");
		err = EIO;
		goto out;
	}

	/* Check crc of node. */
	crc = crc32(0, (uint8_t *)dnode, sizeof(*dnode) - 4);
	if (crc != le32toh(dnode->node_crc)) {
		chfs_err("Node CRC check failed. calc: 0x%x orig: 0x%x\n", crc, le32toh(dnode->node_crc));
		err = EIO;
		goto out;
	}

	/* Check crc of data. */
	crc = crc32(0, (uint8_t *)dnode->data, le32toh(dnode->data_length));
	if (crc != le32toh(dnode->data_crc)) {
		chfs_err("Data CRC check failed. calc: 0x%x orig: 0x%x\n", crc, le32toh(dnode->data_crc));
		err = EIO;
		goto out;
	}

	memcpy(bp->b_data, dnode->data, le32toh(dnode->data_length));
	bp->b_resid = 0;

out:
	kmem_free(buf, size);
	return err;
}