/*	$NetBSD: lfs_syscalls.c,v 1.150 2013/10/29 09:53:51 hannken Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2001, 2002, 2003, 2007, 2008
 *    The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Konrad E. Schroder <perseant@hhhh.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (c) 1991, 1993, 1994
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)lfs_syscalls.c	8.10 (Berkeley) 5/14/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: lfs_syscalls.c,v 1.150 2013/10/29 09:53:51 hannken Exp $");

#ifndef LFS
# define LFS		/* for prototypes in syscallargs.h */
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/kernel.h>
#include <sys/kauth.h>
#include <sys/syscallargs.h>

#include <ufs/lfs/ulfs_inode.h>
#include <ufs/lfs/ulfsmount.h>
#include <ufs/lfs/ulfs_extern.h>

#include <ufs/lfs/lfs.h>
#include <ufs/lfs/lfs_kernel.h>
#include <ufs/lfs/lfs_extern.h>

struct buf *lfs_fakebuf(struct lfs *, struct vnode *, int, size_t, void *);
int lfs_fasthashget(dev_t, ino_t, struct vnode **);

pid_t lfs_cleaner_pid = 0;

/*
 * sys_lfs_markv:
 *
 * This will mark inodes and blocks dirty, so they are written into the log.
 * It will block until all the blocks have been written.  The segment create
 * time passed in the block_info and inode_info structures is used to decide
 * if the data is valid for each block (in case some process dirtied a block
 * or inode that is being cleaned between the determination that a block is
 * live and the lfs_markv call).
 *
 *  0 on success
 * -1/errno is returned on error.
 */
#ifdef USE_64BIT_SYSCALLS
int
sys_lfs_markv(struct lwp *l, const struct sys_lfs_markv_args *uap, register_t *retval)
{
	/* {
		syscallarg(fsid_t *) fsidp;
		syscallarg(struct block_info *) blkiov;
		syscallarg(int) blkcnt;
	} */
	BLOCK_INFO *blkiov;
	int blkcnt, error;
	fsid_t fsid;
	struct lfs *fs;
	struct mount *mntp;

	error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_LFS,
	    KAUTH_REQ_SYSTEM_LFS_MARKV, NULL, NULL, NULL);
	if (error)
		return (error);

	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
		return (error);

	if ((mntp = vfs_getvfs(&fsid)) == NULL)
		return (ENOENT);
	fs = VFSTOULFS(mntp)->um_lfs;

	blkcnt = SCARG(uap, blkcnt);
	if ((u_int) blkcnt > LFS_MARKV_MAXBLKCNT)
		return (EINVAL);

	KERNEL_LOCK(1, NULL);
	blkiov = lfs_malloc(fs, blkcnt * sizeof(BLOCK_INFO), LFS_NB_BLKIOV);
	if ((error = copyin(SCARG(uap, blkiov), blkiov,
	     blkcnt * sizeof(BLOCK_INFO))) != 0)
		goto out;

	if ((error = lfs_markv(l->l_proc, &fsid, blkiov, blkcnt)) == 0)
		copyout(blkiov, SCARG(uap, blkiov),
			blkcnt * sizeof(BLOCK_INFO));
    out:
	lfs_free(fs, blkiov, LFS_NB_BLKIOV);
	KERNEL_UNLOCK_ONE(NULL);
	return error;
}
#else
int
sys_lfs_markv(struct lwp *l, const struct sys_lfs_markv_args *uap, register_t *retval)
{
	/* {
		syscallarg(fsid_t *) fsidp;
		syscallarg(struct block_info *) blkiov;
		syscallarg(int) blkcnt;
	} */
	BLOCK_INFO *blkiov;
	BLOCK_INFO_15 *blkiov15;
	int i, blkcnt, error;
	fsid_t fsid;
	struct lfs *fs;
	struct mount *mntp;

	error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_LFS,
	    KAUTH_REQ_SYSTEM_LFS_MARKV, NULL, NULL, NULL);
	if (error)
		return (error);

	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
		return (error);

	if ((mntp = vfs_getvfs(&fsid)) == NULL)
		return (ENOENT);
	fs = VFSTOULFS(mntp)->um_lfs;

	blkcnt = SCARG(uap, blkcnt);
	if ((u_int) blkcnt > LFS_MARKV_MAXBLKCNT)
		return (EINVAL);

	KERNEL_LOCK(1, NULL);
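	/*
	 * Convert the cleaner's legacy BLOCK_INFO_15 records into the
	 * kernel's current BLOCK_INFO layout before calling lfs_markv(),
	 * and convert back before copying the results out.
	 */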
	blkiov = lfs_malloc(fs, blkcnt * sizeof(BLOCK_INFO), LFS_NB_BLKIOV);
	blkiov15 = lfs_malloc(fs, blkcnt * sizeof(BLOCK_INFO_15), LFS_NB_BLKIOV);
	if ((error = copyin(SCARG(uap, blkiov), blkiov15,
	     blkcnt * sizeof(BLOCK_INFO_15))) != 0)
		goto out;

	for (i = 0; i < blkcnt; i++) {
		blkiov[i].bi_inode = blkiov15[i].bi_inode;
		blkiov[i].bi_lbn = blkiov15[i].bi_lbn;
		blkiov[i].bi_daddr = blkiov15[i].bi_daddr;
		blkiov[i].bi_segcreate = blkiov15[i].bi_segcreate;
		blkiov[i].bi_version = blkiov15[i].bi_version;
		blkiov[i].bi_bp = blkiov15[i].bi_bp;
		blkiov[i].bi_size = blkiov15[i].bi_size;
	}

	if ((error = lfs_markv(l->l_proc, &fsid, blkiov, blkcnt)) == 0) {
		for (i = 0; i < blkcnt; i++) {
			blkiov15[i].bi_inode = blkiov[i].bi_inode;
			blkiov15[i].bi_lbn = blkiov[i].bi_lbn;
			blkiov15[i].bi_daddr = blkiov[i].bi_daddr;
			blkiov15[i].bi_segcreate = blkiov[i].bi_segcreate;
			blkiov15[i].bi_version = blkiov[i].bi_version;
			blkiov15[i].bi_bp = blkiov[i].bi_bp;
			blkiov15[i].bi_size = blkiov[i].bi_size;
		}
		copyout(blkiov15, SCARG(uap, blkiov),
			blkcnt * sizeof(BLOCK_INFO_15));
	}
    out:
	lfs_free(fs, blkiov, LFS_NB_BLKIOV);
	lfs_free(fs, blkiov15, LFS_NB_BLKIOV);
	KERNEL_UNLOCK_ONE(NULL);
	return error;
}
#endif

#define	LFS_MARKV_MAX_BLOCKS	(LFS_MAX_BUFS)

int
lfs_markv(struct proc *p, fsid_t *fsidp, BLOCK_INFO *blkiov,
    int blkcnt)
{
	BLOCK_INFO *blkp;
	IFILE *ifp;
	struct buf *bp;
	struct inode *ip = NULL;
	struct lfs *fs;
	struct mount *mntp;
	struct vnode *vp = NULL;
	ino_t lastino;
	daddr_t b_daddr, v_daddr;
	int cnt, error;
	int do_again = 0;
	int numrefed = 0;
	ino_t maxino;
	size_t obsize;

	/* number of blocks/inodes that we have already bwrite'ed */
	int nblkwritten, ninowritten;

	if ((mntp = vfs_getvfs(fsidp)) == NULL)
		return (ENOENT);

	fs = VFSTOULFS(mntp)->um_lfs;

	if (fs->lfs_ronly)
		return EROFS;

	maxino = (lfs_fragstoblks(fs, VTOI(fs->lfs_ivnode)->i_ffs1_blocks) -
		      fs->lfs_cleansz - fs->lfs_segtabsz) * fs->lfs_ifpb;

	cnt = blkcnt;

	if ((error = vfs_busy(mntp, NULL)) != 0)
		return (error);

	/*
	 * Take the seglock here so that, if we have to sleep, our blocks
	 * cannot become invalid in the meantime.
	 *
	 * It is also important to note here that unless we specify SEGM_CKP,
	 * any Ifile blocks that we might be asked to clean will never get
	 * to the disk.
	 */
	lfs_seglock(fs, SEGM_CLEAN | SEGM_CKP | SEGM_SYNC);

	/* Mark blocks/inodes dirty.  */
	error = 0;

	/* these were inside the initialization for the for loop */
	v_daddr = LFS_UNUSED_DADDR;
	lastino = LFS_UNUSED_INUM;
	nblkwritten = ninowritten = 0;
	for (blkp = blkiov; cnt--; ++blkp)
	{
		/* Bounds-check incoming data, avoid panic for failed VGET */
		if (blkp->bi_inode <= 0 || blkp->bi_inode >= maxino) {
			error = EINVAL;
			goto err3;
		}
		/*
		 * Get the IFILE entry (only once) and see if the file still
		 * exists.
		 */
		if (lastino != blkp->bi_inode) {
			/*
			 * Finish the old file, if there was one.  The presence
			 * of a usable vnode in vp is signaled by a valid v_daddr.
			 */
			if (v_daddr != LFS_UNUSED_DADDR) {
				lfs_vunref(vp);
				numrefed--;
			}

			/*
			 * Start a new file
			 */
			lastino = blkp->bi_inode;
			if (blkp->bi_inode == LFS_IFILE_INUM)
				v_daddr = fs->lfs_idaddr;
			else {
				LFS_IENTRY(ifp, fs, blkp->bi_inode, bp);
				/* XXX fix for force write */
				v_daddr = ifp->if_daddr;
				brelse(bp, 0);
			}
			if (v_daddr == LFS_UNUSED_DADDR)
				continue;

			/* Get the vnode/inode. */
			error = lfs_fastvget(mntp, blkp->bi_inode, v_daddr,
					   &vp,
					   (blkp->bi_lbn == LFS_UNUSED_LBN
					    ? blkp->bi_bp
					    : NULL));

			if (!error) {
				numrefed++;
			}
			if (error) {
				DLOG((DLOG_CLEAN, "lfs_markv: lfs_fastvget"
				      " failed with %d (ino %d, segment %d)\n",
				      error, blkp->bi_inode,
				      lfs_dtosn(fs, blkp->bi_daddr)));
				/*
				 * If we got EAGAIN, that means that the
				 * Inode was locked.  This is
				 * recoverable: just clean the rest of
				 * this segment, and let the cleaner try
				 * again with another.  (When the
				 * cleaner runs again, this segment will
				 * sort high on the list, since it is
				 * now almost entirely empty.)  But, we
				 * still set v_daddr = LFS_UNUSED_DADDR
				 * so as not to test this over and over
				 * again.
				 */
				if (error == EAGAIN) {
					error = 0;
					do_again++;
				}
#ifdef DIAGNOSTIC
				else if (error != ENOENT)
					panic("lfs_markv VFS_VGET FAILED");
#endif
				/* lastino = LFS_UNUSED_INUM; */
				v_daddr = LFS_UNUSED_DADDR;
				vp = NULL;
				ip = NULL;
				continue;
			}
			ip = VTOI(vp);
			ninowritten++;
		} else if (v_daddr == LFS_UNUSED_DADDR) {
			/*
			 * This can only happen if the vnode is dead (or
			 * in any case we can't get it...e.g., it is
			 * locked).  Keep going.
			 */
			continue;
		}

		/* Past this point we are guaranteed that vp, ip are valid. */

		/* Can't clean VU_DIROP directories in case of truncation */
		/* XXX - maybe we should mark removed dirs specially? */
		if (vp->v_type == VDIR && (vp->v_uflag & VU_DIROP)) {
			do_again++;
			continue;
		}

		/* If this BLOCK_INFO didn't contain a block, keep going. */
		if (blkp->bi_lbn == LFS_UNUSED_LBN) {
			/* XXX need to make sure that the inode gets written in this case */
			/* XXX but only write the inode if it's the right one */
			if (blkp->bi_inode != LFS_IFILE_INUM) {
				LFS_IENTRY(ifp, fs, blkp->bi_inode, bp);
				if (ifp->if_daddr == blkp->bi_daddr) {
					mutex_enter(&lfs_lock);
					LFS_SET_UINO(ip, IN_CLEANING);
					mutex_exit(&lfs_lock);
				}
				brelse(bp, 0);
			}
			continue;
		}

		b_daddr = 0;
		if (VOP_BMAP(vp, blkp->bi_lbn, NULL, &b_daddr, NULL) ||
		    LFS_DBTOFSB(fs, b_daddr) != blkp->bi_daddr)
		{
			if (lfs_dtosn(fs, LFS_DBTOFSB(fs, b_daddr)) ==
			    lfs_dtosn(fs, blkp->bi_daddr))
			{
				DLOG((DLOG_CLEAN, "lfs_markv: wrong da same seg: %llx vs %llx\n",
				      (long long)blkp->bi_daddr, (long long)LFS_DBTOFSB(fs, b_daddr)));
			}
			do_again++;
			continue;
		}

		/*
		 * Check block sizes.  The blocks being cleaned come from
		 * disk, so they should have the same size as their on-disk
		 * counterparts.
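		 * A mismatch means the file changed underneath the cleaner
		 * (for example, a fragment grew or shrank), so the copy we
		 * were handed is stale and must be rejected.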
		 */
		if (blkp->bi_lbn >= 0)
			obsize = lfs_blksize(fs, ip, blkp->bi_lbn);
		else
			obsize = fs->lfs_bsize;
		/* Check for fragment size change */
		if (blkp->bi_lbn >= 0 && blkp->bi_lbn < ULFS_NDADDR) {
			obsize = ip->i_lfs_fragsize[blkp->bi_lbn];
		}
		if (obsize != blkp->bi_size) {
			DLOG((DLOG_CLEAN, "lfs_markv: ino %d lbn %lld wrong"
			      " size (%ld != %d), try again\n",
			      blkp->bi_inode, (long long)blkp->bi_lbn,
			      (long) obsize, blkp->bi_size));
			do_again++;
			continue;
		}

		/*
		 * If we get to here, then we are keeping the block.  If
		 * it is an indirect block, we want to actually put it
		 * in the buffer cache so that it can be updated in the
		 * finish_meta section.  If it's not, we need to
		 * allocate a fake buffer so that writeseg can perform
		 * the copyin and write the buffer.
		 */
		if (ip->i_number != LFS_IFILE_INUM && blkp->bi_lbn >= 0) {
			/* Data Block */
			bp = lfs_fakebuf(fs, vp, blkp->bi_lbn,
					 blkp->bi_size, blkp->bi_bp);
			/* Pretend we used bread() to get it */
			bp->b_blkno = LFS_FSBTODB(fs, blkp->bi_daddr);
		} else {
			/* Indirect block or ifile */
			if (blkp->bi_size != fs->lfs_bsize &&
			    ip->i_number != LFS_IFILE_INUM)
				panic("lfs_markv: partial indirect block?"
				      " size=%d\n", blkp->bi_size);
			bp = getblk(vp, blkp->bi_lbn, blkp->bi_size, 0, 0);
			if (!(bp->b_oflags & (BO_DONE|BO_DELWRI))) {
				/*
				 * The block in question was not found
				 * in the cache; i.e., the block that
				 * getblk() returned is empty.  So, we
				 * can (and should) copy in the
				 * contents, because we've already
				 * determined that this was the right
				 * version of this block on disk.
				 *
				 * And, it can't have changed underneath
				 * us, because we have the segment lock.
				 */
				error = copyin(blkp->bi_bp, bp->b_data, blkp->bi_size);
				if (error)
					goto err2;
			}
		}
		if ((error = lfs_bwrite_ext(bp, BW_CLEAN)) != 0)
			goto err2;

		nblkwritten++;
		/*
		 * XXX should account indirect blocks and ifile pages as well
		 */
		if (nblkwritten + lfs_lblkno(fs, ninowritten * sizeof (struct ulfs1_dinode))
		    > LFS_MARKV_MAX_BLOCKS) {
			DLOG((DLOG_CLEAN, "lfs_markv: writing %d blks %d inos\n",
			      nblkwritten, ninowritten));
			lfs_segwrite(mntp, SEGM_CLEAN);
			nblkwritten = ninowritten = 0;
		}
	}

	/*
	 * Finish the old file, if there was one
	 */
	if (v_daddr != LFS_UNUSED_DADDR) {
		lfs_vunref(vp);
		numrefed--;
	}

#ifdef DIAGNOSTIC
	if (numrefed != 0)
		panic("lfs_markv: numrefed=%d", numrefed);
#endif
	DLOG((DLOG_CLEAN, "lfs_markv: writing %d blks %d inos (check point)\n",
	      nblkwritten, ninowritten));

	/*
	 * The last write has to be SEGM_SYNC, because of calling semantics.
	 * It also has to be SEGM_CKP, because otherwise we could write
	 * over the newly cleaned data contained in a checkpoint, and then
	 * we'd be unhappy at recovery time.
	 */
	lfs_segwrite(mntp, SEGM_CLEAN | SEGM_CKP | SEGM_SYNC);

	lfs_segunlock(fs);

	vfs_unbusy(mntp, false, NULL);
	if (error)
		return (error);
	else if (do_again)
		return EAGAIN;

	return 0;

err2:
	DLOG((DLOG_CLEAN, "lfs_markv err2\n"));

	/*
	 * XXX we're here because copyin() failed.
	 * XXX it means that we can't trust the cleanerd.  too bad.
	 * XXX how can we recover from this?
	 */

err3:
	/*
	 * XXX should do segwrite here anyway?
	 */

	if (v_daddr != LFS_UNUSED_DADDR) {
		lfs_vunref(vp);
		--numrefed;
	}

	lfs_segunlock(fs);
	vfs_unbusy(mntp, false, NULL);
#ifdef DIAGNOSTIC
	if (numrefed != 0)
		panic("lfs_markv: numrefed=%d", numrefed);
#endif

	return (error);
}

/*
 * sys_lfs_bmapv:
 *
 * This will fill in the current disk address for arrays of blocks.
 *
 *  0 on success
 * -1/errno is returned on error.
 */
#ifdef USE_64BIT_SYSCALLS
int
sys_lfs_bmapv(struct lwp *l, const struct sys_lfs_bmapv_args *uap, register_t *retval)
{
	/* {
		syscallarg(fsid_t *) fsidp;
		syscallarg(struct block_info *) blkiov;
		syscallarg(int) blkcnt;
	} */
	BLOCK_INFO *blkiov;
	int blkcnt, error;
	fsid_t fsid;
	struct lfs *fs;
	struct mount *mntp;

	error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_LFS,
	    KAUTH_REQ_SYSTEM_LFS_BMAPV, NULL, NULL, NULL);
	if (error)
		return (error);

	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
		return (error);

	if ((mntp = vfs_getvfs(&fsid)) == NULL)
		return (ENOENT);
	fs = VFSTOULFS(mntp)->um_lfs;

	blkcnt = SCARG(uap, blkcnt);
	if ((u_int) blkcnt > SIZE_T_MAX / sizeof(BLOCK_INFO))
		return (EINVAL);
	KERNEL_LOCK(1, NULL);
	blkiov = lfs_malloc(fs, blkcnt * sizeof(BLOCK_INFO), LFS_NB_BLKIOV);
	if ((error = copyin(SCARG(uap, blkiov), blkiov,
	     blkcnt * sizeof(BLOCK_INFO))) != 0)
		goto out;

	if ((error = lfs_bmapv(l->l_proc, &fsid, blkiov, blkcnt)) == 0)
		copyout(blkiov, SCARG(uap, blkiov),
			blkcnt * sizeof(BLOCK_INFO));
    out:
	lfs_free(fs, blkiov, LFS_NB_BLKIOV);
	KERNEL_UNLOCK_ONE(NULL);
	return error;
}
#else
int
sys_lfs_bmapv(struct lwp *l, const struct sys_lfs_bmapv_args *uap, register_t *retval)
{
	/* {
		syscallarg(fsid_t *) fsidp;
		syscallarg(struct block_info *) blkiov;
		syscallarg(int) blkcnt;
	} */
	BLOCK_INFO *blkiov;
	BLOCK_INFO_15 *blkiov15;
	int i, blkcnt, error;
	fsid_t fsid;
	struct lfs *fs;
	struct mount *mntp;

	error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_LFS,
	    KAUTH_REQ_SYSTEM_LFS_BMAPV, NULL, NULL, NULL);
	if (error)
		return (error);

	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
		return (error);

	if ((mntp = vfs_getvfs(&fsid)) == NULL)
		return (ENOENT);
	fs = VFSTOULFS(mntp)->um_lfs;

	blkcnt = SCARG(uap, blkcnt);
	if ((size_t) blkcnt > SIZE_T_MAX / sizeof(BLOCK_INFO))
		return (EINVAL);
	KERNEL_LOCK(1, NULL);
	blkiov = lfs_malloc(fs, blkcnt * sizeof(BLOCK_INFO), LFS_NB_BLKIOV);
	blkiov15 = lfs_malloc(fs, blkcnt * sizeof(BLOCK_INFO_15), LFS_NB_BLKIOV);
	if ((error = copyin(SCARG(uap, blkiov), blkiov15,
	     blkcnt * sizeof(BLOCK_INFO_15))) != 0)
		goto out;

	for (i = 0; i < blkcnt; i++) {
		blkiov[i].bi_inode = blkiov15[i].bi_inode;
		blkiov[i].bi_lbn = blkiov15[i].bi_lbn;
		blkiov[i].bi_daddr = blkiov15[i].bi_daddr;
		blkiov[i].bi_segcreate = blkiov15[i].bi_segcreate;
		blkiov[i].bi_version = blkiov15[i].bi_version;
		blkiov[i].bi_bp = blkiov15[i].bi_bp;
		blkiov[i].bi_size = blkiov15[i].bi_size;
	}

	if ((error = lfs_bmapv(l->l_proc, &fsid, blkiov, blkcnt)) == 0) {
		for (i = 0; i < blkcnt; i++) {
			blkiov15[i].bi_inode = blkiov[i].bi_inode;
			blkiov15[i].bi_lbn = blkiov[i].bi_lbn;
			blkiov15[i].bi_daddr = blkiov[i].bi_daddr;
			blkiov15[i].bi_segcreate = blkiov[i].bi_segcreate;
			blkiov15[i].bi_version = blkiov[i].bi_version;
			blkiov15[i].bi_bp = blkiov[i].bi_bp;
			blkiov15[i].bi_size = blkiov[i].bi_size;
		}
		copyout(blkiov15, SCARG(uap, blkiov),
			blkcnt * sizeof(BLOCK_INFO_15));
	}
    out:
	lfs_free(fs, blkiov, LFS_NB_BLKIOV);
	lfs_free(fs, blkiov15, LFS_NB_BLKIOV);
	KERNEL_UNLOCK_ONE(NULL);
	return error;
}
#endif

int
lfs_bmapv(struct proc *p, fsid_t *fsidp, BLOCK_INFO *blkiov, int blkcnt)
{
	BLOCK_INFO *blkp;
	IFILE *ifp;
	struct buf *bp;
	struct inode *ip = NULL;
	struct lfs *fs;
	struct mount *mntp;
	struct ulfsmount *ump;
	struct vnode *vp;
	ino_t lastino;
	daddr_t v_daddr;
	int cnt, error;
	int numrefed = 0;

	lfs_cleaner_pid = p->p_pid;

	if ((mntp = vfs_getvfs(fsidp)) == NULL)
		return (ENOENT);

	ump = VFSTOULFS(mntp);
	if ((error = vfs_busy(mntp, NULL)) != 0)
		return (error);

	cnt = blkcnt;

	fs = VFSTOULFS(mntp)->um_lfs;

	error = 0;

	/* these were inside the initialization for the for loop */
	v_daddr = LFS_UNUSED_DADDR;
	lastino = LFS_UNUSED_INUM;
	for (blkp = blkiov; cnt--; ++blkp)
	{
		/*
		 * Get the IFILE entry (only once) and see if the file still
		 * exists.
		 */
		if (lastino != blkp->bi_inode) {
			/*
			 * Finish the old file, if there was one.  The presence
			 * of a usable vnode in vp is signaled by a valid
			 * v_daddr.
			 */
			if (v_daddr != LFS_UNUSED_DADDR) {
				lfs_vunref(vp);
				if (VTOI(vp)->i_lfs_iflags & LFSI_BMAP)
					vrecycle(vp, NULL);
				numrefed--;
			}

			/*
			 * Start a new file
			 */
			lastino = blkp->bi_inode;
			if (blkp->bi_inode == LFS_IFILE_INUM)
				v_daddr = fs->lfs_idaddr;
			else {
				LFS_IENTRY(ifp, fs, blkp->bi_inode, bp);
				v_daddr = ifp->if_daddr;
				brelse(bp, 0);
			}
			if (v_daddr == LFS_UNUSED_DADDR) {
				blkp->bi_daddr = LFS_UNUSED_DADDR;
				continue;
			}
			/*
			 * A regular call to VFS_VGET could deadlock
			 * here.  Instead, we try an unlocked access.
			 */
			mutex_enter(&ulfs_ihash_lock);
			vp = ulfs_ihashlookup(ump->um_dev, blkp->bi_inode);
			if (vp != NULL && !(vp->v_iflag & VI_XLOCK)) {
				ip = VTOI(vp);
				mutex_enter(vp->v_interlock);
				mutex_exit(&ulfs_ihash_lock);
				if (lfs_vref(vp)) {
					v_daddr = LFS_UNUSED_DADDR;
					continue;
				}
				numrefed++;
			} else {
				mutex_exit(&ulfs_ihash_lock);
				/*
				 * Don't VFS_VGET if we're being unmounted,
				 * since we hold vfs_busy().
				 */
				if (mntp->mnt_iflag & IMNT_UNMOUNT) {
					v_daddr = LFS_UNUSED_DADDR;
					continue;
				}
				error = VFS_VGET(mntp, blkp->bi_inode, &vp);
				if (error) {
					DLOG((DLOG_CLEAN, "lfs_bmapv: vget ino"
					      "%d failed with %d",
					      blkp->bi_inode, error));
					v_daddr = LFS_UNUSED_DADDR;
					continue;
				} else {
					KASSERT(VOP_ISLOCKED(vp));
					VTOI(vp)->i_lfs_iflags |= LFSI_BMAP;
					VOP_UNLOCK(vp);
					numrefed++;
				}
			}
			ip = VTOI(vp);
		} else if (v_daddr == LFS_UNUSED_DADDR) {
			/*
			 * This can only happen if the vnode is dead.
			 * Keep going.  Note that we DO NOT set the
			 * bi_addr to anything -- if we failed to get
			 * the vnode, for example, we want to assume
			 * conservatively that all of its blocks *are*
			 * located in the segment in question.
			 * lfs_markv will throw them out if we are
			 * wrong.
			 */
			/* blkp->bi_daddr = LFS_UNUSED_DADDR; */
			continue;
		}

		/* Past this point we are guaranteed that vp, ip are valid.
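		 *
		 * For each request we either report the inode's own disk
		 * address (bi_lbn == LFS_UNUSED_LBN) or bmap the block and
		 * fill in its current disk address and size.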
		 */

		if (blkp->bi_lbn == LFS_UNUSED_LBN) {
			/*
			 * We just want the inode address, which is
			 * conveniently in v_daddr.
			 */
			blkp->bi_daddr = v_daddr;
		} else {
			daddr_t bi_daddr;

			/* XXX ondisk32 */
			error = VOP_BMAP(vp, blkp->bi_lbn, NULL,
					 &bi_daddr, NULL);
			if (error)
			{
				blkp->bi_daddr = LFS_UNUSED_DADDR;
				continue;
			}
			blkp->bi_daddr = LFS_DBTOFSB(fs, bi_daddr);
			/* Fill in the block size, too */
			if (blkp->bi_lbn >= 0)
				blkp->bi_size = lfs_blksize(fs, ip, blkp->bi_lbn);
			else
				blkp->bi_size = fs->lfs_bsize;
		}
	}

	/*
	 * Finish the old file, if there was one.  The presence
	 * of a usable vnode in vp is signaled by a valid v_daddr.
	 */
	if (v_daddr != LFS_UNUSED_DADDR) {
		lfs_vunref(vp);
		/* Recycle as above. */
		if (ip->i_lfs_iflags & LFSI_BMAP)
			vrecycle(vp, NULL);
		numrefed--;
	}

#ifdef DIAGNOSTIC
	if (numrefed != 0)
		panic("lfs_bmapv: numrefed=%d", numrefed);
#endif

	vfs_unbusy(mntp, false, NULL);

	return 0;
}

/*
 * sys_lfs_segclean:
 *
 * Mark the segment clean.
 *
 *  0 on success
 * -1/errno is returned on error.
 */
int
sys_lfs_segclean(struct lwp *l, const struct sys_lfs_segclean_args *uap, register_t *retval)
{
	/* {
		syscallarg(fsid_t *) fsidp;
		syscallarg(u_long) segment;
	} */
	struct lfs *fs;
	struct mount *mntp;
	fsid_t fsid;
	int error;
	unsigned long segnum;

	error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_LFS,
	    KAUTH_REQ_SYSTEM_LFS_SEGCLEAN, NULL, NULL, NULL);
	if (error)
		return (error);

	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
		return (error);
	if ((mntp = vfs_getvfs(&fsid)) == NULL)
		return (ENOENT);

	fs = VFSTOULFS(mntp)->um_lfs;
	segnum = SCARG(uap, segment);

	if ((error = vfs_busy(mntp, NULL)) != 0)
		return (error);

	KERNEL_LOCK(1, NULL);
	lfs_seglock(fs, SEGM_PROT);
	error = lfs_do_segclean(fs, segnum);
	lfs_segunlock(fs);
	KERNEL_UNLOCK_ONE(NULL);
	vfs_unbusy(mntp, false, NULL);
	return error;
}

/*
 * Actually mark the segment clean.
 * Must be called with the segment lock held.
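 *
 * Returns EBUSY if the segment is the current segment, is still active,
 * or still has live bytes charged to it; EALREADY if it is already clean.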
 */
int
lfs_do_segclean(struct lfs *fs, unsigned long segnum)
{
	extern int lfs_dostats;
	struct buf *bp;
	CLEANERINFO *cip;
	SEGUSE *sup;

	if (lfs_dtosn(fs, fs->lfs_curseg) == segnum) {
		return (EBUSY);
	}

	LFS_SEGENTRY(sup, fs, segnum, bp);
	if (sup->su_nbytes) {
		DLOG((DLOG_CLEAN, "lfs_segclean: not cleaning segment %lu:"
		      " %d live bytes\n", segnum, sup->su_nbytes));
		brelse(bp, 0);
		return (EBUSY);
	}
	if (sup->su_flags & SEGUSE_ACTIVE) {
		DLOG((DLOG_CLEAN, "lfs_segclean: not cleaning segment %lu:"
		      " segment is active\n", segnum));
		brelse(bp, 0);
		return (EBUSY);
	}
	if (!(sup->su_flags & SEGUSE_DIRTY)) {
		DLOG((DLOG_CLEAN, "lfs_segclean: not cleaning segment %lu:"
		      " segment is already clean\n", segnum));
		brelse(bp, 0);
		return (EALREADY);
	}

	fs->lfs_avail += lfs_segtod(fs, 1);
	if (sup->su_flags & SEGUSE_SUPERBLOCK)
		fs->lfs_avail -= lfs_btofsb(fs, LFS_SBPAD);
	if (fs->lfs_version > 1 && segnum == 0 &&
	    fs->lfs_start < lfs_btofsb(fs, LFS_LABELPAD))
		fs->lfs_avail -= lfs_btofsb(fs, LFS_LABELPAD) - fs->lfs_start;
	mutex_enter(&lfs_lock);
	fs->lfs_bfree += sup->su_nsums * lfs_btofsb(fs, fs->lfs_sumsize) +
		lfs_btofsb(fs, sup->su_ninos * fs->lfs_ibsize);
	fs->lfs_dmeta -= sup->su_nsums * lfs_btofsb(fs, fs->lfs_sumsize) +
		lfs_btofsb(fs, sup->su_ninos * fs->lfs_ibsize);
	if (fs->lfs_dmeta < 0)
		fs->lfs_dmeta = 0;
	mutex_exit(&lfs_lock);
	sup->su_flags &= ~SEGUSE_DIRTY;
	LFS_WRITESEGENTRY(sup, fs, segnum, bp);

	LFS_CLEANERINFO(cip, fs, bp);
	++cip->clean;
	--cip->dirty;
	fs->lfs_nclean = cip->clean;
	cip->bfree = fs->lfs_bfree;
	mutex_enter(&lfs_lock);
	cip->avail = fs->lfs_avail - fs->lfs_ravail - fs->lfs_favail;
	wakeup(&fs->lfs_avail);
	mutex_exit(&lfs_lock);
	(void) LFS_BWRITE_LOG(bp);

	if (lfs_dostats)
		++lfs_stats.segs_reclaimed;

	return (0);
}

/*
 * This will block until a segment in file system fsid is written.  A timeout
 * in milliseconds may be specified which will wake the cleaner automatically.
 * An fsid of -1 means any file system, and a timeout of 0 means forever.
 */
int
lfs_segwait(fsid_t *fsidp, struct timeval *tv)
{
	struct mount *mntp;
	void *addr;
	u_long timeout;
	int error;

	KERNEL_LOCK(1, NULL);
	if (fsidp == NULL || (mntp = vfs_getvfs(fsidp)) == NULL)
		addr = &lfs_allclean_wakeup;
	else
		addr = &VFSTOULFS(mntp)->um_lfs->lfs_nextseg;
	/*
	 * XXX THIS COULD SLEEP FOREVER IF TIMEOUT IS {0,0}!
	 * XXX IS THAT WHAT IS INTENDED?
	 */
	timeout = tvtohz(tv);
	error = tsleep(addr, PCATCH | PVFS, "segment", timeout);
	KERNEL_UNLOCK_ONE(NULL);
	return (error == ERESTART ? EINTR : 0);
}

/*
 * sys_lfs_segwait:
 *
 * System call wrapper around lfs_segwait().
 *
 *  0 on success
 *  1 on timeout
 * -1/errno is returned on error.
 */
int
sys___lfs_segwait50(struct lwp *l, const struct sys___lfs_segwait50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(fsid_t *) fsidp;
		syscallarg(struct timeval *) tv;
	} */
	struct timeval atv;
	fsid_t fsid;
	int error;

	/* XXX need we be su to segwait?
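	 * For now we require the same KAUTH_SYSTEM_LFS privilege as the
	 * other cleaner syscalls.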
	 */
	error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_LFS,
	    KAUTH_REQ_SYSTEM_LFS_SEGWAIT, NULL, NULL, NULL);
	if (error)
		return (error);
	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
		return (error);

	if (SCARG(uap, tv)) {
		error = copyin(SCARG(uap, tv), &atv, sizeof(struct timeval));
		if (error)
			return (error);
		if (itimerfix(&atv))
			return (EINVAL);
	} else /* NULL or invalid */
		atv.tv_sec = atv.tv_usec = 0;
	return lfs_segwait(&fsid, &atv);
}

/*
 * VFS_VGET call specialized for the cleaner.  The cleaner already knows the
 * daddr from the ifile, so don't look it up again.  If the cleaner is
 * processing IINFO structures, it may have the ondisk inode already, so
 * don't go retrieving it again.
 *
 * we lfs_vref, and it is the caller's responsibility to lfs_vunref
 * when finished.
 */

int
lfs_fasthashget(dev_t dev, ino_t ino, struct vnode **vpp)
{
	struct vnode *vp;

	mutex_enter(&ulfs_ihash_lock);
	if ((vp = ulfs_ihashlookup(dev, ino)) != NULL) {
		mutex_enter(vp->v_interlock);
		mutex_exit(&ulfs_ihash_lock);
		if (vp->v_iflag & VI_XLOCK) {
			DLOG((DLOG_CLEAN, "lfs_fastvget: ino %d VI_XLOCK\n",
			      ino));
			lfs_stats.clean_vnlocked++;
			mutex_exit(vp->v_interlock);
			return EAGAIN;
		}
		if (lfs_vref(vp)) {
			DLOG((DLOG_CLEAN, "lfs_fastvget: lfs_vref failed"
			      " for ino %d\n", ino));
			lfs_stats.clean_inlocked++;
			return EAGAIN;
		}
	} else {
		mutex_exit(&ulfs_ihash_lock);
	}
	*vpp = vp;

	return (0);
}

int
lfs_fastvget(struct mount *mp, ino_t ino, daddr_t daddr, struct vnode **vpp,
	     struct ulfs1_dinode *dinp)
{
	struct inode *ip;
	struct ulfs1_dinode *dip;
	struct vnode *vp;
	struct ulfsmount *ump;
	dev_t dev;
	int error, retries;
	struct buf *bp;
	struct lfs *fs;

	ump = VFSTOULFS(mp);
	dev = ump->um_dev;
	fs = ump->um_lfs;

	/*
	 * Wait until the filesystem is fully mounted before allowing vget
	 * to complete.  This prevents possible problems with roll-forward.
	 */
	mutex_enter(&lfs_lock);
	while (fs->lfs_flags & LFS_NOTYET) {
		mtsleep(&fs->lfs_flags, PRIBIO+1, "lfs_fnotyet", 0,
			&lfs_lock);
	}
	mutex_exit(&lfs_lock);

	/*
	 * This is playing fast and loose.  Someone may have the inode
	 * locked, in which case they are going to be distinctly unhappy
	 * if we trash something.
	 */

	error = lfs_fasthashget(dev, ino, vpp);
	if (error != 0 || *vpp != NULL)
		return (error);

	/*
	 * getnewvnode(9) will call vfs_busy, which will block if the
	 * filesystem is being unmounted; but umount(9) is waiting for
	 * us because we're already holding the fs busy.
	 * XXXMP
	 */
	if (mp->mnt_iflag & IMNT_UNMOUNT) {
		*vpp = NULL;
		return EDEADLK;
	}
	error = getnewvnode(VT_LFS, mp, lfs_vnodeop_p, NULL, &vp);
	if (error) {
		*vpp = NULL;
		return (error);
	}

	mutex_enter(&ulfs_hashlock);
	error = lfs_fasthashget(dev, ino, vpp);
	if (error != 0 || *vpp != NULL) {
		mutex_exit(&ulfs_hashlock);
		ungetnewvnode(vp);
		return (error);
	}

	/* Allocate new vnode/inode.
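	 * lfs_vcreate() sets up the in-core inode and attaches it to vp.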
	 */
	lfs_vcreate(mp, ino, vp);

	/*
	 * Put it onto its hash chain and lock it so that other requests for
	 * this inode will block if they arrive while we are sleeping waiting
	 * for old data structures to be purged or for the contents of the
	 * disk portion of this inode to be read.
	 */
	ip = VTOI(vp);
	ulfs_ihashins(ip);
	mutex_exit(&ulfs_hashlock);

#ifdef notyet
	/* Not found in the cache => this vnode was loaded only for cleaning. */
	ip->i_lfs_iflags |= LFSI_BMAP;
#endif

	/*
	 * XXX
	 * This may not need to be here, logically it should go down with
	 * the i_devvp initialization.
	 * Ask Kirk.
	 */
	ip->i_lfs = fs;

	/* Read in the disk contents for the inode, copy into the inode. */
	if (dinp) {
		error = copyin(dinp, ip->i_din.ffs1_din, sizeof (struct ulfs1_dinode));
		if (error) {
			DLOG((DLOG_CLEAN, "lfs_fastvget: dinode copyin failed"
			      " for ino %d\n", ino));
			ulfs_ihashrem(ip);

			/* Unlock and discard unneeded inode. */
			VOP_UNLOCK(vp);
			lfs_vunref(vp);
			*vpp = NULL;
			return (error);
		}
		if (ip->i_number != ino)
			panic("lfs_fastvget: I was fed the wrong inode!");
	} else {
		retries = 0;
	    again:
		error = bread(ump->um_devvp, LFS_FSBTODB(fs, daddr), fs->lfs_ibsize,
			      NOCRED, 0, &bp);
		if (error) {
			DLOG((DLOG_CLEAN, "lfs_fastvget: bread failed (%d)\n",
			      error));
			/*
			 * The inode does not contain anything useful, so it
			 * would be misleading to leave it on its hash chain.
			 * Iput() will return it to the free list.
			 */
			ulfs_ihashrem(ip);

			/* Unlock and discard unneeded inode. */
			VOP_UNLOCK(vp);
			lfs_vunref(vp);
			*vpp = NULL;
			return (error);
		}
		dip = lfs_ifind(ump->um_lfs, ino, bp);
		if (dip == NULL) {
			/* Assume write has not completed yet; try again */
			brelse(bp, BC_INVAL);
			++retries;
			if (retries > LFS_IFIND_RETRIES)
				panic("lfs_fastvget: dinode not found");
			DLOG((DLOG_CLEAN, "lfs_fastvget: dinode not found,"
			      " retrying...\n"));
			goto again;
		}
		*ip->i_din.ffs1_din = *dip;
		brelse(bp, 0);
	}
	lfs_vinit(mp, &vp);

	*vpp = vp;

	KASSERT(VOP_ISLOCKED(vp));
	VOP_UNLOCK(vp);

	return (0);
}

/*
 * Make up a "fake" cleaner buffer, copy the data from userland into it.
 */
struct buf *
lfs_fakebuf(struct lfs *fs, struct vnode *vp, int lbn, size_t size, void *uaddr)
{
	struct buf *bp;
	int error;

	KASSERT(VTOI(vp)->i_number != LFS_IFILE_INUM);

	bp = lfs_newbuf(VTOI(vp)->i_lfs, vp, lbn, size, LFS_NB_CLEAN);
	error = copyin(uaddr, bp->b_data, size);
	if (error) {
		lfs_freebuf(fs, bp);
		return NULL;
	}
	KDASSERT(bp->b_iodone == lfs_callback);

#if 0
	mutex_enter(&lfs_lock);
	++fs->lfs_iocount;
	mutex_exit(&lfs_lock);
#endif
	bp->b_bufsize = size;
	bp->b_bcount = size;
	return (bp);
}
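
/*
 * Illustrative sketch (not compiled here): roughly how a userland cleaner
 * might drive these syscalls.  This is a minimal, hypothetical example; the
 * real cleaner lives in libexec/lfs_cleanerd and does considerably more
 * bookkeeping.  It assumes the lfs_bmapv(2)/lfs_markv(2) stubs and the
 * BLOCK_INFO layout from <ufs/lfs/lfs.h>.
 *
 *	fsid_t fsid;		// fsid of the mounted LFS (e.g. from statvfs)
 *	BLOCK_INFO bi[NBLK];	// candidate blocks gathered from a segment
 *
 *	// Ask the kernel where each block currently lives.
 *	if (lfs_bmapv(&fsid, bi, NBLK) < 0)
 *		err(1, "lfs_bmapv");
 *
 *	// Keep only entries whose current disk address still matches the
 *	// copy read from the segment; those are live and must be rewritten.
 *	// (Compaction of bi[] into the first nlive entries is elided.)
 *
 *	// Rewrite the live blocks; EAGAIN means some could not be cleaned
 *	// and the segment should be retried later.
 *	if (lfs_markv(&fsid, bi, nlive) < 0 && errno != EAGAIN)
 *		err(1, "lfs_markv");
 *
 *	// Once nothing live remains, the segment can be reclaimed with
 *	// lfs_segclean(2).
 */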