1 /* $NetBSD: lfs_vnops.c,v 1.269 2014/07/25 08:20:53 dholland Exp $ */ 2 3 /*- 4 * Copyright (c) 1999, 2000, 2001, 2002, 2003 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Konrad E. Schroder <perseant@hhhh.org>. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 30 */ 31 /* 32 * Copyright (c) 1986, 1989, 1991, 1993, 1995 33 * The Regents of the University of California. All rights reserved. 34 * 35 * Redistribution and use in source and binary forms, with or without 36 * modification, are permitted provided that the following conditions 37 * are met: 38 * 1. 
Redistributions of source code must retain the above copyright 39 * notice, this list of conditions and the following disclaimer. 40 * 2. Redistributions in binary form must reproduce the above copyright 41 * notice, this list of conditions and the following disclaimer in the 42 * documentation and/or other materials provided with the distribution. 43 * 3. Neither the name of the University nor the names of its contributors 44 * may be used to endorse or promote products derived from this software 45 * without specific prior written permission. 46 * 47 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 48 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 49 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 50 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 51 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 52 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 53 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 54 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 55 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 56 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 57 * SUCH DAMAGE. 58 * 59 * @(#)lfs_vnops.c 8.13 (Berkeley) 6/10/95 60 */ 61 62 /* from NetBSD: ufs_vnops.c,v 1.213 2013/06/08 05:47:02 kardel Exp */ 63 /*- 64 * Copyright (c) 2008 The NetBSD Foundation, Inc. 65 * All rights reserved. 66 * 67 * This code is derived from software contributed to The NetBSD Foundation 68 * by Wasabi Systems, Inc. 69 * 70 * Redistribution and use in source and binary forms, with or without 71 * modification, are permitted provided that the following conditions 72 * are met: 73 * 1. Redistributions of source code must retain the above copyright 74 * notice, this list of conditions and the following disclaimer. 75 * 2. 
Redistributions in binary form must reproduce the above copyright 76 * notice, this list of conditions and the following disclaimer in the 77 * documentation and/or other materials provided with the distribution. 78 * 79 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 80 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 81 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 82 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 83 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 84 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 85 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 86 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 87 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 88 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 89 * POSSIBILITY OF SUCH DAMAGE. 90 */ 91 /* 92 * Copyright (c) 1982, 1986, 1989, 1993, 1995 93 * The Regents of the University of California. All rights reserved. 94 * (c) UNIX System Laboratories, Inc. 95 * All or some portions of this file are derived from material licensed 96 * to the University of California by American Telephone and Telegraph 97 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 98 * the permission of UNIX System Laboratories, Inc. 99 * 100 * Redistribution and use in source and binary forms, with or without 101 * modification, are permitted provided that the following conditions 102 * are met: 103 * 1. Redistributions of source code must retain the above copyright 104 * notice, this list of conditions and the following disclaimer. 105 * 2. 
Redistributions in binary form must reproduce the above copyright 106 * notice, this list of conditions and the following disclaimer in the 107 * documentation and/or other materials provided with the distribution. 108 * 3. Neither the name of the University nor the names of its contributors 109 * may be used to endorse or promote products derived from this software 110 * without specific prior written permission. 111 * 112 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 113 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 114 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 115 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 116 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 117 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 118 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 119 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 120 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 121 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 122 * SUCH DAMAGE. 
 *
 *	@(#)ufs_vnops.c	8.28 (Berkeley) 7/31/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: lfs_vnops.c,v 1.269 2014/07/25 08:20:53 dholland Exp $");

#ifdef _KERNEL_OPT
#include "opt_compat_netbsd.h"
#include "opt_uvm_page_trkown.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/resourcevar.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/pool.h>
#include <sys/signalvar.h>
#include <sys/kauth.h>
#include <sys/syslog.h>
#include <sys/fstrans.h>

#include <miscfs/fifofs/fifo.h>
#include <miscfs/genfs/genfs.h>
#include <miscfs/specfs/specdev.h>

#include <ufs/lfs/ulfs_inode.h>
#include <ufs/lfs/ulfsmount.h>
#include <ufs/lfs/ulfs_bswap.h>
#include <ufs/lfs/ulfs_extern.h>

#include <uvm/uvm.h>
#include <uvm/uvm_pmap.h>
#include <uvm/uvm_stat.h>
#include <uvm/uvm_pager.h>

#include <ufs/lfs/lfs.h>
#include <ufs/lfs/lfs_kernel.h>
#include <ufs/lfs/lfs_extern.h>

/* Wakeup channel for the LFS writer daemon (defined elsewhere). */
extern pid_t lfs_writer_daemon;

/*
 * Nonzero (the default) means FSYNC_LAZY requests are ignored rather
 * than queueing the vnode for trickle sync; see lfs_fsync() below.
 */
int lfs_ignore_lazy_sync = 1;

/* Forward declarations for the extended-attribute operations. */
static int lfs_openextattr(void *v);
static int lfs_closeextattr(void *v);
static int lfs_getextattr(void *v);
static int lfs_setextattr(void *v);
static int lfs_listextattr(void *v);
static int lfs_deleteextattr(void *v);

/*
 * A virgin directory (no blushing please).
 *
 * Template for the "." and ".." entries written into a newly created
 * directory; lfs_mkdir() copies and byte-swaps this as needed.  The
 * dotdot reclen (second entry) is recomputed there from the directory
 * block size.
 */
static const struct lfs_dirtemplate mastertemplate = {
	0, 12, LFS_DT_DIR, 1, ".",
	0, LFS_DIRBLKSIZ - 12, LFS_DT_DIR, 2, ".."
};

/* Global vfs data structures for lfs.
 */

/*
 * Vnode operations vector for regular LFS vnodes (files, directories,
 * symlinks).  Entries that must participate in segment accounting or
 * dirop tracking point at lfs_* wrappers; the rest fall through to the
 * shared ulfs_* implementations.
 */
int (**lfs_vnodeop_p)(void *);
const struct vnodeopv_entry_desc lfs_vnodeop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ &vop_lookup_desc, ulfs_lookup },		/* lookup */
	{ &vop_create_desc, lfs_create },		/* create */
	{ &vop_whiteout_desc, ulfs_whiteout },		/* whiteout */
	{ &vop_mknod_desc, lfs_mknod },			/* mknod */
	{ &vop_open_desc, ulfs_open },			/* open */
	{ &vop_close_desc, lfs_close },			/* close */
	{ &vop_access_desc, ulfs_access },		/* access */
	{ &vop_getattr_desc, lfs_getattr },		/* getattr */
	{ &vop_setattr_desc, lfs_setattr },		/* setattr */
	{ &vop_read_desc, lfs_read },			/* read */
	{ &vop_write_desc, lfs_write },			/* write */
	{ &vop_fallocate_desc, genfs_eopnotsupp },	/* fallocate */
	{ &vop_fdiscard_desc, genfs_eopnotsupp },	/* fdiscard */
	{ &vop_ioctl_desc, ulfs_ioctl },		/* ioctl */
	{ &vop_fcntl_desc, lfs_fcntl },			/* fcntl */
	{ &vop_poll_desc, ulfs_poll },			/* poll */
	{ &vop_kqfilter_desc, genfs_kqfilter },		/* kqfilter */
	{ &vop_revoke_desc, ulfs_revoke },		/* revoke */
	{ &vop_mmap_desc, lfs_mmap },			/* mmap */
	{ &vop_fsync_desc, lfs_fsync },			/* fsync */
	{ &vop_seek_desc, ulfs_seek },			/* seek */
	{ &vop_remove_desc, lfs_remove },		/* remove */
	{ &vop_link_desc, lfs_link },			/* link */
	{ &vop_rename_desc, lfs_rename },		/* rename */
	{ &vop_mkdir_desc, lfs_mkdir },			/* mkdir */
	{ &vop_rmdir_desc, lfs_rmdir },			/* rmdir */
	{ &vop_symlink_desc, lfs_symlink },		/* symlink */
	{ &vop_readdir_desc, ulfs_readdir },		/* readdir */
	{ &vop_readlink_desc, ulfs_readlink },		/* readlink */
	{ &vop_abortop_desc, ulfs_abortop },		/* abortop */
	{ &vop_inactive_desc, lfs_inactive },		/* inactive */
	{ &vop_reclaim_desc, lfs_reclaim },		/* reclaim */
	{ &vop_lock_desc, ulfs_lock },			/* lock */
	{ &vop_unlock_desc, ulfs_unlock },		/* unlock */
	{ &vop_bmap_desc, ulfs_bmap },			/* bmap */
	{ &vop_strategy_desc, lfs_strategy },		/* strategy */
	{ &vop_print_desc, ulfs_print },		/* print */
	{ &vop_islocked_desc, ulfs_islocked },		/* islocked */
	{ &vop_pathconf_desc, ulfs_pathconf },		/* pathconf */
	{ &vop_advlock_desc, ulfs_advlock },		/* advlock */
	{ &vop_bwrite_desc, lfs_bwrite },		/* bwrite */
	{ &vop_getpages_desc, lfs_getpages },		/* getpages */
	{ &vop_putpages_desc, lfs_putpages },		/* putpages */
	{ &vop_openextattr_desc, lfs_openextattr },	/* openextattr */
	{ &vop_closeextattr_desc, lfs_closeextattr },	/* closeextattr */
	{ &vop_getextattr_desc, lfs_getextattr },	/* getextattr */
	{ &vop_setextattr_desc, lfs_setextattr },	/* setextattr */
	{ &vop_listextattr_desc, lfs_listextattr },	/* listextattr */
	{ &vop_deleteextattr_desc, lfs_deleteextattr },	/* deleteextattr */
	{ NULL, NULL }
};
const struct vnodeopv_desc lfs_vnodeop_opv_desc =
	{ &lfs_vnodeop_p, lfs_vnodeop_entries };

/*
 * Vnode operations vector for special-device vnodes on an LFS.
 * Device I/O goes through spec_*; attribute and lifecycle operations
 * still go through lfs_*/ulfs_* so inode state stays consistent.
 */
int (**lfs_specop_p)(void *);
const struct vnodeopv_entry_desc lfs_specop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ &vop_lookup_desc, spec_lookup },		/* lookup */
	{ &vop_create_desc, spec_create },		/* create */
	{ &vop_mknod_desc, spec_mknod },		/* mknod */
	{ &vop_open_desc, spec_open },			/* open */
	{ &vop_close_desc, lfsspec_close },		/* close */
	{ &vop_access_desc, ulfs_access },		/* access */
	{ &vop_getattr_desc, lfs_getattr },		/* getattr */
	{ &vop_setattr_desc, lfs_setattr },		/* setattr */
	{ &vop_read_desc, ulfsspec_read },		/* read */
	{ &vop_write_desc, ulfsspec_write },		/* write */
	{ &vop_fallocate_desc, spec_fallocate },	/* fallocate */
	{ &vop_fdiscard_desc, spec_fdiscard },		/* fdiscard */
	{ &vop_ioctl_desc, spec_ioctl },		/* ioctl */
	{ &vop_fcntl_desc, ulfs_fcntl },		/* fcntl */
	{ &vop_poll_desc, spec_poll },			/* poll */
	{ &vop_kqfilter_desc, spec_kqfilter },		/* kqfilter */
	{ &vop_revoke_desc, spec_revoke },		/* revoke */
	{ &vop_mmap_desc, spec_mmap },			/* mmap */
	{ &vop_fsync_desc, spec_fsync },		/* fsync */
	{ &vop_seek_desc, spec_seek },			/* seek */
	{ &vop_remove_desc, spec_remove },		/* remove */
	{ &vop_link_desc, spec_link },			/* link */
	{ &vop_rename_desc, spec_rename },		/* rename */
	{ &vop_mkdir_desc, spec_mkdir },		/* mkdir */
	{ &vop_rmdir_desc, spec_rmdir },		/* rmdir */
	{ &vop_symlink_desc, spec_symlink },		/* symlink */
	{ &vop_readdir_desc, spec_readdir },		/* readdir */
	{ &vop_readlink_desc, spec_readlink },		/* readlink */
	{ &vop_abortop_desc, spec_abortop },		/* abortop */
	{ &vop_inactive_desc, lfs_inactive },		/* inactive */
	{ &vop_reclaim_desc, lfs_reclaim },		/* reclaim */
	{ &vop_lock_desc, ulfs_lock },			/* lock */
	{ &vop_unlock_desc, ulfs_unlock },		/* unlock */
	{ &vop_bmap_desc, spec_bmap },			/* bmap */
	{ &vop_strategy_desc, spec_strategy },		/* strategy */
	{ &vop_print_desc, ulfs_print },		/* print */
	{ &vop_islocked_desc, ulfs_islocked },		/* islocked */
	{ &vop_pathconf_desc, spec_pathconf },		/* pathconf */
	{ &vop_advlock_desc, spec_advlock },		/* advlock */
	{ &vop_bwrite_desc, vn_bwrite },		/* bwrite */
	{ &vop_getpages_desc, spec_getpages },		/* getpages */
	{ &vop_putpages_desc, spec_putpages },		/* putpages */
	{ &vop_openextattr_desc, lfs_openextattr },	/* openextattr */
	{ &vop_closeextattr_desc, lfs_closeextattr },	/* closeextattr */
	{ &vop_getextattr_desc, lfs_getextattr },	/* getextattr */
	{ &vop_setextattr_desc, lfs_setextattr },	/* setextattr */
	{ &vop_listextattr_desc, lfs_listextattr },	/* listextattr */
	{ &vop_deleteextattr_desc, lfs_deleteextattr },	/* deleteextattr */
	{ NULL, NULL }
};
const struct vnodeopv_desc lfs_specop_opv_desc =
	{ &lfs_specop_p, lfs_specop_entries };

/*
 * Vnode operations vector for FIFO vnodes on an LFS.  Most operations
 * bypass to the fifofs implementation; inode-related ones stay local.
 */
int (**lfs_fifoop_p)(void *);
const struct vnodeopv_entry_desc lfs_fifoop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ &vop_lookup_desc, vn_fifo_bypass },		/* lookup */
	{ &vop_create_desc, vn_fifo_bypass },		/* create */
	{ &vop_mknod_desc, vn_fifo_bypass },		/* mknod */
	{ &vop_open_desc, vn_fifo_bypass },		/* open */
	{ &vop_close_desc, lfsfifo_close },		/* close */
	{ &vop_access_desc, ulfs_access },		/* access */
	{ &vop_getattr_desc, lfs_getattr },		/* getattr */
	{ &vop_setattr_desc, lfs_setattr },		/* setattr */
	{ &vop_read_desc, ulfsfifo_read },		/* read */
	{ &vop_write_desc, ulfsfifo_write },		/* write */
	{ &vop_fallocate_desc, vn_fifo_bypass },	/* fallocate */
	{ &vop_fdiscard_desc, vn_fifo_bypass },		/* fdiscard */
	{ &vop_ioctl_desc, vn_fifo_bypass },		/* ioctl */
	{ &vop_fcntl_desc, ulfs_fcntl },		/* fcntl */
	{ &vop_poll_desc, vn_fifo_bypass },		/* poll */
	{ &vop_kqfilter_desc, vn_fifo_bypass },		/* kqfilter */
	{ &vop_revoke_desc, vn_fifo_bypass },		/* revoke */
	{ &vop_mmap_desc, vn_fifo_bypass },		/* mmap */
	{ &vop_fsync_desc, vn_fifo_bypass },		/* fsync */
	{ &vop_seek_desc, vn_fifo_bypass },		/* seek */
	{ &vop_remove_desc, vn_fifo_bypass },		/* remove */
	{ &vop_link_desc, vn_fifo_bypass },		/* link */
	{ &vop_rename_desc, vn_fifo_bypass },		/* rename */
	{ &vop_mkdir_desc, vn_fifo_bypass },		/* mkdir */
	{ &vop_rmdir_desc, vn_fifo_bypass },		/* rmdir */
	{ &vop_symlink_desc, vn_fifo_bypass },		/* symlink */
	{ &vop_readdir_desc, vn_fifo_bypass },		/* readdir */
	{ &vop_readlink_desc, vn_fifo_bypass },		/* readlink */
	{ &vop_abortop_desc, vn_fifo_bypass },		/* abortop */
	{ &vop_inactive_desc, lfs_inactive },		/* inactive */
	{ &vop_reclaim_desc, lfs_reclaim },		/* reclaim */
	{ &vop_lock_desc, ulfs_lock },			/* lock */
	{ &vop_unlock_desc, ulfs_unlock },		/* unlock */
	{ &vop_bmap_desc, vn_fifo_bypass },		/* bmap */
	{ &vop_strategy_desc, vn_fifo_bypass },		/* strategy */
	{ &vop_print_desc, ulfs_print },		/* print */
	{ &vop_islocked_desc, ulfs_islocked },		/* islocked */
	{ &vop_pathconf_desc, vn_fifo_bypass },		/* pathconf */
	{ &vop_advlock_desc, vn_fifo_bypass },		/* advlock */
	{ &vop_bwrite_desc, lfs_bwrite },		/* bwrite */
	{ &vop_putpages_desc, vn_fifo_bypass },		/* putpages */
	{ &vop_openextattr_desc, lfs_openextattr },	/* openextattr */
	{ &vop_closeextattr_desc, lfs_closeextattr },	/* closeextattr */
	{ &vop_getextattr_desc, lfs_getextattr },	/* getextattr */
	{ &vop_setextattr_desc, lfs_setextattr },	/* setextattr */
	{ &vop_listextattr_desc, lfs_listextattr },	/* listextattr */
	{ &vop_deleteextattr_desc, lfs_deleteextattr },	/* deleteextattr */
	{ NULL, NULL }
};
const struct vnodeopv_desc lfs_fifoop_opv_desc =
	{ &lfs_fifoop_p, lfs_fifoop_entries };

/*
 * Pull in the shared ulfs read/write code, compiled with LFS_READWRITE
 * so it produces the lfs_read/lfs_write variants used in the tables
 * above.
 */
#define	LFS_READWRITE
#include <ufs/lfs/ulfs_readwrite.c>
#undef	LFS_READWRITE

/*
 * Synch an open file.
 *
 * Flushes the vnode's pages (and, unless FSYNC_DATAONLY, its inode)
 * according to ap->a_flags.  Returns 0 on success or an errno value.
 */
/* ARGSUSED */
int
lfs_fsync(void *v)
{
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		kauth_cred_t a_cred;
		int a_flags;
		off_t offlo;
		off_t offhi;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	int error, wait;
	struct inode *ip = VTOI(vp);
	struct lfs *fs = ip->i_lfs;

	/* If we're mounted read-only, don't try to sync. */
	if (fs->lfs_ronly)
		return 0;

	/* If a removed vnode is being cleaned, no need to sync here. */
	if ((ap->a_flags & FSYNC_RECLAIM) != 0 && ip->i_mode == 0)
		return 0;

	/*
	 * Trickle sync simply adds this vnode to the pager list, as if
	 * the pagedaemon had requested a pageout.  Ignored entirely
	 * when lfs_ignore_lazy_sync is set (the default).
	 */
	if (ap->a_flags & FSYNC_LAZY) {
		if (lfs_ignore_lazy_sync == 0) {
			mutex_enter(&lfs_lock);
			/*
			 * NOTE(review): this tests ip->i_flags (file
			 * flags), not ip->i_flag (inode state flags);
			 * confirm IN_PAGING really lives in i_flags.
			 */
			if (!(ip->i_flags & IN_PAGING)) {
				ip->i_flags |= IN_PAGING;
				TAILQ_INSERT_TAIL(&fs->lfs_pchainhd, ip,
				    i_lfs_pchain);
			}
			wakeup(&lfs_writer_daemon);
			mutex_exit(&lfs_lock);
		}
		return 0;
	}

	/*
	 * If a vnode is being cleaned, flush it out before we try to
	 * reuse it.  This prevents the cleaner from writing files twice
	 * in the same partial segment, causing an accounting underflow.
	 */
	if (ap->a_flags & FSYNC_RECLAIM && ip->i_flags & IN_CLEANING) {
		lfs_vflush(vp);
	}

	/*
	 * Push the pages.  EAGAIN from VOP_PUTPAGES means the segment
	 * writer needs space; sleep briefly on lfs_avail and retry.
	 */
	wait = (ap->a_flags & FSYNC_WAIT);
	do {
		mutex_enter(vp->v_interlock);
		error = VOP_PUTPAGES(vp, trunc_page(ap->a_offlo),
		    round_page(ap->a_offhi),
		    PGO_CLEANIT | (wait ? PGO_SYNCIO : 0));
		if (error == EAGAIN) {
			mutex_enter(&lfs_lock);
			mtsleep(&fs->lfs_avail, PCATCH | PUSER, "lfs_fsync",
			    hz / 100 + 1, &lfs_lock);
			mutex_exit(&lfs_lock);
		}
	} while (error == EAGAIN);
	if (error)
		return error;

	/* Unless only the data was requested, update the inode too. */
	if ((ap->a_flags & FSYNC_DATAONLY) == 0)
		error = lfs_update(vp, NULL, NULL, wait ? UPDATE_WAIT : 0);

	/* FSYNC_CACHE: also flush the underlying device's write cache. */
	if (error == 0 && ap->a_flags & FSYNC_CACHE) {
		int l = 0;
		error = VOP_IOCTL(ip->i_devvp, DIOCCACHESYNC, &l, FWRITE,
		    curlwp->l_cred);
	}
	if (wait && !VPISEMPTY(vp))
		LFS_SET_UINO(ip, IN_MODIFIED);

	return error;
}

/*
 * Take IN_ADIROP off, then call ulfs_inactive.
 */
int
lfs_inactive(void *v)
{
	struct vop_inactive_args /* {
		struct vnode *a_vp;
	} */ *ap = v;

	lfs_unmark_vnode(ap->a_vp);

	/*
	 * The Ifile is only ever inactivated on unmount.
	 * Streamline this process by not giving it more dirty blocks.
	 */
	if (VTOI(ap->a_vp)->i_number == LFS_IFILE_INUM) {
		mutex_enter(&lfs_lock);
		LFS_CLR_UINO(VTOI(ap->a_vp), IN_ALLMOD);
		mutex_exit(&lfs_lock);
		VOP_UNLOCK(ap->a_vp);
		return 0;
	}

#ifdef DEBUG
	/*
	 * This might happen on unmount.
	 * XXX If it happens at any other time, it should be a panic.
	 */
	if (ap->a_vp->v_uflag & VU_DIROP) {
		struct inode *ip = VTOI(ap->a_vp);
		printf("lfs_inactive: inactivating VU_DIROP? ino = %d\n",
		    (int)ip->i_number);
	}
#endif /* DEBUG */

	return ulfs_inactive(v);
}

/*
 * Begin a directory operation: reserve segment space for the metadata
 * the operation may dirty, wait out any active segment writer, and
 * throttle if too many dirop vnodes are outstanding.  On success the
 * filesystem's dirop count is raised and dvp (and vp, if non-NULL) are
 * referenced and marked; the caller must undo this with
 * UNMARK_VNODE()/lfs_unset_dirop().  Both vnodes must be locked.
 */
int
lfs_set_dirop(struct vnode *dvp, struct vnode *vp)
{
	struct lfs *fs;
	int error;

	KASSERT(VOP_ISLOCKED(dvp));
	KASSERT(vp == NULL || VOP_ISLOCKED(vp));

	fs = VTOI(dvp)->i_lfs;

	ASSERT_NO_SEGLOCK(fs);
	/*
	 * LFS_NRESERVE calculates direct and indirect blocks as well
	 * as an inode block; an overestimate in most cases.
	 */
	if ((error = lfs_reserve(fs, dvp, vp, LFS_NRESERVE(fs))) != 0)
		return (error);

    restart:
	mutex_enter(&lfs_lock);
	if (fs->lfs_dirops == 0) {
		mutex_exit(&lfs_lock);
		lfs_check(dvp, LFS_UNUSED_LBN, 0);
		mutex_enter(&lfs_lock);
	}
	/* Wait while the segment writer has the filesystem. */
	while (fs->lfs_writer) {
		error = mtsleep(&fs->lfs_dirops, (PRIBIO + 1) | PCATCH,
		    "lfs_sdirop", 0, &lfs_lock);
		if (error == EINTR) {
			mutex_exit(&lfs_lock);
			goto unreserve;
		}
	}
	/*
	 * Too many dirop vnodes and none active: kick the writer
	 * daemon to flush some, yield, and retry from the top.
	 */
	if (lfs_dirvcount > LFS_MAX_DIROP && fs->lfs_dirops == 0) {
		wakeup(&lfs_writer_daemon);
		mutex_exit(&lfs_lock);
		preempt();
		goto restart;
	}

	/* Otherwise sleep until the count drops, then retry. */
	if (lfs_dirvcount > LFS_MAX_DIROP) {
		DLOG((DLOG_DIROP, "lfs_set_dirop: sleeping with dirops=%d, "
		      "dirvcount=%d\n", fs->lfs_dirops, lfs_dirvcount));
		if ((error = mtsleep(&lfs_dirvcount,
		    PCATCH | PUSER | PNORELOCK, "lfs_maxdirop", 0,
		    &lfs_lock)) != 0) {
			goto unreserve;
		}
		goto restart;
	}

	++fs->lfs_dirops;
	/* fs->lfs_doifile = 1; */ /* XXX why? --ks */
	mutex_exit(&lfs_lock);

	/* Hold a reference so SET_ENDOP will be happy */
	vref(dvp);
	if (vp) {
		vref(vp);
		MARK_VNODE(vp);
	}

	MARK_VNODE(dvp);
	return 0;

unreserve:
	lfs_reserve(fs, dvp, vp, -LFS_NRESERVE(fs));
	return error;
}

/*
 * Opposite of lfs_set_dirop... mostly. For now at least must call
 * UNMARK_VNODE(dvp) explicitly first.
 * (XXX: clean that up)
 */
void
lfs_unset_dirop(struct lfs *fs, struct vnode *dvp, const char *str)
{
	mutex_enter(&lfs_lock);
	--fs->lfs_dirops;
	if (!fs->lfs_dirops) {
		/* Last dirop out: there must be no active-dirop vnodes. */
		if (fs->lfs_nadirop) {
			panic("lfs_unset_dirop: %s: no dirops but "
			      " nadirop=%d", str,
			      fs->lfs_nadirop);
		}
		/* Let a waiting segment writer proceed. */
		wakeup(&fs->lfs_writer);
		mutex_exit(&lfs_lock);
		lfs_check(dvp, LFS_UNUSED_LBN, 0);
	} else {
		mutex_exit(&lfs_lock);
	}
	/* Release the space reserved by lfs_set_dirop(). */
	lfs_reserve(fs, dvp, NULL, -LFS_NRESERVE(fs));
}

/*
 * Mark vp as participating in an active directory operation: set
 * IN_ADIROP, put it on the filesystem's dirop chain with VU_DIROP set,
 * and take a reference so it cannot be recycled mid-operation.
 */
void
lfs_mark_vnode(struct vnode *vp)
{
	struct inode *ip = VTOI(vp);
	struct lfs *fs = ip->i_lfs;

	mutex_enter(&lfs_lock);
	if (!(ip->i_flag & IN_ADIROP)) {
		if (!(vp->v_uflag & VU_DIROP)) {
			/*
			 * lfs_vref() needs the interlock, which must be
			 * taken without lfs_lock held; drop and retake.
			 */
			mutex_exit(&lfs_lock);
			mutex_enter(vp->v_interlock);
			if (lfs_vref(vp) != 0)
				panic("lfs_mark_vnode: could not vref");
			mutex_enter(&lfs_lock);
			++lfs_dirvcount;
			++fs->lfs_dirvcount;
			TAILQ_INSERT_TAIL(&fs->lfs_dchainhd, ip, i_lfs_dchain);
			vp->v_uflag |= VU_DIROP;
		}
		++fs->lfs_nadirop;
		ip->i_flag &= ~IN_CDIROP;
		ip->i_flag |= IN_ADIROP;
	} else
		KASSERT(vp->v_uflag & VU_DIROP);
	mutex_exit(&lfs_lock);
}

/*
 * Clear IN_ADIROP on vp and drop the filesystem's active-dirop count.
 * The VU_DIROP marking and dirop-chain membership are left in place;
 * those are cleared when the segment writer flushes the vnode.
 */
void
lfs_unmark_vnode(struct vnode *vp)
{
	struct inode *ip = VTOI(vp);

	mutex_enter(&lfs_lock);
	if (ip && (ip->i_flag & IN_ADIROP)) {
		KASSERT(vp->v_uflag & VU_DIROP);
		--ip->i_lfs->lfs_nadirop;
		ip->i_flag &= ~IN_ADIROP;
	}
	mutex_exit(&lfs_lock);
}

/*
 * Create a symbolic link.  Short targets are stored directly in the
 * inode (SHORTLINK); longer ones are written as file data.
 */
int
lfs_symlink(void *v)
{
	struct vop_symlink_v3_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
		char *a_target;
	} */ *ap = v;
	struct lfs *fs;
	struct vnode *dvp, **vpp;
	struct inode *ip;
	struct ulfs_lookup_results *ulr;
	ssize_t len; /* XXX should be size_t */
	int error;

	dvp = ap->a_dvp;
	vpp = ap->a_vpp;

	KASSERT(vpp != NULL);
	KASSERT(*vpp == NULL);

	/* XXX should handle this material another way */
	ulr = &VTOI(ap->a_dvp)->i_crap;
	ULFS_CHECK_CRAPCOUNTER(VTOI(ap->a_dvp));

	fs = VFSTOULFS(dvp->v_mount)->um_lfs;
	ASSERT_NO_SEGLOCK(fs);
	if (fs->lfs_ronly) {
		return EROFS;
	}

	/*
	 * Get a new vnode *before* adjusting the dirop count, to
	 * avoid a deadlock in getnewvnode(), if we have a stacked
	 * filesystem mounted on top of us.
	 *
	 * NB: this means we have to destroy the new vnode on error.
	 */

	error = getnewvnode(VT_LFS, dvp->v_mount, lfs_vnodeop_p, NULL, vpp);
	if (error) {
		/* XXX message says "lfs_mkdir"; copy-pasted from lfs_mkdir */
		DLOG((DLOG_ALLOC, "lfs_mkdir: dvp %p error %d\n", dvp, error));
		return error;
	}
	KASSERT(*vpp != NULL);

	error = lfs_set_dirop(dvp, NULL);
	if (error) {
		ungetnewvnode(*vpp);
		*vpp = NULL;
		return error;
	}

	fstrans_start(dvp->v_mount, FSTRANS_SHARED);
	error = ulfs_makeinode(LFS_IFLNK | ap->a_vap->va_mode, dvp, ulr,
			       vpp, ap->a_cnp);
	if (error) {
		goto out;
	}

	VN_KNOTE(ap->a_dvp, NOTE_WRITE);
	ip = VTOI(*vpp);

	len = strlen(ap->a_target);
	if (len < ip->i_lfs->um_maxsymlinklen) {
		/* Short symlink: store the target inside the inode. */
		memcpy((char *)SHORTLINK(ip), ap->a_target, len);
		ip->i_size = len;
		DIP_ASSIGN(ip, size, len);
		uvm_vnp_setsize(*vpp, ip->i_size);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		if ((*vpp)->v_mount->mnt_flag & MNT_RELATIME)
			ip->i_flag |= IN_ACCESS;
	} else {
		/* Long symlink: write the target as ordinary file data. */
		error = vn_rdwr(UIO_WRITE, *vpp, ap->a_target, len, (off_t)0,
		    UIO_SYSSPACE, IO_NODELOCKED | IO_JOURNALLOCKED,
		    ap->a_cnp->cn_cred, NULL, NULL);
	}

	VOP_UNLOCK(*vpp);
	if (error)
		vrele(*vpp);

out:
	fstrans_done(dvp->v_mount);

	UNMARK_VNODE(dvp);
	/* XXX: is it even possible for the symlink to get MARK'd? */
	UNMARK_VNODE(*vpp);
	if (!((*vpp)->v_uflag & VU_DIROP)) {
		KASSERT(error != 0);
		ungetnewvnode(*vpp);
		*vpp = NULL;
	}
	else {
		KASSERT(error == 0);
	}
	lfs_unset_dirop(fs, dvp, "symlink");

	vrele(dvp);
	return (error);
}

/*
 * Create a device special file (or, via badblock inodes, anything with
 * an rdev).  After creation the vnode is fsync'd, destroyed, and
 * re-fetched through VFS_VGET so the inode cache sees the final state.
 */
int
lfs_mknod(void *v)
{
	struct vop_mknod_v3_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap = v;
	struct lfs *fs;
	struct vnode *dvp, **vpp;
	struct vattr *vap;
	struct inode *ip;
	int error;
	struct mount *mp;
	ino_t ino;
	struct ulfs_lookup_results *ulr;

	dvp = ap->a_dvp;
	vpp = ap->a_vpp;
	vap = ap->a_vap;

	KASSERT(vpp != NULL);
	KASSERT(*vpp == NULL);

	/* XXX should handle this material another way */
	ulr = &VTOI(dvp)->i_crap;
	ULFS_CHECK_CRAPCOUNTER(VTOI(dvp));

	fs = VFSTOULFS(dvp->v_mount)->um_lfs;
	ASSERT_NO_SEGLOCK(fs);
	if (fs->lfs_ronly) {
		return EROFS;
	}

	/*
	 * Get a new vnode *before* adjusting the dirop count, to
	 * avoid a deadlock in getnewvnode(), if we have a stacked
	 * filesystem mounted on top of us.
	 *
	 * NB: this means we have to destroy the new vnode on error.
	 */

	error = getnewvnode(VT_LFS, dvp->v_mount, lfs_vnodeop_p, NULL, vpp);
	if (error) {
		DLOG((DLOG_ALLOC, "lfs_mknod: dvp %p error %d\n", dvp, error));
		return error;
	}
	KASSERT(*vpp != NULL);

	error = lfs_set_dirop(dvp, NULL);
	if (error) {
		ungetnewvnode(*vpp);
		*vpp = NULL;
		return error;
	}

	fstrans_start(ap->a_dvp->v_mount, FSTRANS_SHARED);
	error = ulfs_makeinode(MAKEIMODE(vap->va_type, vap->va_mode),
	    dvp, ulr, vpp, ap->a_cnp);

	/* Either way we're done with the dirop at this point */
	UNMARK_VNODE(dvp);
	UNMARK_VNODE(*vpp);
	if (!((*vpp)->v_uflag & VU_DIROP)) {
		KASSERT(error != 0);
		ungetnewvnode(*vpp);
		*vpp = NULL;
	}
	else {
		KASSERT(error == 0);
	}
	lfs_unset_dirop(fs, dvp, "mknod");
	/*
	 * XXX this is where this used to be (though inside some evil
	 * macros) but it clearly should be moved further down.
	 * - dholland 20140515
	 */
	vrele(dvp);

	if (error) {
		fstrans_done(ap->a_dvp->v_mount);
		*vpp = NULL;
		return (error);
	}

	VN_KNOTE(dvp, NOTE_WRITE);
	ip = VTOI(*vpp);
	mp = (*vpp)->v_mount;
	ino = ip->i_number;
	ip->i_flag |= IN_ACCESS | IN_CHANGE | IN_UPDATE;
	if (vap->va_rdev != VNOVAL) {
		struct ulfsmount *ump = ip->i_ump;
		KASSERT(fs == ip->i_lfs);
		/*
		 * Want to be able to use this to make badblock
		 * inodes, so don't truncate the dev number.
		 */
		if (ump->um_fstype == ULFS1)
			ip->i_ffs1_rdev = ulfs_rw32(vap->va_rdev,
			    ULFS_MPNEEDSWAP(fs));
		else
			ip->i_ffs2_rdev = ulfs_rw64(vap->va_rdev,
			    ULFS_MPNEEDSWAP(fs));
	}

	/*
	 * Call fsync to write the vnode so that we don't have to deal with
	 * flushing it when it's marked VU_DIROP or reclaiming.
	 *
	 * XXX KS - If we can't flush we also can't call vgone(), so must
	 * return.  But, that leaves this vnode in limbo, also not good.
	 * Can this ever happen (barring hardware failure)?
	 */
	if ((error = VOP_FSYNC(*vpp, NOCRED, FSYNC_WAIT, 0, 0)) != 0) {
		panic("lfs_mknod: couldn't fsync (ino %llu)",
		      (unsigned long long)ino);
		/* return (error); */
	}
	/*
	 * Remove vnode so that it will be reloaded by VFS_VGET and
	 * checked to see if it is an alias of an existing entry in
	 * the inode cache.
	 */
	/* Used to be vput, but that causes us to call VOP_INACTIVE twice. */

	(*vpp)->v_type = VNON;
	VOP_UNLOCK(*vpp);
	vgone(*vpp);
	error = VFS_VGET(mp, ino, vpp);

	fstrans_done(ap->a_dvp->v_mount);
	if (error != 0) {
		*vpp = NULL;
		return (error);
	}
	VOP_UNLOCK(*vpp);
	return (0);
}

/*
 * Create a regular file
 */
int
lfs_create(void *v)
{
	struct vop_create_v3_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap = v;
	struct lfs *fs;
	struct vnode *dvp, **vpp;
	struct vattr *vap;
	struct ulfs_lookup_results *ulr;
	int error;

	dvp = ap->a_dvp;
	vpp = ap->a_vpp;
	vap = ap->a_vap;

	KASSERT(vpp != NULL);
	KASSERT(*vpp == NULL);

	/* XXX should handle this material another way */
	ulr = &VTOI(dvp)->i_crap;
	ULFS_CHECK_CRAPCOUNTER(VTOI(dvp));

	fs = VFSTOULFS(dvp->v_mount)->um_lfs;
	ASSERT_NO_SEGLOCK(fs);
	if (fs->lfs_ronly) {
		return EROFS;
	}

	/*
	 * Get a new vnode *before* adjusting the dirop count, to
	 * avoid a deadlock in getnewvnode(), if we have a stacked
	 * filesystem mounted on top of us.
	 *
	 * NB: this means we have to destroy the new vnode on error.
	 */

	error = getnewvnode(VT_LFS, dvp->v_mount, lfs_vnodeop_p, NULL, vpp);
	if (error) {
		DLOG((DLOG_ALLOC, "lfs_create: dvp %p error %d\n", dvp,error));
		return error;
	}
	error = lfs_set_dirop(dvp, NULL);
	if (error) {
		ungetnewvnode(*vpp);
		*vpp = NULL;
		return error;
	}

	fstrans_start(dvp->v_mount, FSTRANS_SHARED);
	error = ulfs_makeinode(MAKEIMODE(vap->va_type, vap->va_mode),
			       dvp, ulr, vpp, ap->a_cnp);
	if (error) {
		fstrans_done(dvp->v_mount);
		goto out;
	}
	fstrans_done(dvp->v_mount);
	VN_KNOTE(dvp, NOTE_WRITE);
	VOP_UNLOCK(*vpp);

out:

	UNMARK_VNODE(dvp);
	UNMARK_VNODE(*vpp);
	if (!((*vpp)->v_uflag & VU_DIROP)) {
		KASSERT(error != 0);
		ungetnewvnode(*vpp);
		*vpp = NULL;
	}
	else {
		KASSERT(error == 0);
	}
	lfs_unset_dirop(fs, dvp, "create");

	vrele(dvp);
	return (error);
}

/*
 * Create a directory: allocate the inode by hand (so it is not yet
 * entered in the parent), write the "." and ".." block from
 * mastertemplate, then link it into the parent.
 */
int
lfs_mkdir(void *v)
{
	struct vop_mkdir_v3_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap = v;
	struct lfs *fs;
	struct vnode *dvp, *tvp, **vpp;
	struct inode *dp, *ip;
	struct componentname *cnp;
	struct vattr *vap;
	struct ulfs_lookup_results *ulr;
	struct buf *bp;
	struct lfs_dirtemplate dirtemplate;
	struct lfs_direct *newdir;
	int dirblksiz;
	int dmode;
	int error;

	dvp = ap->a_dvp;
	tvp = NULL;
	vpp = ap->a_vpp;
	cnp = ap->a_cnp;
	vap = ap->a_vap;

	dp = VTOI(dvp);
	ip = NULL;

	KASSERT(vpp != NULL);
	KASSERT(*vpp == NULL);

	/* XXX should handle this material another way */
	ulr = &dp->i_crap;
	ULFS_CHECK_CRAPCOUNTER(dp);

	fs = VFSTOULFS(dvp->v_mount)->um_lfs;
	ASSERT_NO_SEGLOCK(fs);
	if (fs->lfs_ronly) {
		return EROFS;
	}
	dirblksiz = fs->um_dirblksiz;

	/*
	 * Get a new vnode *before* adjusting the dirop count, to
	 * avoid a
deadlock in getnewvnode(), if we have a stacked 995 * filesystem mounted on top of us. 996 * 997 * NB: this means we have to destroy the new vnode on error. 998 */ 999 1000 error = getnewvnode(VT_LFS, dvp->v_mount, lfs_vnodeop_p, NULL, vpp); 1001 if (error) { 1002 DLOG((DLOG_ALLOC, "lfs_mkdir: dvp %p error %d\n", dvp, error)); 1003 return error; 1004 } 1005 error = lfs_set_dirop(dvp, NULL); 1006 if (error) { 1007 ungetnewvnode(*vpp); 1008 *vpp = NULL; 1009 return error; 1010 } 1011 1012 fstrans_start(dvp->v_mount, FSTRANS_SHARED); 1013 1014 if ((nlink_t)dp->i_nlink >= LINK_MAX) { 1015 error = EMLINK; 1016 goto out; 1017 } 1018 1019 dmode = vap->va_mode & ACCESSPERMS; 1020 dmode |= LFS_IFDIR; 1021 /* 1022 * Must simulate part of ulfs_makeinode here to acquire the inode, 1023 * but not have it entered in the parent directory. The entry is 1024 * made later after writing "." and ".." entries. 1025 */ 1026 if ((error = lfs_valloc(dvp, dmode, cnp->cn_cred, vpp)) != 0) 1027 goto out; 1028 1029 tvp = *vpp; 1030 ip = VTOI(tvp); 1031 1032 ip->i_uid = kauth_cred_geteuid(cnp->cn_cred); 1033 DIP_ASSIGN(ip, uid, ip->i_uid); 1034 ip->i_gid = dp->i_gid; 1035 DIP_ASSIGN(ip, gid, ip->i_gid); 1036 #if defined(LFS_QUOTA) || defined(LFS_QUOTA2) 1037 if ((error = lfs_chkiq(ip, 1, cnp->cn_cred, 0))) { 1038 lfs_vfree(tvp, ip->i_number, dmode); 1039 fstrans_done(dvp->v_mount); 1040 vput(tvp); 1041 goto out2; 1042 } 1043 #endif 1044 ip->i_flag |= IN_ACCESS | IN_CHANGE | IN_UPDATE; 1045 ip->i_mode = dmode; 1046 DIP_ASSIGN(ip, mode, dmode); 1047 tvp->v_type = VDIR; /* Rest init'd in getnewvnode(). */ 1048 ip->i_nlink = 2; 1049 DIP_ASSIGN(ip, nlink, 2); 1050 if (cnp->cn_flags & ISWHITEOUT) { 1051 ip->i_flags |= UF_OPAQUE; 1052 DIP_ASSIGN(ip, flags, ip->i_flags); 1053 } 1054 1055 /* 1056 * Bump link count in parent directory to reflect work done below. 
1057 */ 1058 dp->i_nlink++; 1059 DIP_ASSIGN(dp, nlink, dp->i_nlink); 1060 dp->i_flag |= IN_CHANGE; 1061 if ((error = lfs_update(dvp, NULL, NULL, UPDATE_DIROP)) != 0) 1062 goto bad; 1063 1064 /* 1065 * Initialize directory with "." and ".." from static template. 1066 */ 1067 dirtemplate = mastertemplate; 1068 dirtemplate.dotdot_reclen = dirblksiz - dirtemplate.dot_reclen; 1069 dirtemplate.dot_ino = ulfs_rw32(ip->i_number, ULFS_MPNEEDSWAP(fs)); 1070 dirtemplate.dotdot_ino = ulfs_rw32(dp->i_number, ULFS_MPNEEDSWAP(fs)); 1071 dirtemplate.dot_reclen = ulfs_rw16(dirtemplate.dot_reclen, 1072 ULFS_MPNEEDSWAP(fs)); 1073 dirtemplate.dotdot_reclen = ulfs_rw16(dirtemplate.dotdot_reclen, 1074 ULFS_MPNEEDSWAP(fs)); 1075 if (fs->um_maxsymlinklen <= 0) { 1076 #if BYTE_ORDER == LITTLE_ENDIAN 1077 if (ULFS_MPNEEDSWAP(fs) == 0) 1078 #else 1079 if (ULFS_MPNEEDSWAP(fs) != 0) 1080 #endif 1081 { 1082 dirtemplate.dot_type = dirtemplate.dot_namlen; 1083 dirtemplate.dotdot_type = dirtemplate.dotdot_namlen; 1084 dirtemplate.dot_namlen = dirtemplate.dotdot_namlen = 0; 1085 } else 1086 dirtemplate.dot_type = dirtemplate.dotdot_type = 0; 1087 } 1088 if ((error = lfs_balloc(tvp, (off_t)0, dirblksiz, cnp->cn_cred, 1089 B_CLRBUF, &bp)) != 0) 1090 goto bad; 1091 ip->i_size = dirblksiz; 1092 DIP_ASSIGN(ip, size, dirblksiz); 1093 ip->i_flag |= IN_ACCESS | IN_CHANGE | IN_UPDATE; 1094 uvm_vnp_setsize(tvp, ip->i_size); 1095 memcpy((void *)bp->b_data, (void *)&dirtemplate, sizeof dirtemplate); 1096 1097 /* 1098 * Directory set up; now install its entry in the parent directory. 
1099 */ 1100 if ((error = VOP_BWRITE(bp->b_vp, bp)) != 0) 1101 goto bad; 1102 if ((error = lfs_update(tvp, NULL, NULL, UPDATE_DIROP)) != 0) { 1103 goto bad; 1104 } 1105 newdir = pool_cache_get(ulfs_direct_cache, PR_WAITOK); 1106 ulfs_makedirentry(ip, cnp, newdir); 1107 error = ulfs_direnter(dvp, ulr, tvp, newdir, cnp, bp); 1108 pool_cache_put(ulfs_direct_cache, newdir); 1109 bad: 1110 if (error == 0) { 1111 VN_KNOTE(dvp, NOTE_WRITE | NOTE_LINK); 1112 VOP_UNLOCK(tvp); 1113 } else { 1114 dp->i_nlink--; 1115 DIP_ASSIGN(dp, nlink, dp->i_nlink); 1116 dp->i_flag |= IN_CHANGE; 1117 /* 1118 * No need to do an explicit lfs_truncate here, vrele will 1119 * do this for us because we set the link count to 0. 1120 */ 1121 ip->i_nlink = 0; 1122 DIP_ASSIGN(ip, nlink, 0); 1123 ip->i_flag |= IN_CHANGE; 1124 /* If IN_ADIROP, account for it */ 1125 lfs_unmark_vnode(tvp); 1126 vput(tvp); 1127 } 1128 1129 out: 1130 fstrans_done(dvp->v_mount); 1131 #if defined(LFS_QUOTA) || defined(LFS_QUOTA2) 1132 out2: 1133 #endif 1134 1135 UNMARK_VNODE(dvp); 1136 UNMARK_VNODE(*vpp); 1137 if (!((*vpp)->v_uflag & VU_DIROP)) { 1138 KASSERT(error != 0); 1139 ungetnewvnode(*vpp); 1140 *vpp = NULL; 1141 } 1142 else { 1143 KASSERT(error == 0); 1144 } 1145 lfs_unset_dirop(fs, dvp, "mkdir"); 1146 1147 vrele(dvp); 1148 return (error); 1149 } 1150 1151 int 1152 lfs_remove(void *v) 1153 { 1154 struct vop_remove_args /* { 1155 struct vnode *a_dvp; 1156 struct vnode *a_vp; 1157 struct componentname *a_cnp; 1158 } */ *ap = v; 1159 struct vnode *dvp, *vp; 1160 struct inode *ip; 1161 int error; 1162 1163 dvp = ap->a_dvp; 1164 vp = ap->a_vp; 1165 ip = VTOI(vp); 1166 if ((error = lfs_set_dirop(dvp, vp)) != 0) { 1167 if (dvp == vp) 1168 vrele(vp); 1169 else 1170 vput(vp); 1171 vput(dvp); 1172 return error; 1173 } 1174 error = ulfs_remove(ap); 1175 if (ip->i_nlink == 0) 1176 lfs_orphan(ip->i_lfs, ip->i_number); 1177 1178 UNMARK_VNODE(dvp); 1179 if (ap->a_vp) { 1180 UNMARK_VNODE(ap->a_vp); 1181 } 1182 
lfs_unset_dirop(ip->i_lfs, dvp, "remove"); 1183 vrele(dvp); 1184 if (ap->a_vp) { 1185 vrele(ap->a_vp); 1186 } 1187 1188 return (error); 1189 } 1190 1191 int 1192 lfs_rmdir(void *v) 1193 { 1194 struct vop_rmdir_args /* { 1195 struct vnodeop_desc *a_desc; 1196 struct vnode *a_dvp; 1197 struct vnode *a_vp; 1198 struct componentname *a_cnp; 1199 } */ *ap = v; 1200 struct vnode *vp; 1201 struct inode *ip; 1202 int error; 1203 1204 vp = ap->a_vp; 1205 ip = VTOI(vp); 1206 if ((error = lfs_set_dirop(ap->a_dvp, ap->a_vp)) != 0) { 1207 if (ap->a_dvp == vp) 1208 vrele(ap->a_dvp); 1209 else 1210 vput(ap->a_dvp); 1211 vput(vp); 1212 return error; 1213 } 1214 error = ulfs_rmdir(ap); 1215 if (ip->i_nlink == 0) 1216 lfs_orphan(ip->i_lfs, ip->i_number); 1217 1218 UNMARK_VNODE(ap->a_dvp); 1219 if (ap->a_vp) { 1220 UNMARK_VNODE(ap->a_vp); 1221 } 1222 lfs_unset_dirop(ip->i_lfs, ap->a_dvp, "rmdir"); 1223 vrele(ap->a_dvp); 1224 if (ap->a_vp) { 1225 vrele(ap->a_vp); 1226 } 1227 1228 return (error); 1229 } 1230 1231 int 1232 lfs_link(void *v) 1233 { 1234 struct vop_link_args /* { 1235 struct vnode *a_dvp; 1236 struct vnode *a_vp; 1237 struct componentname *a_cnp; 1238 } */ *ap = v; 1239 struct lfs *fs; 1240 struct vnode *dvp; 1241 int error; 1242 1243 dvp = ap->a_dvp; 1244 1245 fs = VFSTOULFS(dvp->v_mount)->um_lfs; 1246 ASSERT_NO_SEGLOCK(fs); 1247 if (fs->lfs_ronly) { 1248 return EROFS; 1249 } 1250 1251 error = lfs_set_dirop(dvp, NULL); 1252 if (error) { 1253 /* 1254 * XXX dholland 20140515 this was here before but must 1255 * be wrong. 
1256 */ 1257 vput(dvp); 1258 1259 return error; 1260 } 1261 1262 error = ulfs_link(ap); 1263 1264 UNMARK_VNODE(dvp); 1265 lfs_unset_dirop(fs, dvp, "link"); 1266 vrele(dvp); 1267 1268 return (error); 1269 } 1270 1271 /* XXX hack to avoid calling ITIMES in getattr */ 1272 int 1273 lfs_getattr(void *v) 1274 { 1275 struct vop_getattr_args /* { 1276 struct vnode *a_vp; 1277 struct vattr *a_vap; 1278 kauth_cred_t a_cred; 1279 } */ *ap = v; 1280 struct vnode *vp = ap->a_vp; 1281 struct inode *ip = VTOI(vp); 1282 struct vattr *vap = ap->a_vap; 1283 struct lfs *fs = ip->i_lfs; 1284 1285 fstrans_start(vp->v_mount, FSTRANS_SHARED); 1286 /* 1287 * Copy from inode table 1288 */ 1289 vap->va_fsid = ip->i_dev; 1290 vap->va_fileid = ip->i_number; 1291 vap->va_mode = ip->i_mode & ~LFS_IFMT; 1292 vap->va_nlink = ip->i_nlink; 1293 vap->va_uid = ip->i_uid; 1294 vap->va_gid = ip->i_gid; 1295 vap->va_rdev = (dev_t)ip->i_ffs1_rdev; 1296 vap->va_size = vp->v_size; 1297 vap->va_atime.tv_sec = ip->i_ffs1_atime; 1298 vap->va_atime.tv_nsec = ip->i_ffs1_atimensec; 1299 vap->va_mtime.tv_sec = ip->i_ffs1_mtime; 1300 vap->va_mtime.tv_nsec = ip->i_ffs1_mtimensec; 1301 vap->va_ctime.tv_sec = ip->i_ffs1_ctime; 1302 vap->va_ctime.tv_nsec = ip->i_ffs1_ctimensec; 1303 vap->va_flags = ip->i_flags; 1304 vap->va_gen = ip->i_gen; 1305 /* this doesn't belong here */ 1306 if (vp->v_type == VBLK) 1307 vap->va_blocksize = BLKDEV_IOSIZE; 1308 else if (vp->v_type == VCHR) 1309 vap->va_blocksize = MAXBSIZE; 1310 else 1311 vap->va_blocksize = vp->v_mount->mnt_stat.f_iosize; 1312 vap->va_bytes = lfs_fsbtob(fs, (u_quad_t)ip->i_lfs_effnblks); 1313 vap->va_type = vp->v_type; 1314 vap->va_filerev = ip->i_modrev; 1315 fstrans_done(vp->v_mount); 1316 return (0); 1317 } 1318 1319 /* 1320 * Check to make sure the inode blocks won't choke the buffer 1321 * cache, then call ulfs_setattr as usual. 
 */
int
lfs_setattr(void *v)
{
	struct vop_setattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	/* Throttle: may flush if too many dirty buffers are held. */
	lfs_check(vp, LFS_UNUSED_LBN, 0);
	return ulfs_setattr(v);
}

/*
 * Release the block we hold on lfs_newseg wrapping. Called on file close,
 * or explicitly from LFCNWRAPGO. Called with the interlock held.
 *
 * Only the LWP that installed the hold (fs->lfs_stoplwp) may release
 * it; anyone else gets EBUSY.  If "waitfor" is set, sleep until the
 * segment writer selects a new segment.  The "ip" argument is unused
 * here.  Returns 0 on success or EBUSY.
 */
static int
lfs_wrapgo(struct lfs *fs, struct inode *ip, int waitfor)
{
	if (fs->lfs_stoplwp != curlwp)
		return EBUSY;

	fs->lfs_stoplwp = NULL;
	cv_signal(&fs->lfs_stopcv);

	KASSERT(fs->lfs_nowrap > 0);
	/* Defensive for non-DIAGNOSTIC kernels where KASSERT is a no-op. */
	if (fs->lfs_nowrap <= 0) {
		return 0;
	}

	if (--fs->lfs_nowrap == 0) {
		log(LOG_NOTICE, "%s: re-enabled log wrap\n", fs->lfs_fsmnt);
		wakeup(&fs->lfs_wrappass);
		lfs_wakeup_cleaner(fs);
	}
	if (waitfor) {
		mtsleep(&fs->lfs_nextseg, PCATCH | PUSER, "segment",
			0, &lfs_lock);
	}

	return 0;
}

/*
 * Close called.
 *
 * Update the times on the inode.
 */
/* ARGSUSED */
int
lfs_close(void *v)
{
	struct vop_close_args /* {
		struct vnode *a_vp;
		int a_fflag;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct inode *ip = VTOI(vp);
	struct lfs *fs = ip->i_lfs;

	/*
	 * If this LWP held the log-wrap stop (taken via LFCNWRAPSTOP on
	 * the root or Ifile vnode), release it now that the fd is going
	 * away, so a dead agent cannot wedge the filesystem.
	 */
	if ((ip->i_number == ULFS_ROOTINO || ip->i_number == LFS_IFILE_INUM) &&
	    fs->lfs_stoplwp == curlwp) {
		mutex_enter(&lfs_lock);
		log(LOG_NOTICE, "lfs_close: releasing log wrap control\n");
		lfs_wrapgo(fs, ip, 0);
		mutex_exit(&lfs_lock);
	}

	/* Skip timestamp work on the Ifile while unmounting. */
	if (vp == ip->i_lfs->lfs_ivnode &&
	    vp->v_mount->mnt_iflag & IMNT_UNMOUNT)
		return 0;

	fstrans_start(vp->v_mount, FSTRANS_SHARED);
	if (vp->v_usecount > 1 && vp != ip->i_lfs->lfs_ivnode) {
		LFS_ITIMES(ip, NULL, NULL, NULL);
	}
	fstrans_done(vp->v_mount);
	return (0);
}

/*
 * Close wrapper for special devices.
 *
 * Update the times on the inode then do device close.
 */
int
lfsspec_close(void *v)
{
	struct vop_close_args /* {
		struct vnode	*a_vp;
		int		a_fflag;
		kauth_cred_t	a_cred;
	} */ *ap = v;
	struct vnode	*vp;
	struct inode	*ip;

	vp = ap->a_vp;
	ip = VTOI(vp);
	if (vp->v_usecount > 1) {
		LFS_ITIMES(ip, NULL, NULL, NULL);
	}
	/* Chain to the generic special-device close. */
	return (VOCALL (spec_vnodeop_p, VOFFSET(vop_close), ap));
}

/*
 * Close wrapper for fifo's.
 *
 * Update the times on the inode then do device close.
 */
int
lfsfifo_close(void *v)
{
	struct vop_close_args /* {
		struct vnode	*a_vp;
		int		a_fflag;
		kauth_cred_t	a_cred;
	} */ *ap = v;
	struct vnode	*vp;
	struct inode	*ip;

	vp = ap->a_vp;
	ip = VTOI(vp);
	if (ap->a_vp->v_usecount > 1) {
		LFS_ITIMES(ip, NULL, NULL, NULL);
	}
	/* Chain to the generic fifo close. */
	return (VOCALL (fifo_vnodeop_p, VOFFSET(vop_close), ap));
}

/*
 * Reclaim an inode so that it can be used for other purposes.
 */

int
lfs_reclaim(void *v)
{
	struct vop_reclaim_args /* {
		struct vnode *a_vp;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct inode *ip = VTOI(vp);
	struct lfs *fs = ip->i_lfs;
	int error;

	/*
	 * The inode must be freed and updated before being removed
	 * from its hash chain. Other threads trying to gain a hold
	 * or lock on the inode will be stalled.
	 */
	if (ip->i_nlink <= 0 && (vp->v_mount->mnt_flag & MNT_RDONLY) == 0)
		lfs_vfree(vp, ip->i_number, ip->i_omode);

	mutex_enter(&lfs_lock);
	LFS_CLR_UINO(ip, IN_ALLMOD);
	mutex_exit(&lfs_lock);
	if ((error = ulfs_reclaim(vp)))
		return (error);

	/*
	 * Take us off the paging and/or dirop queues if we were on them.
	 * We shouldn't be on them.
	 */
	mutex_enter(&lfs_lock);
	/*
	 * NOTE(review): IN_PAGING is tested/cleared in i_flags here, but
	 * the IN_* in-core flags are elsewhere in this file kept in
	 * i_flag (e.g. "ip->i_flag |= IN_CHANGE").  Confirm which field
	 * the code that sets IN_PAGING uses before changing this.
	 */
	if (ip->i_flags & IN_PAGING) {
		log(LOG_WARNING, "%s: reclaimed vnode is IN_PAGING\n",
		    fs->lfs_fsmnt);
		ip->i_flags &= ~IN_PAGING;
		TAILQ_REMOVE(&fs->lfs_pchainhd, ip, i_lfs_pchain);
	}
	if (vp->v_uflag & VU_DIROP) {
		/* The cleanup below is unreachable after the panic. */
		panic("reclaimed vnode is VU_DIROP");
		vp->v_uflag &= ~VU_DIROP;
		TAILQ_REMOVE(&fs->lfs_dchainhd, ip, i_lfs_dchain);
	}
	mutex_exit(&lfs_lock);

	/* Release all per-inode allocations and detach from the vnode. */
	pool_put(&lfs_dinode_pool, ip->i_din.ffs1_din);
	lfs_deregister_all(vp);
	pool_put(&lfs_inoext_pool, ip->inode_ext.lfs);
	ip->inode_ext.lfs = NULL;
	genfs_node_destroy(vp);
	pool_put(&lfs_inode_pool, vp->v_data);
	vp->v_data = NULL;
	return (0);
}

/*
 * Read a block from a storage device.
 *
 * Calculate the logical to physical mapping if not done already,
 * then call the device strategy routine.
 *
 * In order to avoid reading blocks that are in the process of being
 * written by the cleaner---and hence are not mutexed by the normal
 * buffer cache / page cache mechanisms---check for collisions before
 * reading.
 *
 * We inline ulfs_strategy to make sure that the VOP_BMAP occurs *before*
 * the active cleaner test.
 *
 * XXX This code assumes that lfs_markv makes synchronous checkpoints.
 */
int
lfs_strategy(void *v)
{
	struct vop_strategy_args /* {
		struct vnode *a_vp;
		struct buf *a_bp;
	} */ *ap = v;
	struct buf	*bp;
	struct lfs	*fs;
	struct vnode	*vp;
	struct inode	*ip;
	daddr_t		tbn;
#define MAXLOOP 25
	int		i, sn, error, slept, loopcount;

	bp = ap->a_bp;
	vp = ap->a_vp;
	ip = VTOI(vp);
	fs = ip->i_lfs;

	/* lfs uses its strategy routine only for read */
	KASSERT(bp->b_flags & B_READ);

	if (vp->v_type == VBLK || vp->v_type == VCHR)
		panic("lfs_strategy: spec");
	KASSERT(bp->b_bcount != 0);
	if (bp->b_blkno == bp->b_lblkno) {
		/* Not yet mapped: translate logical to physical. */
		error = VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno,
				 NULL);
		if (error) {
			bp->b_error = error;
			bp->b_resid = bp->b_bcount;
			biodone(bp);
			return (error);
		}
		if ((long)bp->b_blkno == -1) /* no valid data */
			clrbuf(bp);
	}
	if ((long)bp->b_blkno < 0) { /* block is not on disk */
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return (0);
	}

	/*
	 * If the cleaner holds the segment lock, it may currently be
	 * rewriting the very blocks we want to read.  Compare our target
	 * against the cleaner's active intervals and sleep until it is
	 * safe; cap the retries at MAXLOOP to avoid livelock.
	 */
	slept = 1;
	loopcount = 0;
	mutex_enter(&lfs_lock);
	while (slept && fs->lfs_seglock) {
		mutex_exit(&lfs_lock);
		/*
		 * Look through list of intervals.
		 * There will only be intervals to look through
		 * if the cleaner holds the seglock.
		 * Since the cleaner is synchronous, we can trust
		 * the list of intervals to be current.
		 */
		tbn = LFS_DBTOFSB(fs, bp->b_blkno);
		sn = lfs_dtosn(fs, tbn);
		slept = 0;
		for (i = 0; i < fs->lfs_cleanind; i++) {
			if (sn == lfs_dtosn(fs, fs->lfs_cleanint[i]) &&
			    tbn >= fs->lfs_cleanint[i]) {
				DLOG((DLOG_CLEAN,
				      "lfs_strategy: ino %d lbn %" PRId64
				      " ind %d sn %d fsb %" PRIx32
				      " given sn %d fsb %" PRIx64 "\n",
				      ip->i_number, bp->b_lblkno, i,
				      lfs_dtosn(fs, fs->lfs_cleanint[i]),
				      fs->lfs_cleanint[i], sn, tbn));
				DLOG((DLOG_CLEAN,
				      "lfs_strategy: sleeping on ino %d lbn %"
				      PRId64 "\n", ip->i_number, bp->b_lblkno));
				mutex_enter(&lfs_lock);
				if (LFS_SEGLOCK_HELD(fs) && fs->lfs_iocount) {
					/*
					 * Cleaner can't wait for itself.
					 * Instead, wait for the blocks
					 * to be written to disk.
					 * XXX we need pribio in the test
					 * XXX here.
					 */
					mtsleep(&fs->lfs_iocount,
						(PRIBIO + 1) | PNORELOCK,
						"clean2", hz/10 + 1,
						&lfs_lock);
					slept = 1;
					++loopcount;
					break;
				} else if (fs->lfs_seglock) {
					mtsleep(&fs->lfs_seglock,
						(PRIBIO + 1) | PNORELOCK,
						"clean1", 0,
						&lfs_lock);
					slept = 1;
					break;
				}
				/* Seglock released while we looked: done. */
				mutex_exit(&lfs_lock);
			}
		}
		mutex_enter(&lfs_lock);
		if (loopcount > MAXLOOP) {
			printf("lfs_strategy: breaking out of clean2 loop\n");
			break;
		}
	}
	mutex_exit(&lfs_lock);

	/* Hand the (now physically-addressed) request to the device. */
	vp = ip->i_devvp;
	return VOP_STRATEGY(vp, bp);
}

/*
 * Inline lfs_segwrite/lfs_writevnodes, but just for dirops.
 * Technically this is a checkpoint (the on-disk state is valid)
 * even though we are leaving out all the file data.
 */
int
lfs_flush_dirops(struct lfs *fs)
{
	struct inode *ip, *nip;
	struct vnode *vp;
	extern int lfs_dostats;
	struct segment *sp;
	int flags = 0;
	int error = 0;

	ASSERT_MAYBE_SEGLOCK(fs);
	KASSERT(fs->lfs_nadirop == 0);

	if (fs->lfs_ronly)
		return EROFS;

	/* Nothing queued on the dirop chain: done. */
	mutex_enter(&lfs_lock);
	if (TAILQ_FIRST(&fs->lfs_dchainhd) == NULL) {
		mutex_exit(&lfs_lock);
		return 0;
	} else
		mutex_exit(&lfs_lock);

	if (lfs_dostats)
		++lfs_stats.flush_invoked;

	lfs_imtime(fs);
	lfs_seglock(fs, flags);
	sp = fs->lfs_sp;

	/*
	 * lfs_writevnodes, optimized to get dirops out of the way.
	 * Only write dirops, and don't flush files' pages, only
	 * blocks from the directories.
	 *
	 * We don't need to vref these files because they are
	 * dirops and so hold an extra reference until the
	 * segunlock clears them of that status.
	 *
	 * We don't need to check for IN_ADIROP because we know that
	 * no dirops are active.
	 *
	 */
	mutex_enter(&lfs_lock);
	for (ip = TAILQ_FIRST(&fs->lfs_dchainhd); ip != NULL; ip = nip) {
		/* Grab the successor before dropping lfs_lock. */
		nip = TAILQ_NEXT(ip, i_lfs_dchain);
		mutex_exit(&lfs_lock);
		vp = ITOV(ip);
		mutex_enter(vp->v_interlock);

		KASSERT((ip->i_flag & IN_ADIROP) == 0);
		KASSERT(vp->v_uflag & VU_DIROP);
		KASSERT(vdead_check(vp, VDEAD_NOWAIT) == 0);

		/*
		 * All writes to directories come from dirops; all
		 * writes to files' direct blocks go through the page
		 * cache, which we're not touching. Reads to files
		 * and/or directories will not be affected by writing
		 * directory blocks inodes and file inodes. So we don't
		 * really need to lock.
		 */
		if (vdead_check(vp, VDEAD_NOWAIT) != 0) {
			mutex_exit(vp->v_interlock);
			mutex_enter(&lfs_lock);
			continue;
		}
		mutex_exit(vp->v_interlock);
		/* XXX see below
		 * waslocked = VOP_ISLOCKED(vp);
		 */
		if (vp->v_type != VREG &&
		    ((ip->i_flag & IN_ALLMOD) || !VPISEMPTY(vp))) {
			error = lfs_writefile(fs, sp, vp);
			if (!VPISEMPTY(vp) && !WRITEINPROG(vp) &&
			    !(ip->i_flag & IN_ALLMOD)) {
				mutex_enter(&lfs_lock);
				LFS_SET_UINO(ip, IN_MODIFIED);
				mutex_exit(&lfs_lock);
			}
			if (error && (sp->seg_flags & SEGM_SINGLE)) {
				mutex_enter(&lfs_lock);
				error = EAGAIN;
				break;
			}
		}
		KDASSERT(ip->i_number != LFS_IFILE_INUM);
		error = lfs_writeinode(fs, sp, ip);
		mutex_enter(&lfs_lock);
		if (error && (sp->seg_flags & SEGM_SINGLE)) {
			error = EAGAIN;
			break;
		}

		/*
		 * We might need to update these inodes again,
		 * for example, if they have data blocks to write.
		 * Make sure that after this flush, they are still
		 * marked IN_MODIFIED so that we don't forget to
		 * write them.
		 */
		/* XXX only for non-directories? --KS */
		LFS_SET_UINO(ip, IN_MODIFIED);
	}
	mutex_exit(&lfs_lock);
	/* We've written all the dirops there are */
	((SEGSUM *)(sp->segsum))->ss_flags &= ~(SS_CONT);
	lfs_finalize_fs_seguse(fs);
	(void) lfs_writeseg(fs, sp);
	lfs_segunlock(fs);

	return error;
}

/*
 * Flush all vnodes for which the pagedaemon has requested pageouts.
 * Skip over any files that are marked VU_DIROP (since lfs_flush_dirop()
 * has just run, this would be an error). If we have to skip a vnode
 * for any reason, just skip it; if we have to wait for the cleaner,
 * abort. The writer daemon will call us again later.
 */
int
lfs_flush_pchain(struct lfs *fs)
{
	struct inode *ip, *nip;
	struct vnode *vp;
	extern int lfs_dostats;
	struct segment *sp;
	int error, error2;

	ASSERT_NO_SEGLOCK(fs);

	if (fs->lfs_ronly)
		return EROFS;

	/* Nothing queued for pageout: done. */
	mutex_enter(&lfs_lock);
	if (TAILQ_FIRST(&fs->lfs_pchainhd) == NULL) {
		mutex_exit(&lfs_lock);
		return 0;
	} else
		mutex_exit(&lfs_lock);

	/* Get dirops out of the way */
	if ((error = lfs_flush_dirops(fs)) != 0)
		return error;

	if (lfs_dostats)
		++lfs_stats.flush_invoked;

	/*
	 * Inline lfs_segwrite/lfs_writevnodes, but just for pageouts.
	 */
	lfs_imtime(fs);
	lfs_seglock(fs, 0);
	sp = fs->lfs_sp;

	/*
	 * lfs_writevnodes, optimized to clear pageout requests.
	 * Only write non-dirop files that are in the pageout queue.
	 * We're very conservative about what we write; we want to be
	 * fast and async.
	 */
	mutex_enter(&lfs_lock);
    top:
	for (ip = TAILQ_FIRST(&fs->lfs_pchainhd); ip != NULL; ip = nip) {
		nip = TAILQ_NEXT(ip, i_lfs_pchain);
		vp = ITOV(ip);

		/*
		 * Restart from the head if this entry lost its paging
		 * flag (the chain may have changed under us).
		 * NOTE(review): IN_PAGING is tested in i_flags here, but
		 * IN_* in-core flags are elsewhere kept in i_flag (see
		 * the IN_ALLMOD test below) — confirm which field the
		 * setter of IN_PAGING uses.
		 */
		if (!(ip->i_flags & IN_PAGING))
			goto top;

		mutex_enter(vp->v_interlock);
		if (vdead_check(vp, VDEAD_NOWAIT) != 0 ||
		    (vp->v_uflag & VU_DIROP) != 0) {
			mutex_exit(vp->v_interlock);
			continue;
		}
		if (vp->v_type != VREG) {
			mutex_exit(vp->v_interlock);
			continue;
		}
		if (lfs_vref(vp))
			continue;
		mutex_exit(&lfs_lock);

		if (vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_RETRY) != 0) {
			lfs_vunref(vp);
			mutex_enter(&lfs_lock);
			continue;
		}

		error = lfs_writefile(fs, sp, vp);
		if (!VPISEMPTY(vp) && !WRITEINPROG(vp) &&
		    !(ip->i_flag & IN_ALLMOD)) {
			mutex_enter(&lfs_lock);
			LFS_SET_UINO(ip, IN_MODIFIED);
			mutex_exit(&lfs_lock);
		}
		KDASSERT(ip->i_number != LFS_IFILE_INUM);
		error2 = lfs_writeinode(fs, sp, ip);

		VOP_UNLOCK(vp);
		lfs_vunref(vp);

		/* EAGAIN means the cleaner needs to run; give up. */
		if (error == EAGAIN || error2 == EAGAIN) {
			lfs_writeseg(fs, sp);
			mutex_enter(&lfs_lock);
			break;
		}
		mutex_enter(&lfs_lock);
	}
	mutex_exit(&lfs_lock);
	(void) lfs_writeseg(fs, sp);
	lfs_segunlock(fs);

	return 0;
}

/*
 * Provide a fcntl interface to sys_lfs_{segwait,bmapv,markv}.
 *
 * LFS-specific commands are honored only on the filesystem root or the
 * Ifile; everything else falls through to ulfs_fcntl.  'L'-class
 * commands additionally require KAUTH_SYSTEM_LFS authorization.
 */
int
lfs_fcntl(void *v)
{
	struct vop_fcntl_args /* {
		struct vnode *a_vp;
		u_int a_command;
		void * a_data;
		int  a_fflag;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct timeval tv;
	struct timeval *tvp;
	BLOCK_INFO *blkiov;
	CLEANERINFO *cip;
	SEGUSE *sup;
	int blkcnt, error;
	size_t fh_size;
	struct lfs_fcntl_markv blkvp;
	struct lwp *l;
	fsid_t *fsidp;
	struct lfs *fs;
	struct buf *bp;
	fhandle_t *fhp;
	daddr_t off;
	int oclean;

	/* Only respect LFS fcntls on fs root or Ifile */
	if (VTOI(ap->a_vp)->i_number != ULFS_ROOTINO &&
	    VTOI(ap->a_vp)->i_number != LFS_IFILE_INUM) {
		return ulfs_fcntl(v);
	}

	/* Avoid locking a draining lock */
	if (ap->a_vp->v_mount->mnt_iflag & IMNT_UNMOUNT) {
		return ESHUTDOWN;
	}

	/* LFS control and monitoring fcntls are available only to root */
	l = curlwp;
	if (((ap->a_command & 0xff00) >> 8) == 'L' &&
	    (error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_LFS,
	    KAUTH_REQ_SYSTEM_LFS_FCNTL, NULL, NULL, NULL)) != 0)
		return (error);

	fs = VTOI(ap->a_vp)->i_lfs;
	fsidp = &ap->a_vp->v_mount->mnt_stat.f_fsidx;

	error = 0;
	switch ((int)ap->a_command) {
	    case LFCNSEGWAITALL_COMPAT_50:
	    case LFCNSEGWAITALL_COMPAT:
		fsidp = NULL;
		/* FALLSTHROUGH */
	    case LFCNSEGWAIT_COMPAT_50:
	    case LFCNSEGWAIT_COMPAT:
		{
			/* Old binaries pass a struct timeval50. */
			struct timeval50 *tvp50
				= (struct timeval50 *)ap->a_data;
			timeval50_to_timeval(tvp50, &tv);
			tvp = &tv;
		}
		goto segwait_common;
	    case LFCNSEGWAITALL:
		fsidp = NULL;
		/* FALLSTHROUGH */
	    case LFCNSEGWAIT:
		tvp = (struct timeval *)ap->a_data;
	    segwait_common:
		/* lfs_sleepers keeps the fs from being unmounted under us. */
		mutex_enter(&lfs_lock);
		++fs->lfs_sleepers;
		mutex_exit(&lfs_lock);

		error = lfs_segwait(fsidp, tvp);

		mutex_enter(&lfs_lock);
		if (--fs->lfs_sleepers == 0)
			wakeup(&fs->lfs_sleepers);
		mutex_exit(&lfs_lock);
		return error;

	    case LFCNBMAPV:
	    case LFCNMARKV:
		blkvp = *(struct lfs_fcntl_markv *)ap->a_data;

		blkcnt = blkvp.blkcnt;
		if ((u_int) blkcnt > LFS_MARKV_MAXBLKCNT)
			return (EINVAL);
		blkiov = lfs_malloc(fs, blkcnt * sizeof(BLOCK_INFO), LFS_NB_BLKIOV);
		if ((error = copyin(blkvp.blkiov, blkiov,
		     blkcnt * sizeof(BLOCK_INFO))) != 0) {
			lfs_free(fs, blkiov, LFS_NB_BLKIOV);
			return error;
		}

		mutex_enter(&lfs_lock);
		++fs->lfs_sleepers;
		mutex_exit(&lfs_lock);
		if (ap->a_command == LFCNBMAPV)
			error = lfs_bmapv(l->l_proc, fsidp, blkiov, blkcnt);
		else /* LFCNMARKV */
			error = lfs_markv(l->l_proc, fsidp, blkiov, blkcnt);
		if (error == 0)
			error = copyout(blkiov, blkvp.blkiov,
					blkcnt * sizeof(BLOCK_INFO));
		mutex_enter(&lfs_lock);
		if (--fs->lfs_sleepers == 0)
			wakeup(&fs->lfs_sleepers);
		mutex_exit(&lfs_lock);
		lfs_free(fs, blkiov, LFS_NB_BLKIOV);
		return error;

	    case LFCNRECLAIM:
		/*
		 * Flush dirops and write Ifile, allowing empty segments
		 * to be immediately reclaimed.
		 */
		lfs_writer_enter(fs, "pndirop");
		off = fs->lfs_offset;
		lfs_seglock(fs, SEGM_FORCE_CKP | SEGM_CKP);
		lfs_flush_dirops(fs);
		LFS_CLEANERINFO(cip, fs, bp);
		oclean = cip->clean;
		LFS_SYNC_CLEANERINFO(cip, fs, bp, 1);
		lfs_segwrite(ap->a_vp->v_mount, SEGM_FORCE_CKP);
		fs->lfs_sp->seg_flags |= SEGM_PROT;
		lfs_segunlock(fs);
		lfs_writer_leave(fs);

#ifdef DEBUG
		LFS_CLEANERINFO(cip, fs, bp);
		DLOG((DLOG_CLEAN, "lfs_fcntl: reclaim wrote %" PRId64
		      " blocks, cleaned %" PRId32 " segments (activesb %d)\n",
		      fs->lfs_offset - off, cip->clean - oclean,
		      fs->lfs_activesb));
		LFS_SYNC_CLEANERINFO(cip, fs, bp, 0);
#else
		__USE(oclean);
		__USE(off);
#endif

		return 0;

	    case LFCNIFILEFH_COMPAT:
		/* Return the filehandle of the Ifile */
		if ((error = kauth_authorize_system(l->l_cred,
		    KAUTH_SYSTEM_FILEHANDLE, 0, NULL, NULL, NULL)) != 0)
			return (error);
		fhp = (struct fhandle *)ap->a_data;
		fhp->fh_fsid = *fsidp;
		fh_size = 16;	/* former VFS_MAXFIDSIZ */
		return lfs_vptofh(fs->lfs_ivnode, &(fhp->fh_fid), &fh_size);

	    case LFCNIFILEFH_COMPAT2:
	    case LFCNIFILEFH:
		/* Return the filehandle of the Ifile */
		fhp = (struct fhandle *)ap->a_data;
		fhp->fh_fsid = *fsidp;
		fh_size = sizeof(struct lfs_fhandle) -
		    offsetof(fhandle_t, fh_fid);
		return lfs_vptofh(fs->lfs_ivnode, &(fhp->fh_fid), &fh_size);

	    case LFCNREWIND:
		/* Move lfs_offset to the lowest-numbered segment */
		return lfs_rewind(fs, *(int *)ap->a_data);

	    case LFCNINVAL:
		/* Mark a segment SEGUSE_INVAL */
		LFS_SEGENTRY(sup, fs, *(int *)ap->a_data, bp);
		if (sup->su_nbytes > 0) {
			/* Segment still has live data; refuse. */
			brelse(bp, 0);
			lfs_unset_inval_all(fs);
			return EBUSY;
		}
		sup->su_flags |= SEGUSE_INVAL;
		VOP_BWRITE(bp->b_vp, bp);
		return 0;

	    case LFCNRESIZE:
		/* Resize the filesystem */
		return lfs_resize_fs(fs, *(int *)ap->a_data);

	    case LFCNWRAPSTOP:
	    case LFCNWRAPSTOP_COMPAT:
		/*
		 * Hold lfs_newseg at segment 0; if requested, sleep until
		 * the filesystem wraps around. To support external agents
		 * (dump, fsck-based regression test) that need to look at
		 * a snapshot of the filesystem, without necessarily
		 * requiring that all fs activity stops.
		 */
		if (fs->lfs_stoplwp == curlwp)
			return EALREADY;

		mutex_enter(&lfs_lock);
		while (fs->lfs_stoplwp != NULL)
			cv_wait(&fs->lfs_stopcv, &lfs_lock);
		fs->lfs_stoplwp = curlwp;
		if (fs->lfs_nowrap == 0)
			log(LOG_NOTICE, "%s: disabled log wrap\n", fs->lfs_fsmnt);
		++fs->lfs_nowrap;
		if (*(int *)ap->a_data == 1
		    || ap->a_command == LFCNWRAPSTOP_COMPAT) {
			log(LOG_NOTICE, "LFCNSTOPWRAP waiting for log wrap\n");
			error = mtsleep(&fs->lfs_nowrap, PCATCH | PUSER,
				"segwrap", 0, &lfs_lock);
			log(LOG_NOTICE, "LFCNSTOPWRAP done waiting\n");
			if (error) {
				/* Interrupted: release our hold again. */
				lfs_wrapgo(fs, VTOI(ap->a_vp), 0);
			}
		}
		mutex_exit(&lfs_lock);
		return 0;

	    case LFCNWRAPGO:
	    case LFCNWRAPGO_COMPAT:
		/*
		 * Having done its work, the agent wakes up the writer.
		 * If the argument is 1, it sleeps until a new segment
		 * is selected.
		 */
		mutex_enter(&lfs_lock);
		error = lfs_wrapgo(fs, VTOI(ap->a_vp),
				   ap->a_command == LFCNWRAPGO_COMPAT ? 1 :
				   *((int *)ap->a_data));
		mutex_exit(&lfs_lock);
		return error;

	    case LFCNWRAPPASS:
		/* Let the log advance one wrap while keeping the hold. */
		if ((VTOI(ap->a_vp)->i_lfs_iflags & LFSI_WRAPWAIT))
			return EALREADY;
		mutex_enter(&lfs_lock);
		if (fs->lfs_stoplwp != curlwp) {
			mutex_exit(&lfs_lock);
			return EALREADY;
		}
		if (fs->lfs_nowrap == 0) {
			mutex_exit(&lfs_lock);
			return EBUSY;
		}
		fs->lfs_wrappass = 1;
		wakeup(&fs->lfs_wrappass);
		/* Wait for the log to wrap, if asked */
		if (*(int *)ap->a_data) {
			mutex_enter(ap->a_vp->v_interlock);
			if (lfs_vref(ap->a_vp) != 0)
				panic("LFCNWRAPPASS: lfs_vref failed");
			VTOI(ap->a_vp)->i_lfs_iflags |= LFSI_WRAPWAIT;
			log(LOG_NOTICE, "LFCNPASS waiting for log wrap\n");
			error = mtsleep(&fs->lfs_nowrap, PCATCH | PUSER,
				"segwrap", 0, &lfs_lock);
			log(LOG_NOTICE, "LFCNPASS done waiting\n");
			VTOI(ap->a_vp)->i_lfs_iflags &= ~LFSI_WRAPWAIT;
			lfs_vunref(ap->a_vp);
		}
		mutex_exit(&lfs_lock);
		return error;

	    case LFCNWRAPSTATUS:
		mutex_enter(&lfs_lock);
		*(int *)ap->a_data = fs->lfs_wrapstatus;
		mutex_exit(&lfs_lock);
		return 0;

	    default:
		return ulfs_fcntl(v);
	}
	return 0;
}

/*
 * Return the last logical file offset that should be written for this file
 * if we're doing a write that ends at "size". If writing, we need to know
 * about sizes on disk, i.e. fragments if there are any; if reading, we need
 * to know about entire blocks.
 */
void
lfs_gop_size(struct vnode *vp, off_t size, off_t *eobp, int flags)
{
	struct inode *ip = VTOI(vp);
	struct lfs *fs = ip->i_lfs;
	daddr_t olbn, nlbn;

	olbn = lfs_lblkno(fs, ip->i_size);
	nlbn = lfs_lblkno(fs, size);
	/*
	 * Within the direct-block range and not growing past the current
	 * last block, a fragment-rounded size suffices; otherwise round
	 * up to a whole block.
	 */
	if (!(flags & GOP_SIZE_MEM) && nlbn < ULFS_NDADDR && olbn <= nlbn) {
		*eobp = lfs_fragroundup(fs, size);
	} else {
		*eobp = lfs_blkroundup(fs, size);
	}
}

#ifdef DEBUG
void lfs_dump_vop(void *);

/*
 * Debugging aid: print a vnode (under DDB) and its on-disk inode.
 */
void
lfs_dump_vop(void *v)
{
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		voff_t a_offlo;
		voff_t a_offhi;
		int a_flags;
	} */ *ap = v;

#ifdef DDB
	vfs_vnode_print(ap->a_vp, 0, printf);
#endif
	lfs_dump_dinode(VTOI(ap->a_vp)->i_din.ffs1_din);
}
#endif

/*
 * mmap is not supported on the Ifile; everything else goes to
 * ulfs_mmap.
 */
int
lfs_mmap(void *v)
{
	struct vop_mmap_args /* {
		const struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		vm_prot_t a_prot;
		kauth_cred_t a_cred;
	} */ *ap = v;

	if (VTOI(ap->a_vp)->i_number == LFS_IFILE_INUM)
		return EOPNOTSUPP;
	return ulfs_mmap(v);
}

/*
 * Extended attributes: not supported for ULFS1 and not yet implemented
 * for ULFS2; every entry point below is a stub.
 */
static int
lfs_openextattr(void *v)
{
	struct vop_openextattr_args /* {
		struct vnode *a_vp;
		kauth_cred_t a_cred;
		struct proc *a_p;
	} */ *ap = v;
	struct inode *ip = VTOI(ap->a_vp);
	struct ulfsmount *ump = ip->i_ump;
	//struct lfs *fs = ip->i_lfs;

	/* Not supported for ULFS1 file systems. */
	if (ump->um_fstype == ULFS1)
		return (EOPNOTSUPP);

	/* XXX Not implemented for ULFS2 file systems. */
	return (EOPNOTSUPP);
}

static int
lfs_closeextattr(void *v)
{
	struct vop_closeextattr_args /* {
		struct vnode *a_vp;
		int a_commit;
		kauth_cred_t a_cred;
		struct proc *a_p;
	} */ *ap = v;
	struct inode *ip = VTOI(ap->a_vp);
	struct ulfsmount *ump = ip->i_ump;
	//struct lfs *fs = ip->i_lfs;

	/* Not supported for ULFS1 file systems. */
	if (ump->um_fstype == ULFS1)
		return (EOPNOTSUPP);

	/* XXX Not implemented for ULFS2 file systems. */
	return (EOPNOTSUPP);
}

static int
lfs_getextattr(void *v)
{
	struct vop_getextattr_args /* {
		struct vnode *a_vp;
		int a_attrnamespace;
		const char *a_name;
		struct uio *a_uio;
		size_t *a_size;
		kauth_cred_t a_cred;
		struct proc *a_p;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct inode *ip = VTOI(vp);
	struct ulfsmount *ump = ip->i_ump;
	//struct lfs *fs = ip->i_lfs;
	int error;

	if (ump->um_fstype == ULFS1) {
#ifdef LFS_EXTATTR
		fstrans_start(vp->v_mount, FSTRANS_SHARED);
		error = ulfs_getextattr(ap);
		fstrans_done(vp->v_mount);
#else
		error = EOPNOTSUPP;
#endif
		return error;
	}

	/* XXX Not implemented for ULFS2 file systems. */
	return (EOPNOTSUPP);
}

static int
lfs_setextattr(void *v)
{
	struct vop_setextattr_args /* {
		struct vnode *a_vp;
		int a_attrnamespace;
		const char *a_name;
		struct uio *a_uio;
		kauth_cred_t a_cred;
		struct proc *a_p;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct inode *ip = VTOI(vp);
	struct ulfsmount *ump = ip->i_ump;
	//struct lfs *fs = ip->i_lfs;
	int error;

	if (ump->um_fstype == ULFS1) {
#ifdef LFS_EXTATTR
		fstrans_start(vp->v_mount, FSTRANS_SHARED);
		error = ulfs_setextattr(ap);
		fstrans_done(vp->v_mount);
#else
		error = EOPNOTSUPP;
#endif
		return error;
	}

	/* XXX Not implemented for ULFS2 file systems. */
	return (EOPNOTSUPP);
}

static int
lfs_listextattr(void *v)
{
	struct vop_listextattr_args /* {
		struct vnode *a_vp;
		int a_attrnamespace;
		struct uio *a_uio;
		size_t *a_size;
		kauth_cred_t a_cred;
		struct proc *a_p;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct inode *ip = VTOI(vp);
	struct ulfsmount *ump = ip->i_ump;
	//struct lfs *fs = ip->i_lfs;
	int error;

	if (ump->um_fstype == ULFS1) {
#ifdef LFS_EXTATTR
		fstrans_start(vp->v_mount, FSTRANS_SHARED);
		error = ulfs_listextattr(ap);
		fstrans_done(vp->v_mount);
#else
		error = EOPNOTSUPP;
#endif
		return error;
	}

	/* XXX Not implemented for ULFS2 file systems.
*/ 2325 return (EOPNOTSUPP); 2326 } 2327 2328 static int 2329 lfs_deleteextattr(void *v) 2330 { 2331 struct vop_deleteextattr_args /* { 2332 struct vnode *a_vp; 2333 int a_attrnamespace; 2334 kauth_cred_t a_cred; 2335 struct proc *a_p; 2336 } */ *ap = v; 2337 struct vnode *vp = ap->a_vp; 2338 struct inode *ip = VTOI(vp); 2339 struct ulfsmount *ump = ip->i_ump; 2340 //struct fs *fs = ip->i_lfs; 2341 int error; 2342 2343 if (ump->um_fstype == ULFS1) { 2344 #ifdef LFS_EXTATTR 2345 fstrans_start(vp->v_mount, FSTRANS_SHARED); 2346 error = ulfs_deleteextattr(ap); 2347 fstrans_done(vp->v_mount); 2348 #else 2349 error = EOPNOTSUPP; 2350 #endif 2351 return error; 2352 } 2353 2354 /* XXX Not implemented for ULFS2 file systems. */ 2355 return (EOPNOTSUPP); 2356 } 2357