/*	$NetBSD: ulfs_inode.c,v 1.9 2013/07/28 00:37:07 dholland Exp $	*/
/*  from NetBSD: ufs_inode.c,v 1.89 2013/01/22 09:39:18 dholland Exp  */

/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_inode.c	8.9 (Berkeley) 5/14/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ulfs_inode.c,v 1.9 2013/07/28 00:37:07 dholland Exp $");

#if defined(_KERNEL_OPT)
#include "opt_lfs.h"
#include "opt_quota.h"
#include "opt_wapbl.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/namei.h>
#include <sys/kauth.h>
#include <sys/wapbl.h>
#include <sys/fstrans.h>
#include <sys/kmem.h>

#include <ufs/lfs/lfs_extern.h>

#include <ufs/lfs/ulfs_inode.h>
#include <ufs/lfs/ulfsmount.h>
#include <ufs/lfs/ulfs_extern.h>
#ifdef LFS_DIRHASH
#include <ufs/lfs/ulfs_dirhash.h>
#endif
#ifdef LFS_EXTATTR
#include <ufs/lfs/ulfs_extattr.h>
#endif

#include <uvm/uvm.h>

extern int prtactive;

/*
 * Last reference to an inode.  If necessary, write or delete it.
 */
int
ulfs_inactive(void *v)
{
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		bool *a_recycle;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct inode *ip = VTOI(vp);
	struct mount *transmp;
	mode_t mode;
	int error = 0;

	transmp = vp->v_mount;
	fstrans_start(transmp, FSTRANS_LAZY);
	/*
	 * Ignore inodes related to stale file handles.
	 */
	if (ip->i_mode == 0)
		goto out;
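	/*
	 * If the link count has dropped to zero on a writable mount,
	 * the file has been removed while still in use: truncate away
	 * its data blocks, release its quota, and clear the on-disk
	 * mode; the inode itself is freed later, from ulfs_reclaim().
	 */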
	if (ip->i_nlink <= 0 && (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
#ifdef LFS_EXTATTR
		ulfs_extattr_vnode_inactive(vp, curlwp);
#endif
		if (ip->i_size != 0) {
			error = lfs_truncate(vp, (off_t)0, 0, NOCRED);
		}
#if defined(LFS_QUOTA) || defined(LFS_QUOTA2)
		(void)lfs_chkiq(ip, -1, NOCRED, 0);
#endif
		DIP_ASSIGN(ip, rdev, 0);
		mode = ip->i_mode;
		ip->i_mode = 0;
		ip->i_omode = mode;
		DIP_ASSIGN(ip, mode, 0);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		/*
		 * Defer final inode free and update to ulfs_reclaim().
		 */
	}

	if (ip->i_flag & (IN_CHANGE | IN_UPDATE | IN_MODIFIED)) {
		lfs_update(vp, NULL, NULL, 0);
	}

out:
	/*
	 * If we are done with the inode, reclaim it
	 * so that it can be reused immediately.
	 */
	*ap->a_recycle = (ip->i_mode == 0);
	VOP_UNLOCK(vp);
	fstrans_done(transmp);
	return (error);
}

/*
 * Reclaim an inode so that it can be used for other purposes.
 */
int
ulfs_reclaim(struct vnode *vp)
{
	struct inode *ip = VTOI(vp);

	if (prtactive && vp->v_usecount > 1)
		vprint("ulfs_reclaim: pushing active", vp);

	/* XXX: do we really need two of these? */
	/* note: originally the first was inside a wapbl txn */
	lfs_update(vp, NULL, NULL, UPDATE_CLOSE);
	lfs_update(vp, NULL, NULL, UPDATE_CLOSE);

	/*
	 * Remove the inode from its hash chain.
	 */
	ulfs_ihashrem(ip);

	if (ip->i_devvp) {
		vrele(ip->i_devvp);
		ip->i_devvp = 0;
	}
#if defined(LFS_QUOTA) || defined(LFS_QUOTA2)
	ulfsquota_free(ip);
#endif
#ifdef LFS_DIRHASH
	if (ip->i_dirhash != NULL)
		ulfsdirhash_free(ip);
#endif
	return (0);
}

/*
 * allocate a range of blocks in a file.
 * after this function returns, any page entirely contained within the range
 * will map to invalid data and thus must be overwritten before it is made
 * accessible to others.
 */

int
ulfs_balloc_range(struct vnode *vp, off_t off, off_t len, kauth_cred_t cred,
    int flags)
{
	off_t neweof;	/* file size after the operation */
	off_t neweob;	/* offset next to the last block after the operation */
	off_t pagestart; /* starting offset of range covered by pgs */
	off_t eob;	/* offset next to allocated blocks */
	struct uvm_object *uobj;
	int i, delta, error, npages;
	int bshift = vp->v_mount->mnt_fs_bshift;
	int bsize = 1 << bshift;
	int ppb = MAX(bsize >> PAGE_SHIFT, 1);
	struct vm_page **pgs;
	size_t pgssize;
	UVMHIST_FUNC("ulfs_balloc_range"); UVMHIST_CALLED(ubchist);
	UVMHIST_LOG(ubchist, "vp %p off 0x%x len 0x%x u_size 0x%x",
	    vp, off, len, vp->v_size);

	neweof = MAX(vp->v_size, off + len);
	GOP_SIZE(vp, neweof, &neweob, 0);

	error = 0;
	uobj = &vp->v_uobj;

	/*
	 * read or create pages covering the range of the allocation and
	 * keep them locked until the new block is allocated, so there
	 * will be no window where the old contents of the new block are
	 * visible to racing threads.
	 */

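	/*
	 * pagestart is "off" rounded down to both a page and a block
	 * boundary; npages is at most one block's worth of pages (ppb)
	 * and is clipped at the new end of the object, so the pgs array
	 * covers the pages backing the block where the allocation starts.
	 */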
	pagestart = trunc_page(off) & ~(bsize - 1);
	npages = MIN(ppb, (round_page(neweob) - pagestart) >> PAGE_SHIFT);
	pgssize = npages * sizeof(struct vm_page *);
	pgs = kmem_zalloc(pgssize, KM_SLEEP);

	/*
	 * adjust off to be block-aligned.
	 */

	delta = off & (bsize - 1);
	off -= delta;
	len += delta;

	genfs_node_wrlock(vp);
	mutex_enter(uobj->vmobjlock);
	error = VOP_GETPAGES(vp, pagestart, pgs, &npages, 0,
	    VM_PROT_WRITE, 0, PGO_SYNCIO | PGO_PASTEOF | PGO_NOBLOCKALLOC |
	    PGO_NOTIMESTAMP | PGO_GLOCKHELD);
	if (error) {
		/* drop the genfs node lock taken above before bailing out */
		genfs_node_unlock(vp);
		goto out;
	}

	/*
	 * now allocate the range.
	 */

	error = GOP_ALLOC(vp, off, len, flags, cred);
	genfs_node_unlock(vp);

	/*
	 * if the allocation succeeded, clear PG_CLEAN on all the pages
	 * and clear PG_RDONLY on any pages that are now fully backed
	 * by disk blocks.  if the allocation failed, we do not invalidate
	 * the pages since they might have already existed and been dirty,
	 * in which case we need to keep them around.  if we created the pages,
	 * they will be clean and read-only, and leaving such pages
	 * in the cache won't cause any problems.
	 */

	GOP_SIZE(vp, off + len, &eob, 0);
	mutex_enter(uobj->vmobjlock);
	mutex_enter(&uvm_pageqlock);
	for (i = 0; i < npages; i++) {
		KASSERT((pgs[i]->flags & PG_RELEASED) == 0);
		if (!error) {
			if (off <= pagestart + (i << PAGE_SHIFT) &&
			    pagestart + ((i + 1) << PAGE_SHIFT) <= eob) {
				pgs[i]->flags &= ~PG_RDONLY;
			}
			pgs[i]->flags &= ~PG_CLEAN;
		}
		uvm_pageactivate(pgs[i]);
	}
	mutex_exit(&uvm_pageqlock);
	uvm_page_unbusy(pgs, npages);
	mutex_exit(uobj->vmobjlock);

out:
	kmem_free(pgs, pgssize);
	return error;
}