/*
 * Copyright (c) 1989 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms are permitted
 * provided that the above copyright notice and this paragraph are
 * duplicated in all such forms and that any documentation,
 * advertising materials, and other materials related to such
 * distribution and use acknowledge that the software was developed
 * by the University of California, Berkeley.  The name of the
 * University may not be used to endorse or promote products derived
 * from this software without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 *	@(#)nfs_bio.c	7.2 (Berkeley) 10/19/89
 */

#include "param.h"
#include "user.h"
#include "buf.h"
#include "vnode.h"
#include "trace.h"
#include "mount.h"
#include "nfsnode.h"
#include "nfsiom.h"

/* True and false, how exciting */
#define	TRUE	1
#define	FALSE	0

/*
 * Vnode op for read using bio
 * Any similarity to readip() is purely coincidental
 */
nfs_read(vp, uio, offp, ioflag, cred)
	register struct vnode *vp;
	struct uio *uio;
	off_t *offp;
	int ioflag;
	struct ucred *cred;
{
	register struct nfsnode *np = VTONFS(vp);
	struct buf *bp;
	struct vattr vattr;
	daddr_t lbn, bn, rablock;
	int error = 0;
	int diff;
	long n, on;
	int count;

	if (!(ioflag & IO_NODELOCKED))
		nfs_lock(vp);
	/*
	 * Avoid caching directories. Once everything is using getdirentries()
	 * this will never happen anyhow.
	 */
	if (vp->v_type == VDIR) {
		error = nfs_readrpc(vp, uio, offp, cred);
		if (!(ioflag & IO_NODELOCKED))
			nfs_unlock(vp);
		return (error);
	}
	uio->uio_offset = *offp;
	count = uio->uio_resid;
	if (uio->uio_rw != UIO_READ)
		panic("nfs_read mode");
	if (vp->v_type != VREG)
		panic("nfs_read type");
	if (uio->uio_resid == 0)
		goto out;
	if (uio->uio_offset < 0) {
		error = EINVAL;
		goto out;
	}
	/*
	 * If the file's modify time on the server has changed since the
	 * last read rpc or you have written to the file,
	 * you may have lost data cache consistency with the
	 * server, so flush all of the file's data out of the cache.
	 * This will implicitly bring the modify time up to date, since
	 * up to date attributes are returned in the reply to any write rpc's
	 * NB: This implies that cache data can be read when up to
	 * NFS_ATTRTIMEO seconds out of date. If you find that you need current
	 * attributes this could be forced by setting n_attrstamp to 0 before
	 * the nfs_getattr() call.
	 */
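	/*
	 * The two branches below implement that check: if this client has
	 * written the file (NMODIFIED), flush the cached blocks unconditionally
	 * and fetch fresh attributes; otherwise compare the cached modify time
	 * against the attributes returned by nfs_getattr() (which may themselves
	 * be up to NFS_ATTRTIMEO seconds old) and flush only if the file has
	 * changed on the server.
	 */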
	if (np->n_flag & NMODIFIED) {
		np->n_flag &= ~NMODIFIED;
		if (error = nfs_blkflush(vp, (daddr_t)0, np->n_size, TRUE))
			goto out;
		if (error = nfs_getattr(vp, &vattr, cred))
			goto out;
		np->n_size = vattr.va_size;
		np->n_mtime = vattr.va_mtime.tv_sec;
	} else {
		if (error = nfs_getattr(vp, &vattr, cred))
			goto out;
		if (np->n_mtime != vattr.va_mtime.tv_sec) {
			if (error = nfs_blkflush(vp, (daddr_t)0, np->n_size, TRUE))
				goto out;
			np->n_size = vattr.va_size;
			np->n_mtime = vattr.va_mtime.tv_sec;
		}
	}
	np->n_flag |= NBUFFERED;
	do {
		lbn = uio->uio_offset >> NFS_BIOSHIFT;
		on = uio->uio_offset & (NFS_BIOSIZE-1);
		n = MIN((unsigned)(NFS_BIOSIZE - on), uio->uio_resid);
		diff = np->n_size - uio->uio_offset;
		if (diff <= 0)
			goto out;
		if (diff < n)
			n = diff;
		bn = lbn*(NFS_BIOSIZE/DEV_BSIZE);
		rablock = (lbn+1)*(NFS_BIOSIZE/DEV_BSIZE);
		if (np->n_lastr+1 == lbn)
			error = breada(vp, bn, NFS_BIOSIZE, rablock, NFS_BIOSIZE,
				cred, &bp);
		else
			error = bread(vp, bn, NFS_BIOSIZE, cred, &bp);
		np->n_lastr = lbn;
		if (bp->b_resid) {
			diff = (on >= (NFS_BIOSIZE-bp->b_resid)) ? 0 :
				(NFS_BIOSIZE-bp->b_resid-on);
			n = MIN(n, diff);
		}
		if (error) {
			brelse(bp);
			goto out;
		}
		if (n > 0)
			error = uiomove(bp->b_un.b_addr + on, (int)n, uio);
		if (n+on == NFS_BIOSIZE || uio->uio_offset == np->n_size)
			bp->b_flags |= B_AGE;
		brelse(bp);
	} while (error == 0 && uio->uio_resid > 0 && n != 0);
out:
	*offp = uio->uio_offset;
	if (!(ioflag & IO_NODELOCKED))
		nfs_unlock(vp);
	return (error);
}

/*
 * Vnode op for write using bio
 */
nfs_write(vp, uio, offp, ioflag, cred)
	register struct vnode *vp;
	register struct uio *uio;
	off_t *offp;
	int ioflag;
	struct ucred *cred;
{
	struct buf *bp;
	struct nfsnode *np = VTONFS(vp);
	daddr_t lbn, bn;
	int i, n, on;
	int flags, count, size;
	int error = 0;
	int cnt;
	u_long osize;

	if ((ioflag & IO_NODELOCKED) == 0)
		nfs_lock(vp);
	/* Should we try and do this ?? */
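	/*
	 * For IO_APPEND the starting offset is taken from the locally cached
	 * file size (np->n_size), which may lag behind the server if another
	 * client has extended the file since the attributes were last refreshed.
	 */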
	if (vp->v_type == VREG && (ioflag & IO_APPEND))
		*offp = np->n_size;
	uio->uio_offset = *offp;
	cnt = uio->uio_resid;
#ifdef notdef
	osize = np->n_size;
#endif
	if (uio->uio_rw != UIO_WRITE)
		panic("nfs_write mode");
	if (vp->v_type != VREG)
		panic("nfs_write type");
	if (uio->uio_offset < 0) {
		error = EINVAL;
		goto out;
	}
	if (uio->uio_resid == 0)
		goto out;
	/*
	 * Maybe this should be above the vnode op call, but so long as
	 * file servers have no limits, I don't think it matters.
	 */
	if (vp->v_type == VREG &&
	    uio->uio_offset + uio->uio_resid >
	      u.u_rlimit[RLIMIT_FSIZE].rlim_cur) {
		psignal(u.u_procp, SIGXFSZ);
		error = EFBIG;
		goto out;
	}
	np->n_flag |= (NMODIFIED|NBUFFERED);
	do {
		lbn = uio->uio_offset >> NFS_BIOSHIFT;
		on = uio->uio_offset & (NFS_BIOSIZE-1);
		n = MIN((unsigned)(NFS_BIOSIZE - on), uio->uio_resid);
		if (uio->uio_offset+n > np->n_size)
			np->n_size = uio->uio_offset+n;
		bn = lbn*(NFS_BIOSIZE/DEV_BSIZE);
		count = howmany(NFS_BIOSIZE, CLBYTES);
		for (i = 0; i < count; i++)
			munhash(vp, bn + i * CLBYTES / DEV_BSIZE);
		bp = getblk(vp, bn, NFS_BIOSIZE);
		if (bp->b_wcred == NOCRED) {
			crhold(cred);
			bp->b_wcred = cred;
		}
		if (bp->b_dirtyend > 0) {
			/*
			 * If the new write will leave a contiguous
			 * dirty area, just update the b_dirtyoff and
			 * b_dirtyend, otherwise force a write rpc of
			 * the old dirty area.
			 */
			if (on <= bp->b_dirtyend && (on+n) >= bp->b_dirtyoff) {
				bp->b_dirtyoff = MIN(on, bp->b_dirtyoff);
				bp->b_dirtyend = MAX((on+n), bp->b_dirtyend);
			} else {
				/*
				 * Like bwrite() but without the brelse
				 */
				bp->b_flags &= ~(B_READ | B_DONE |
					B_ERROR | B_DELWRI | B_ASYNC);
				u.u_ru.ru_oublock++;
				VOP_STRATEGY(bp);
				error = biowait(bp);
				if (bp->b_flags & B_ERROR) {
					brelse(bp);
					if (bp->b_error)
						error = bp->b_error;
					else
						error = EIO;
					goto out;
				}
				bp->b_dirtyoff = on;
				bp->b_dirtyend = on+n;
			}
		} else {
			bp->b_dirtyoff = on;
			bp->b_dirtyend = on+n;
		}
		if (error = uiomove(bp->b_un.b_addr + on, n, uio))
			goto out;
		if ((n+on) == NFS_BIOSIZE) {
			bp->b_flags |= B_AGE;
			bawrite(bp);
		} else {
			bdwrite(bp);
		}
	} while (error == 0 && uio->uio_resid > 0 && n != 0);
#ifdef notdef
	/* Should we try and do this for nfs ?? */
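	/*
	 * If this were enabled, an error on an IO_UNIT write would roll the
	 * cached file size back to osize (saved under the matching notdef
	 * block above) and skip advancing *offp, rather than reporting the
	 * partial transfer.
	 */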
	if (error && (ioflag & IO_UNIT))
		np->n_size = osize;
	else
#endif
		*offp += cnt - uio->uio_resid;
out:
	if ((ioflag & IO_NODELOCKED) == 0)
		nfs_unlock(vp);
	return (error);
}

/*
 * Flush and invalidate all of the buffers associated with the blocks of vp
 */
nfs_blkflush(vp, blkno, size, invalidate)
	struct vnode *vp;
	daddr_t blkno;
	long size;
	int invalidate;
{
	register struct buf *ep;
	struct buf *dp;
	daddr_t curblkno, last;
	int s, error, allerrors = 0;

	last = blkno + btodb(size);
	for (curblkno = blkno; curblkno <= last;
	    curblkno += (NFS_BIOSIZE / DEV_BSIZE)) {
		dp = BUFHASH(vp, curblkno);
loop:
		for (ep = dp->b_forw; ep != dp; ep = ep->b_forw) {
			if (ep->b_vp != vp || (ep->b_flags & B_INVAL))
				continue;
			if (curblkno != ep->b_blkno)
				continue;
			s = splbio();
			if (ep->b_flags & B_BUSY) {
				ep->b_flags |= B_WANTED;
				sleep((caddr_t)ep, PRIBIO+1);
				splx(s);
				goto loop;
			}
			splx(s);
			notavail(ep);
			if (ep->b_flags & B_DELWRI) {
				ep->b_flags &= ~B_ASYNC;
				if (error = bwrite(ep))
					allerrors = error;
				goto loop;
			}
			if (invalidate) {
				ep->b_flags |= B_INVAL;
				brelvp(ep);
			}
			brelse(ep);
		}
	}
	return (allerrors);
}