xref: /netbsd-src/sys/ufs/ufs/ufs_readwrite.c (revision 065e3c6b56478e6ce98e939133eab01fd82617bb)
1 /*	$NetBSD: ufs_readwrite.c,v 1.129 2024/10/19 14:13:44 jakllsch Exp $	*/
2 
3 /*-
4  * Copyright (c) 1993
5  *	The Regents of the University of California.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. Neither the name of the University nor the names of its contributors
16  *    may be used to endorse or promote products derived from this software
17  *    without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  *
31  *	@(#)ufs_readwrite.c	8.11 (Berkeley) 5/8/95
32  */
33 
34 #include <sys/cdefs.h>
35 __KERNEL_RCSID(0, "$NetBSD: ufs_readwrite.c,v 1.129 2024/10/19 14:13:44 jakllsch Exp $");
36 
37 #include <sys/bitops.h>
38 
39 #define	FS			struct fs
40 #define	I_FS			i_fs
41 #define	READ			ffs_read
42 #define	READ_S			"ffs_read"
43 #define	WRITE			ffs_write
44 #define	WRITE_S			"ffs_write"
45 #define	BUFRD			ffs_bufrd
46 #define	BUFWR			ffs_bufwr
47 #define ufs_blkoff		ffs_blkoff
48 #define ufs_blksize		ffs_blksize
49 #define ufs_lblkno		ffs_lblkno
50 #define ufs_lblktosize		ffs_lblktosize
51 #define ufs_blkroundup		ffs_blkroundup
52 
53 static int	ufs_post_read_update(struct vnode *, int, int);
54 static int	ufs_post_write_update(struct vnode *, struct uio *, int,
55 		    kauth_cred_t, off_t, int, int);
56 
57 /*
58  * Vnode op for reading.
59  */
60 /* ARGSUSED */
61 int
62 READ(void *v)
63 {
64 	struct vop_read_args /* {
65 		struct vnode *a_vp;
66 		struct uio *a_uio;
67 		int a_ioflag;
68 		kauth_cred_t a_cred;
69 	} */ *ap = v;
70 	struct vnode *vp;
71 	struct inode *ip;
72 	struct uio *uio;
73 	struct ufsmount *ump;
74 	vsize_t bytelen;
75 	int error, ioflag, advice;
76 
77 	vp = ap->a_vp;
78 	ip = VTOI(vp);
79 	ump = ip->i_ump;
80 	uio = ap->a_uio;
81 	ioflag = ap->a_ioflag;
82 	error = 0;
83 
84 	KASSERT(uio->uio_rw == UIO_READ);
85 	KASSERT(vp->v_type == VREG || vp->v_type == VDIR);
86 
87 	/* XXX Eliminate me by refusing directory reads from userland.  */
88 	if (vp->v_type == VDIR)
89 		return BUFRD(vp, uio, ioflag, ap->a_cred);
90 	if ((u_int64_t)uio->uio_offset > ump->um_maxfilesize)
91 		return (EFBIG);
92 	if (uio->uio_resid == 0)
93 		return (0);
94 
95 	if ((ip->i_flags & (SF_SNAPSHOT | SF_SNAPINVAL)) == SF_SNAPSHOT)
96 		return ffs_snapshot_read(vp, uio, ioflag);
97 
98 	if (uio->uio_offset >= ip->i_size)
99 		goto out;
100 
101 	KASSERT(vp->v_type == VREG);
102 	advice = IO_ADV_DECODE(ap->a_ioflag);
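	/*
	 * Copy data out a chunk at a time, straight from the page
	 * cache through a UBC mapping.  IO_DIRECT requests are handed
	 * to genfs_directio() first; anything it could not transfer
	 * falls back to the cached path below.
	 */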
103 	while (uio->uio_resid > 0) {
104 		if (ioflag & IO_DIRECT) {
105 			genfs_directio(vp, uio, ioflag);
106 		}
107 		bytelen = MIN(ip->i_size - uio->uio_offset, uio->uio_resid);
108 		if (bytelen == 0)
109 			break;
110 		error = ubc_uiomove(&vp->v_uobj, uio, bytelen, advice,
111 		    UBC_READ | UBC_PARTIALOK | UBC_VNODE_FLAGS(vp));
112 		if (error)
113 			break;
114 	}
115 
116  out:
117 	error = ufs_post_read_update(vp, ap->a_ioflag, error);
118 	return (error);
119 }
120 
121 /*
122  * UFS op for reading via the buffer cache
123  */
124 int
125 BUFRD(struct vnode *vp, struct uio *uio, int ioflag, kauth_cred_t cred)
126 {
127 	struct inode *ip;
128 	struct ufsmount *ump;
129 	FS *fs;
130 	struct buf *bp;
131 	daddr_t lbn, nextlbn;
132 	off_t bytesinfile;
133 	long size, xfersize, blkoffset;
134 	int error;
135 
136 	KASSERT(VOP_ISLOCKED(vp));
137 	KASSERT(vp->v_type == VDIR || vp->v_type == VLNK);
138 	KASSERT(uio->uio_rw == UIO_READ);
139 
140 	ip = VTOI(vp);
141 	ump = ip->i_ump;
142 	fs = ip->I_FS;
143 	error = 0;
144 
145 	KASSERT(vp->v_type != VLNK || ip->i_size >= ump->um_maxsymlinklen);
146 	KASSERT(vp->v_type != VLNK || ump->um_maxsymlinklen != 0 ||
147 	    DIP(ip, blocks) != 0);
148 
149 	if (uio->uio_offset > ump->um_maxfilesize)
150 		return EFBIG;
151 	if (uio->uio_resid == 0)
152 		return 0;
153 
154 	KASSERT(!ISSET(ip->i_flags, (SF_SNAPSHOT | SF_SNAPINVAL)));
155 
156 	if (uio->uio_offset >= ip->i_size)
157 		goto out;
158 
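	/*
	 * Read through the buffer cache one logical block at a time,
	 * asking breadn() to start read-ahead on the next block unless
	 * we are already on the last block of the file.
	 */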
159 	for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {
160 		bytesinfile = ip->i_size - uio->uio_offset;
161 		if (bytesinfile <= 0)
162 			break;
163 		lbn = ufs_lblkno(fs, uio->uio_offset);
164 		nextlbn = lbn + 1;
165 		size = ufs_blksize(fs, ip, lbn);
166 		blkoffset = ufs_blkoff(fs, uio->uio_offset);
167 		xfersize = MIN(MIN(fs->fs_bsize - blkoffset, uio->uio_resid),
168 		    bytesinfile);
169 
170 		if (ufs_lblktosize(fs, nextlbn) >= ip->i_size)
171 			error = bread(vp, lbn, size, 0, &bp);
172 		else {
173 			int nextsize = ufs_blksize(fs, ip, nextlbn);
174 			error = breadn(vp, lbn,
175 			    size, &nextlbn, &nextsize, 1, 0, &bp);
176 		}
177 		if (error)
178 			break;
179 
180 		/*
181 		 * We should only get non-zero b_resid when an I/O error
182 		 * has occurred, which should cause us to break above.
183 		 * However, if the short read did not cause an error,
184 		 * then we want to ensure that we do not uiomove bad
185 		 * or uninitialized data.
186 		 */
187 		size -= bp->b_resid;
188 		if (size < xfersize) {
189 			if (size == 0)
190 				break;
191 			xfersize = size;
192 		}
193 		error = uiomove((char *)bp->b_data + blkoffset, xfersize, uio);
194 		if (error)
195 			break;
196 		brelse(bp, 0);
197 	}
198 	if (bp != NULL)
199 		brelse(bp, 0);
200 
201  out:
202 	error = ufs_post_read_update(vp, ioflag, error);
203 	return (error);
204 }
205 
206 static int
207 ufs_post_read_update(struct vnode *vp, int ioflag, int oerror)
208 {
209 	struct inode *ip = VTOI(vp);
210 	int error = oerror;
211 
212 	if (!(vp->v_mount->mnt_flag & MNT_NOATIME)) {
213 		ip->i_flag |= IN_ACCESS;
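		/*
		 * A synchronous read also pushes the access-time
		 * update to disk right away, under its own WAPBL
		 * transaction.
		 */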
214 		if ((ioflag & IO_SYNC) == IO_SYNC) {
215 			error = UFS_WAPBL_BEGIN(vp->v_mount);
216 			if (error)
217 				goto out;
218 			error = UFS_UPDATE(vp, NULL, NULL, UPDATE_WAIT);
219 			UFS_WAPBL_END(vp->v_mount);
220 		}
221 	}
222 
223 out:
224 	/* Read error overrides any inode update error.  */
225 	if (oerror)
226 		error = oerror;
227 	return error;
228 }
229 
230 /*
231  * Vnode op for writing.
232  */
233 int
234 WRITE(void *v)
235 {
236 	struct vop_write_args /* {
237 		struct vnode *a_vp;
238 		struct uio *a_uio;
239 		int a_ioflag;
240 		kauth_cred_t a_cred;
241 	} */ *ap = v;
242 	struct vnode *vp;
243 	struct uio *uio;
244 	struct inode *ip;
245 	FS *fs;
246 	kauth_cred_t cred;
247 	off_t osize, origoff, oldoff, preallocoff, endallocoff, nsize;
248 	int blkoffset, error, flags, ioflag, resid;
249 	int aflag;
250 	vsize_t bytelen;
251 	bool async;
252 	struct ufsmount *ump;
253 	const unsigned int fshift = ilog2(MAXPHYS);
254 
255 	cred = ap->a_cred;
256 	ioflag = ap->a_ioflag;
257 	uio = ap->a_uio;
258 	vp = ap->a_vp;
259 	ip = VTOI(vp);
260 	ump = ip->i_ump;
261 
262 	KASSERT(vp->v_size == ip->i_size);
263 	KASSERT(uio->uio_rw == UIO_WRITE);
264 	KASSERT(vp->v_type == VREG);
265 	KASSERT(!ISSET(ioflag, IO_JOURNALLOCKED));
266 	UFS_WAPBL_JUNLOCK_ASSERT(vp->v_mount);
267 
268 	if (ioflag & IO_APPEND)
269 		uio->uio_offset = ip->i_size;
270 	if ((ip->i_flags & APPEND) && uio->uio_offset != ip->i_size)
271 		return (EPERM);
272 
273 	fs = ip->I_FS;
274 	if (uio->uio_offset < 0 ||
275 	    (u_int64_t)uio->uio_offset + uio->uio_resid > ump->um_maxfilesize)
276 		return (EFBIG);
277 	if (uio->uio_resid == 0)
278 		return (0);
279 
280 	flags = ioflag & IO_SYNC ? B_SYNC : 0;
281 	async = vp->v_mount->mnt_flag & MNT_ASYNC;
282 	origoff = uio->uio_offset;
283 	resid = uio->uio_resid;
284 	osize = ip->i_size;
285 	error = 0;
286 
287 	KASSERT(vp->v_type == VREG);
288 
289 	/*
290 	 * XXX The entire write operation must occur in a single WAPBL
291 	 * transaction because it may allocate disk blocks, if
292 	 * appending or filling holes, which is allowed to happen only
293 	 * if the write fully succeeds.
294 	 *
295 	 * If ubc_uiomove fails in the middle with EFAULT, we can clean
296 	 * up at the end with UFS_TRUNCATE.  But if the power fails in
297 	 * the middle, there would be nobody to deallocate the blocks,
298 	 * without an fsck to globally analyze the file system.
299 	 *
300 	 * If the increasingly inaccurately named WAPBL were augmented
301 	 * with rollback records for block allocations, then we could
302 	 * split this into multiple transactions and commit the
303 	 * allocations in the last one.
304 	 *
305 	 * But WAPBL doesn't have that notion now, so we'll have to
306 	 * live with gigantic transactions and WAPBL tentacles in
307 	 * genfs_getpages/putpages to cope with the possibility that
308 	 * the transaction may or may not be locked on entry to the
309 	 * page cache.
310 	 *
311 	 * And even if we added that notion to WAPBL, it wouldn't help
312 	 * us get rid of the tentacles in genfs_getpages/putpages
313 	 * because we'd have to interoperate with old implementations
314 	 * that assume they can replay the log without fsck.
315 	 */
316 	error = UFS_WAPBL_BEGIN(vp->v_mount);
317 	if (error) {
318 		return error;
319 	}
320 
321 
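	/*
	 * [preallocoff, endallocoff) bounds the part of the write that
	 * covers whole blocks lying entirely beyond both the old end
	 * of file and the start of the write.  Those blocks hold no
	 * valid data yet and are completely overwritten, so they can
	 * be allocated with GOP_ALLOC and faulted in busy instead of
	 * being read or zeroed first (see the "overwrite" test below).
	 */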
322 	preallocoff = round_page(ufs_blkroundup(fs, MAX(osize, uio->uio_offset)));
323 	aflag = ioflag & IO_SYNC ? B_SYNC : 0;
324 	nsize = MAX(osize, uio->uio_offset + uio->uio_resid);
325 	endallocoff = nsize - ufs_blkoff(fs, nsize);
326 
327 	/*
328 	 * if we're increasing the file size, deal with expanding
329 	 * the fragment if there is one.
330 	 */
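	/*
	 * E.g. on an 8 KB-block/1 KB-fragment file system, growing a
	 * 5 KB file (whose last block is a 5-fragment partial block)
	 * past that block requires expanding the fragment to a full
	 * block first; ufs_balloc_range() allocates the space and
	 * initializes the new pages.  (The sizes here are only
	 * illustrative.)
	 */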
331 
332 	if (nsize > osize && ufs_lblkno(fs, osize) < UFS_NDADDR &&
333 	    ufs_lblkno(fs, osize) != ufs_lblkno(fs, nsize) &&
334 	    ufs_blkroundup(fs, osize) != osize) {
335 		off_t eob;
336 
337 		eob = ufs_blkroundup(fs, osize);
338 		uvm_vnp_setwritesize(vp, eob);
339 		error = ufs_balloc_range(vp, osize, eob - osize, cred, aflag);
340 		if (error)
341 			goto out;
342 		if (flags & B_SYNC) {
343 			rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
344 			VOP_PUTPAGES(vp, trunc_page(osize & fs->fs_bmask),
345 			    round_page(eob),
346 			    PGO_CLEANIT | PGO_SYNCIO | PGO_JOURNALLOCKED);
347 		}
348 	}
349 
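	/*
	 * Main write loop: process at most one block per iteration.
	 * For each chunk, make sure backing store and pages exist,
	 * then copy the user's data into the page cache with
	 * ubc_uiomove() and occasionally flush what has accumulated.
	 */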
350 	while (uio->uio_resid > 0) {
351 		int ubc_flags = UBC_WRITE;
352 		bool overwrite; /* true if we're overwriting a whole block */
353 		off_t newoff;
354 
355 		if (ioflag & IO_DIRECT) {
356 			genfs_directio(vp, uio, ioflag | IO_JOURNALLOCKED);
357 		}
358 
359 		oldoff = uio->uio_offset;
360 		blkoffset = ufs_blkoff(fs, uio->uio_offset);
361 		bytelen = MIN(fs->fs_bsize - blkoffset, uio->uio_resid);
362 		if (bytelen == 0) {
363 			break;
364 		}
365 
366 		/*
367 		 * if we're filling in a hole, allocate the blocks now and
368 		 * initialize the pages first.  if we're extending the file,
369 		 * we can safely allocate blocks without initializing pages
370 		 * since the new blocks will be inaccessible until the write
371 		 * is complete.
372 		 */
373 		overwrite = uio->uio_offset >= preallocoff &&
374 		    uio->uio_offset < endallocoff;
375 		if (!overwrite && (vp->v_vflag & VV_MAPPED) == 0 &&
376 		    ufs_blkoff(fs, uio->uio_offset) == 0 &&
377 		    (uio->uio_offset & PAGE_MASK) == 0) {
378 			vsize_t len;
379 
380 			len = trunc_page(bytelen);
381 			len -= ufs_blkoff(fs, len);
382 			if (len > 0) {
383 				overwrite = true;
384 				bytelen = len;
385 			}
386 		}
387 
388 		newoff = oldoff + bytelen;
389 		if (vp->v_size < newoff) {
390 			uvm_vnp_setwritesize(vp, newoff);
391 		}
392 
393 		if (!overwrite) {
394 			error = ufs_balloc_range(vp, uio->uio_offset, bytelen,
395 			    cred, aflag);
396 			if (error)
397 				break;
398 		} else {
399 			genfs_node_wrlock(vp);
400 			error = GOP_ALLOC(vp, uio->uio_offset, bytelen,
401 			    aflag, cred);
402 			genfs_node_unlock(vp);
403 			if (error)
404 				break;
405 			ubc_flags |= UBC_FAULTBUSY;
406 		}
407 
408 		/*
409 		 * copy the data.
410 		 */
411 
412 		error = ubc_uiomove(&vp->v_uobj, uio, bytelen,
413 		    IO_ADV_DECODE(ioflag), ubc_flags | UBC_VNODE_FLAGS(vp));
414 
415 		/*
416 		 * update UVM's notion of the size now that we've
417 		 * copied the data into the vnode's pages.
418 		 *
419 		 * we should update the size even when uiomove failed.
420 		 */
421 
422 		if (vp->v_size < newoff) {
423 			uvm_vnp_setsize(vp, newoff);
424 		}
425 
426 		if (error)
427 			break;
428 
429 		/*
430 		 * flush what we just wrote if necessary.
431 		 * XXXUBC simplistic async flushing.
432 		 */
433 
434 		if (!async && oldoff >> fshift != uio->uio_offset >> fshift) {
435 			rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
436 			error = VOP_PUTPAGES(vp, (oldoff >> fshift) << fshift,
437 			    (uio->uio_offset >> fshift) << fshift,
438 			    PGO_CLEANIT | PGO_JOURNALLOCKED | PGO_LAZY);
439 			if (error)
440 				break;
441 		}
442 	}
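	/*
	 * For synchronous writes, flush every page the loop may have
	 * dirtied before we end the WAPBL transaction.
	 */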
443 	if (error == 0 && ioflag & IO_SYNC) {
444 		rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
445 		error = VOP_PUTPAGES(vp, trunc_page(origoff & fs->fs_bmask),
446 		    round_page(ufs_blkroundup(fs, uio->uio_offset)),
447 		    PGO_CLEANIT | PGO_SYNCIO | PGO_JOURNALLOCKED);
448 	}
449 
450 out:
451 	error = ufs_post_write_update(vp, uio, ioflag, cred, osize, resid,
452 	    error);
453 	UFS_WAPBL_END(vp->v_mount);
454 
455 	return (error);
456 }
457 
458 /*
459  * UFS op for writing via the buffer cache
460  */
461 int
462 BUFWR(struct vnode *vp, struct uio *uio, int ioflag, kauth_cred_t cred)
463 {
464 	struct inode *ip;
465 	struct ufsmount *ump;
466 	FS *fs;
467 	int flags;
468 	struct buf *bp;
469 	off_t osize;
470 	int resid, xfersize, size, blkoffset;
471 	daddr_t lbn;
472 	int error;
473 
474 	KASSERT(ISSET(ioflag, IO_NODELOCKED));
475 	KASSERT(VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
476 	KASSERT(vp->v_type == VDIR || vp->v_type == VLNK);
477 	KASSERT(vp->v_type != VDIR || ISSET(ioflag, IO_SYNC));
478 	KASSERT(uio->uio_rw == UIO_WRITE);
479 	KASSERT(ISSET(ioflag, IO_JOURNALLOCKED));
480 	UFS_WAPBL_JLOCK_ASSERT(vp->v_mount);
481 
482 	ip = VTOI(vp);
483 	ump = ip->i_ump;
484 	fs = ip->I_FS;
485 
486 	KASSERT(vp->v_size == ip->i_size);
487 
488 	if (uio->uio_offset < 0 ||
489 	    uio->uio_resid > ump->um_maxfilesize ||
490 	    uio->uio_offset > (ump->um_maxfilesize - uio->uio_resid))
491 		return EFBIG;
492 	if (uio->uio_resid == 0)
493 		return 0;
494 
495 	flags = ioflag & IO_SYNC ? B_SYNC : 0;
496 	resid = uio->uio_resid;
497 	osize = ip->i_size;
498 	error = 0;
499 
500 	KASSERT(vp->v_type != VREG);
501 
502 
503 	/* XXX Should never have pages cached here.  */
504 	KASSERT(vp->v_uobj.uo_npages == 0);
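	/*
	 * Write one block per iteration through the buffer cache.
	 * When the transfer does not cover the whole block, B_CLRBUF
	 * makes UFS_BALLOC read or zero the buffer first so the
	 * untouched portion keeps valid data.
	 */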
505 	while (uio->uio_resid > 0) {
506 		lbn = ufs_lblkno(fs, uio->uio_offset);
507 		blkoffset = ufs_blkoff(fs, uio->uio_offset);
508 		xfersize = MIN(fs->fs_bsize - blkoffset, uio->uio_resid);
509 		if (fs->fs_bsize > xfersize)
510 			flags |= B_CLRBUF;
511 		else
512 			flags &= ~B_CLRBUF;
513 
514 		error = UFS_BALLOC(vp, uio->uio_offset, xfersize, cred, flags,
515 		    &bp);
516 
517 		if (error)
518 			break;
519 		if (uio->uio_offset + xfersize > ip->i_size) {
520 			ip->i_size = uio->uio_offset + xfersize;
521 			DIP_ASSIGN(ip, size, ip->i_size);
522 			uvm_vnp_setsize(vp, ip->i_size);
523 		}
524 		size = ufs_blksize(fs, ip, lbn) - bp->b_resid;
525 		if (xfersize > size)
526 			xfersize = size;
527 
528 		error = uiomove((char *)bp->b_data + blkoffset, xfersize, uio);
529 
530 		/*
531 		 * if we didn't clear the block and the uiomove failed,
532 		 * the buf will now contain part of some other file,
533 		 * so we need to invalidate it.
534 		 */
535 		if (error && (flags & B_CLRBUF) == 0) {
536 			brelse(bp, BC_INVAL);
537 			break;
538 		}
539 		if (ioflag & IO_SYNC)
540 			(void)bwrite(bp);
541 		else if (xfersize + blkoffset == fs->fs_bsize)
542 			bawrite(bp);
543 		else
544 			bdwrite(bp);
545 		if (error || xfersize == 0)
546 			break;
547 	}
548 
549 	error = ufs_post_write_update(vp, uio, ioflag, cred, osize, resid,
550 	    error);
551 
552 	return (error);
553 }
554 
555 static int
556 ufs_post_write_update(struct vnode *vp, struct uio *uio, int ioflag,
557     kauth_cred_t cred, off_t osize, int resid, int oerror)
558 {
559 	struct inode *ip = VTOI(vp);
560 	int error = oerror;
561 
562 	/* Trigger ctime and mtime updates, and atime if MNT_RELATIME.  */
563 	ip->i_flag |= IN_CHANGE | IN_UPDATE;
564 	if (vp->v_mount->mnt_flag & MNT_RELATIME)
565 		ip->i_flag |= IN_ACCESS;
566 
567 	/*
568 	 * If we successfully wrote any data and we are not the superuser,
569 	 * we clear the setuid and setgid bits as a precaution against
570 	 * tampering.
571 	 */
572 	if (resid > uio->uio_resid && cred) {
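		/*
		 * kauth(9) returns 0 only if this credential may
		 * retain the set-id bit; any other result (EPERM by
		 * default) means the bit is stripped.
		 */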
573 		if (ip->i_mode & ISUID) {
574 			if (kauth_authorize_vnode(cred,
575 			    KAUTH_VNODE_RETAIN_SUID, vp, NULL, EPERM) != 0) {
576 				ip->i_mode &= ~ISUID;
577 				DIP_ASSIGN(ip, mode, ip->i_mode);
578 			}
579 		}
580 
581 		if (ip->i_mode & ISGID) {
582 			if (kauth_authorize_vnode(cred,
583 			    KAUTH_VNODE_RETAIN_SGID, vp, NULL, EPERM) != 0) {
584 				ip->i_mode &= ~ISGID;
585 				DIP_ASSIGN(ip, mode, ip->i_mode);
586 			}
587 		}
588 	}
589 
590 	/*
591 	 * Update the size on disk: truncate back to original size on
592 	 * error, or reflect the new size on success.
593 	 */
594 	if (error) {
595 		(void) UFS_TRUNCATE(vp, osize, ioflag & IO_SYNC, cred);
596 		uio->uio_offset -= resid - uio->uio_resid;
597 		uio->uio_resid = resid;
598 	} else if (resid > uio->uio_resid && (ioflag & IO_SYNC) == IO_SYNC)
599 		error = UFS_UPDATE(vp, NULL, NULL, UPDATE_WAIT);
600 	else
601 		UFS_WAPBL_UPDATE(vp, NULL, NULL, 0);
602 
603 	/* Make sure the vnode uvm size matches the inode file size.  */
604 	KASSERT(vp->v_size == ip->i_size);
605 
606 	/* Write error overrides any inode update error.  */
607 	if (oerror)
608 		error = oerror;
609 	return error;
610 }
611