xref: /netbsd-src/sys/ufs/ufs/ufs_readwrite.c (revision a8c74629f602faa0ccf8a463757d7baf858bbf3a)
/*	$NetBSD: ufs_readwrite.c,v 1.126 2020/04/23 21:47:09 ad Exp $	*/

/*-
 * Copyright (c) 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ufs_readwrite.c	8.11 (Berkeley) 5/8/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ufs_readwrite.c,v 1.126 2020/04/23 21:47:09 ad Exp $");

#define	FS			struct fs
#define	I_FS			i_fs
#define	READ			ffs_read
#define	READ_S			"ffs_read"
#define	WRITE			ffs_write
#define	WRITE_S			"ffs_write"
#define	BUFRD			ffs_bufrd
#define	BUFWR			ffs_bufwr
#define	ufs_blkoff		ffs_blkoff
#define	ufs_blksize		ffs_blksize
#define	ufs_lblkno		ffs_lblkno
#define	ufs_lblktosize		ffs_lblktosize
#define	ufs_blkroundup		ffs_blkroundup

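/*
 * The macros above turn this file into a textual template: the names
 * chosen here select the FFS flavor, and the file is meant to be
 * #included by a consumer rather than compiled on its own.  A minimal
 * sketch of that arrangement -- illustrative only, not the verbatim
 * build glue:
 */
#if 0
/* In a consumer such as ffs_vnops.c: */
#include <ufs/ufs/ufs_readwrite.c>
/* ...after which READ(void *v) below compiles as ffs_read(void *v). */
#endif
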
static int	ufs_post_read_update(struct vnode *, int, int);
static int	ufs_post_write_update(struct vnode *, struct uio *, int,
		    kauth_cred_t, off_t, int, int, int);

/*
 * Vnode op for reading.
 */
/* ARGSUSED */
int
READ(void *v)
{
	struct vop_read_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *vp;
	struct inode *ip;
	struct uio *uio;
	struct ufsmount *ump;
	vsize_t bytelen;
	int error, ioflag, advice;

	vp = ap->a_vp;
	ip = VTOI(vp);
	ump = ip->i_ump;
	uio = ap->a_uio;
	ioflag = ap->a_ioflag;
	error = 0;

	KASSERT(uio->uio_rw == UIO_READ);
	KASSERT(vp->v_type == VREG || vp->v_type == VDIR);

	/* XXX Eliminate me by refusing directory reads from userland.  */
	if (vp->v_type == VDIR)
		return BUFRD(vp, uio, ioflag, ap->a_cred);
	if ((u_int64_t)uio->uio_offset > ump->um_maxfilesize)
		return (EFBIG);
	if (uio->uio_resid == 0)
		return (0);

	if ((ip->i_flags & (SF_SNAPSHOT | SF_SNAPINVAL)) == SF_SNAPSHOT)
		return ffs_snapshot_read(vp, uio, ioflag);

	if (uio->uio_offset >= ip->i_size)
		goto out;

	KASSERT(vp->v_type == VREG);
	advice = IO_ADV_DECODE(ap->a_ioflag);
	while (uio->uio_resid > 0) {
		if (ioflag & IO_DIRECT) {
			genfs_directio(vp, uio, ioflag);
		}
		bytelen = MIN(ip->i_size - uio->uio_offset, uio->uio_resid);
		if (bytelen == 0)
			break;
		error = ubc_uiomove(&vp->v_uobj, uio, bytelen, advice,
		    UBC_READ | UBC_PARTIALOK | UBC_VNODE_FLAGS(vp));
		if (error)
			break;
	}

 out:
	error = ufs_post_read_update(vp, ap->a_ioflag, error);
	return (error);
}

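/*
 * Worked example of the EOF clamp in READ's loop above (a sketch): with
 * ip->i_size == 1000, uio_offset == 512 and uio_resid == 4096,
 * bytelen = MIN(1000 - 512, 4096) = 488, so ubc_uiomove() stops exactly
 * at end of file instead of mapping pages past it.
 */
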
/*
 * UFS op for reading via the buffer cache
 */
int
BUFRD(struct vnode *vp, struct uio *uio, int ioflag, kauth_cred_t cred)
{
	struct inode *ip;
	struct ufsmount *ump;
	FS *fs;
	struct buf *bp;
	daddr_t lbn, nextlbn;
	off_t bytesinfile;
	long size, xfersize, blkoffset;
	int error;

	KASSERT(VOP_ISLOCKED(vp));
	KASSERT(vp->v_type == VDIR || vp->v_type == VLNK);
	KASSERT(uio->uio_rw == UIO_READ);

	ip = VTOI(vp);
	ump = ip->i_ump;
	fs = ip->I_FS;
	error = 0;

	KASSERT(vp->v_type != VLNK || ip->i_size >= ump->um_maxsymlinklen);
	KASSERT(vp->v_type != VLNK || ump->um_maxsymlinklen != 0 ||
	    DIP(ip, blocks) == 0);

	if (uio->uio_offset > ump->um_maxfilesize)
		return EFBIG;
	if (uio->uio_resid == 0)
		return 0;

	KASSERT(!ISSET(ip->i_flags, (SF_SNAPSHOT | SF_SNAPINVAL)));

	if (uio->uio_offset >= ip->i_size)
		goto out;

	for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {
		bytesinfile = ip->i_size - uio->uio_offset;
		if (bytesinfile <= 0)
			break;
		lbn = ufs_lblkno(fs, uio->uio_offset);
		nextlbn = lbn + 1;
		size = ufs_blksize(fs, ip, lbn);
		blkoffset = ufs_blkoff(fs, uio->uio_offset);
		xfersize = MIN(MIN(fs->fs_bsize - blkoffset, uio->uio_resid),
		    bytesinfile);

		if (ufs_lblktosize(fs, nextlbn) >= ip->i_size)
			error = bread(vp, lbn, size, 0, &bp);
		else {
			int nextsize = ufs_blksize(fs, ip, nextlbn);
			error = breadn(vp, lbn,
			    size, &nextlbn, &nextsize, 1, 0, &bp);
		}
		if (error)
			break;

		/*
		 * We should only get non-zero b_resid when an I/O error
		 * has occurred, which should cause us to break above.
		 * However, if the short read did not cause an error,
		 * then we want to ensure that we do not uiomove bad
		 * or uninitialized data.
		 */
		size -= bp->b_resid;
		if (size < xfersize) {
			if (size == 0)
				break;
			xfersize = size;
		}
		error = uiomove((char *)bp->b_data + blkoffset, xfersize, uio);
		if (error)
			break;
		brelse(bp, 0);
	}
	if (bp != NULL)
		brelse(bp, 0);

 out:
	error = ufs_post_read_update(vp, ioflag, error);
	return (error);
}

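/*
 * Worked example of the b_resid clamp in BUFRD above (a sketch): if a
 * block read of size == 8192 completes without error but with
 * bp->b_resid == 4096, only the first 4096 bytes of bp->b_data are
 * valid, so xfersize is cut down to 4096 and uiomove() never copies the
 * uninitialized tail out to the caller.
 */
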
static int
ufs_post_read_update(struct vnode *vp, int ioflag, int oerror)
{
	struct inode *ip = VTOI(vp);
	int error = oerror;

	if (!(vp->v_mount->mnt_flag & MNT_NOATIME)) {
		ip->i_flag |= IN_ACCESS;
		if ((ioflag & IO_SYNC) == IO_SYNC) {
			error = UFS_WAPBL_BEGIN(vp->v_mount);
			if (error)
				goto out;
			error = UFS_UPDATE(vp, NULL, NULL, UPDATE_WAIT);
			UFS_WAPBL_END(vp->v_mount);
		}
	}

out:
	/* Read error overrides any inode update error.  */
	if (oerror)
		error = oerror;
	return error;
}

/*
 * Vnode op for writing.
 */
int
WRITE(void *v)
{
	struct vop_write_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *vp;
	struct uio *uio;
	struct inode *ip;
	FS *fs;
	kauth_cred_t cred;
	off_t osize, origoff, oldoff, preallocoff, endallocoff, nsize;
	int blkoffset, error, flags, ioflag, resid;
	int aflag;
	int extended = 0;
	vsize_t bytelen;
	bool async;
	struct ufsmount *ump;

	cred = ap->a_cred;
	ioflag = ap->a_ioflag;
	uio = ap->a_uio;
	vp = ap->a_vp;
	ip = VTOI(vp);
	ump = ip->i_ump;

	KASSERT(vp->v_size == ip->i_size);
	KASSERT(uio->uio_rw == UIO_WRITE);
	KASSERT(vp->v_type == VREG);
	KASSERT(!ISSET(ioflag, IO_JOURNALLOCKED));
	UFS_WAPBL_JUNLOCK_ASSERT(vp->v_mount);

	if (ioflag & IO_APPEND)
		uio->uio_offset = ip->i_size;
	if ((ip->i_flags & APPEND) && uio->uio_offset != ip->i_size)
		return (EPERM);

	fs = ip->I_FS;
	if (uio->uio_offset < 0 ||
	    (u_int64_t)uio->uio_offset + uio->uio_resid > ump->um_maxfilesize)
		return (EFBIG);
	if (uio->uio_resid == 0)
		return (0);

	flags = ioflag & IO_SYNC ? B_SYNC : 0;
	async = vp->v_mount->mnt_flag & MNT_ASYNC;
	origoff = uio->uio_offset;
	resid = uio->uio_resid;
	osize = ip->i_size;
	error = 0;

	KASSERT(vp->v_type == VREG);

	/*
	 * XXX The entire write operation must occur in a single WAPBL
	 * transaction because it may allocate disk blocks, if
	 * appending or filling holes, which is allowed to happen only
	 * if the write fully succeeds.
	 *
	 * If ubc_uiomove fails in the middle with EFAULT, we can clean
	 * up at the end with UFS_TRUNCATE.  But if the power fails in
	 * the middle, there would be nobody to deallocate the blocks,
	 * without an fsck to globally analyze the file system.
	 *
	 * If the increasingly inaccurately named WAPBL were augmented
	 * with rollback records for block allocations, then we could
	 * split this into multiple transactions and commit the
	 * allocations in the last one.
	 *
	 * But WAPBL doesn't have that notion now, so we'll have to
	 * live with gigantic transactions and WAPBL tentacles in
	 * genfs_getpages/putpages to cope with the possibility that
	 * the transaction may or may not be locked on entry to the
	 * page cache.
	 *
	 * And even if we added that notion to WAPBL, it wouldn't help
	 * us get rid of the tentacles in genfs_getpages/putpages
	 * because we'd have to interoperate with old implementations
	 * that assume they can replay the log without fsck.
	 */
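	/*
	 * The resulting shape, condensed (a sketch of the code below
	 * together with ufs_post_write_update()):
	 *
	 *	UFS_WAPBL_BEGIN(mp);
	 *	...allocate blocks and copy data, possibly failing...
	 *	...on error, UFS_TRUNCATE() back to the original size...
	 *	UFS_WAPBL_END(mp);
	 */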
	error = UFS_WAPBL_BEGIN(vp->v_mount);
	if (error) {
		return error;
	}

	preallocoff = round_page(ufs_blkroundup(fs, MAX(osize, uio->uio_offset)));
	aflag = ioflag & IO_SYNC ? B_SYNC : 0;
	nsize = MAX(osize, uio->uio_offset + uio->uio_resid);
	endallocoff = nsize - ufs_blkoff(fs, nsize);

	/*
	 * if we're increasing the file size, deal with expanding
	 * the fragment if there is one.
	 */

	if (nsize > osize && ufs_lblkno(fs, osize) < UFS_NDADDR &&
	    ufs_lblkno(fs, osize) != ufs_lblkno(fs, nsize) &&
	    ufs_blkroundup(fs, osize) != osize) {
		off_t eob;

		eob = ufs_blkroundup(fs, osize);
		uvm_vnp_setwritesize(vp, eob);
		error = ufs_balloc_range(vp, osize, eob - osize, cred, aflag);
		if (error)
			goto out;
		if (flags & B_SYNC) {
			rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
			VOP_PUTPAGES(vp, trunc_page(osize & fs->fs_bmask),
			    round_page(eob),
			    PGO_CLEANIT | PGO_SYNCIO | PGO_JOURNALLOCKED);
		}
	}

	while (uio->uio_resid > 0) {
		int ubc_flags = UBC_WRITE;
		bool overwrite; /* true if we're overwriting a whole block */
		off_t newoff;

		if (ioflag & IO_DIRECT) {
			genfs_directio(vp, uio, ioflag | IO_JOURNALLOCKED);
		}

		oldoff = uio->uio_offset;
		blkoffset = ufs_blkoff(fs, uio->uio_offset);
		bytelen = MIN(fs->fs_bsize - blkoffset, uio->uio_resid);
		if (bytelen == 0) {
			break;
		}

		/*
		 * if we're filling in a hole, allocate the blocks now and
		 * initialize the pages first.  if we're extending the file,
		 * we can safely allocate blocks without initializing pages
		 * since the new blocks will be inaccessible until the write
		 * is complete.
		 */
		overwrite = uio->uio_offset >= preallocoff &&
		    uio->uio_offset < endallocoff;
		if (!overwrite && (vp->v_vflag & VV_MAPPED) == 0 &&
		    ufs_blkoff(fs, uio->uio_offset) == 0 &&
		    (uio->uio_offset & PAGE_MASK) == 0) {
			vsize_t len;

			len = trunc_page(bytelen);
			len -= ufs_blkoff(fs, len);
			if (len > 0) {
				overwrite = true;
				bytelen = len;
			}
		}
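		/*
		 * Worked example (a sketch): with 8 KiB blocks, 4 KiB
		 * pages and an unmapped file, a write at offset 0 with
		 * bytelen == 8192 is both block- and page-aligned, so
		 * overwrite becomes true and the pages are faulted in
		 * busy (UBC_FAULTBUSY below) without reading the old
		 * block contents from disk first.
		 */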

		newoff = oldoff + bytelen;
		if (vp->v_size < newoff) {
			uvm_vnp_setwritesize(vp, newoff);
		}

		if (!overwrite) {
			error = ufs_balloc_range(vp, uio->uio_offset, bytelen,
			    cred, aflag);
			if (error)
				break;
		} else {
			genfs_node_wrlock(vp);
			error = GOP_ALLOC(vp, uio->uio_offset, bytelen,
			    aflag, cred);
			genfs_node_unlock(vp);
			if (error)
				break;
			ubc_flags |= UBC_FAULTBUSY;
		}

		/*
		 * copy the data.
		 */

		error = ubc_uiomove(&vp->v_uobj, uio, bytelen,
		    IO_ADV_DECODE(ioflag), ubc_flags | UBC_VNODE_FLAGS(vp));

		/*
		 * update UVM's notion of the size now that we've
		 * copied the data into the vnode's pages.
		 *
		 * we should update the size even when uiomove failed.
		 */

		if (vp->v_size < newoff) {
			uvm_vnp_setsize(vp, newoff);
			extended = 1;
		}

		if (error)
			break;

		/*
		 * flush what we just wrote if necessary.
		 * XXXUBC simplistic async flushing.
		 */

		if (!async && oldoff >> 16 != uio->uio_offset >> 16) {
			rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
			error = VOP_PUTPAGES(vp, (oldoff >> 16) << 16,
			    (uio->uio_offset >> 16) << 16,
			    PGO_CLEANIT | PGO_JOURNALLOCKED | PGO_LAZY);
			if (error)
				break;
		}
	}
	if (error == 0 && ioflag & IO_SYNC) {
		rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
		error = VOP_PUTPAGES(vp, trunc_page(origoff & fs->fs_bmask),
		    round_page(ufs_blkroundup(fs, uio->uio_offset)),
		    PGO_CLEANIT | PGO_SYNCIO | PGO_JOURNALLOCKED);
	}

out:
	error = ufs_post_write_update(vp, uio, ioflag, cred, osize, resid,
	    extended, error);
	UFS_WAPBL_END(vp->v_mount);

	return (error);
}
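
/*
 * A note on the async-flush heuristic above (a sketch): (off >> 16) << 16
 * rounds down to a 64 KiB boundary, so dirty pages are pushed out each
 * time a long write crosses from one 64 KiB window into the next.  E.g.
 * with oldoff == 0xfff0 and uio_offset == 0x10010 the window changed,
 * and the range [0x0, 0x10000) is handed to VOP_PUTPAGES() with PGO_LAZY.
 */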
456 
/*
 * UFS op for writing via the buffer cache
 */
int
BUFWR(struct vnode *vp, struct uio *uio, int ioflag, kauth_cred_t cred)
{
	struct inode *ip;
	struct ufsmount *ump;
	FS *fs;
	int flags;
	struct buf *bp;
	off_t osize;
	int resid, xfersize, size, blkoffset;
	daddr_t lbn;
	int extended = 0;
	int error;

	KASSERT(ISSET(ioflag, IO_NODELOCKED));
	KASSERT(VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
	KASSERT(vp->v_type == VDIR || vp->v_type == VLNK);
	KASSERT(vp->v_type != VDIR || ISSET(ioflag, IO_SYNC));
	KASSERT(uio->uio_rw == UIO_WRITE);
	KASSERT(ISSET(ioflag, IO_JOURNALLOCKED));
	UFS_WAPBL_JLOCK_ASSERT(vp->v_mount);

	ip = VTOI(vp);
	ump = ip->i_ump;
	fs = ip->I_FS;

	KASSERT(vp->v_size == ip->i_size);

	if (uio->uio_offset < 0 ||
	    uio->uio_resid > ump->um_maxfilesize ||
	    uio->uio_offset > (ump->um_maxfilesize - uio->uio_resid))
		return EFBIG;
	if (uio->uio_resid == 0)
		return 0;

	flags = ioflag & IO_SYNC ? B_SYNC : 0;
	resid = uio->uio_resid;
	osize = ip->i_size;
	error = 0;

	KASSERT(vp->v_type != VREG);

	/* XXX Should never have pages cached here.  */
	KASSERT(vp->v_uobj.uo_npages == 0);
	while (uio->uio_resid > 0) {
		lbn = ufs_lblkno(fs, uio->uio_offset);
		blkoffset = ufs_blkoff(fs, uio->uio_offset);
		xfersize = MIN(fs->fs_bsize - blkoffset, uio->uio_resid);
		if (fs->fs_bsize > xfersize)
			flags |= B_CLRBUF;
		else
			flags &= ~B_CLRBUF;

		error = UFS_BALLOC(vp, uio->uio_offset, xfersize, cred, flags,
		    &bp);

		if (error)
			break;
		if (uio->uio_offset + xfersize > ip->i_size) {
			ip->i_size = uio->uio_offset + xfersize;
			DIP_ASSIGN(ip, size, ip->i_size);
			uvm_vnp_setsize(vp, ip->i_size);
			extended = 1;
		}
		size = ufs_blksize(fs, ip, lbn) - bp->b_resid;
		if (xfersize > size)
			xfersize = size;

		error = uiomove((char *)bp->b_data + blkoffset, xfersize, uio);

		/*
		 * if we didn't clear the block and the uiomove failed,
		 * the buf will now contain part of some other file,
		 * so we need to invalidate it.
		 */
		if (error && (flags & B_CLRBUF) == 0) {
			brelse(bp, BC_INVAL);
			break;
		}
		if (ioflag & IO_SYNC)
			(void)bwrite(bp);
		else if (xfersize + blkoffset == fs->fs_bsize)
			bawrite(bp);
		else
			bdwrite(bp);
		if (error || xfersize == 0)
			break;
	}

	error = ufs_post_write_update(vp, uio, ioflag, cred, osize, resid,
	    extended, error);

	return (error);
}
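
/*
 * A note on the B_CLRBUF choice in BUFWR above (a sketch): when the
 * transfer covers only part of a block (xfersize < fs_bsize), UFS_BALLOC
 * must clear or read in the rest of the buffer, otherwise stale memory
 * would sit next to the new bytes; a full-block write skips that work
 * because uiomove() overwrites every byte of the buffer anyway.
 */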

static int
ufs_post_write_update(struct vnode *vp, struct uio *uio, int ioflag,
    kauth_cred_t cred, off_t osize, int resid, int extended, int oerror)
{
	struct inode *ip = VTOI(vp);
	int error = oerror;

	/* Trigger ctime and mtime updates, and atime if MNT_RELATIME.  */
	ip->i_flag |= IN_CHANGE | IN_UPDATE;
	if (vp->v_mount->mnt_flag & MNT_RELATIME)
		ip->i_flag |= IN_ACCESS;

	/*
	 * If we successfully wrote any data and we are not the superuser,
	 * we clear the setuid and setgid bits as a precaution against
	 * tampering.
	 */
	if (resid > uio->uio_resid && cred) {
		if (ip->i_mode & ISUID) {
			if (kauth_authorize_vnode(cred,
			    KAUTH_VNODE_RETAIN_SUID, vp, NULL, EPERM) != 0) {
				ip->i_mode &= ~ISUID;
				DIP_ASSIGN(ip, mode, ip->i_mode);
			}
		}

		if (ip->i_mode & ISGID) {
			if (kauth_authorize_vnode(cred,
			    KAUTH_VNODE_RETAIN_SGID, vp, NULL, EPERM) != 0) {
				ip->i_mode &= ~ISGID;
				DIP_ASSIGN(ip, mode, ip->i_mode);
			}
		}
	}

	/* If we successfully wrote anything, notify kevent listeners.  */
	if (resid > uio->uio_resid)
		VN_KNOTE(vp, NOTE_WRITE | (extended ? NOTE_EXTEND : 0));

	/*
	 * Update the size on disk: truncate back to original size on
	 * error, or reflect the new size on success.
	 */
	if (error) {
		(void) UFS_TRUNCATE(vp, osize, ioflag & IO_SYNC, cred);
		uio->uio_offset -= resid - uio->uio_resid;
		uio->uio_resid = resid;
	} else if (resid > uio->uio_resid && (ioflag & IO_SYNC) == IO_SYNC)
		error = UFS_UPDATE(vp, NULL, NULL, UPDATE_WAIT);
	else
		UFS_WAPBL_UPDATE(vp, NULL, NULL, 0);

	/* Make sure the vnode uvm size matches the inode file size.  */
	KASSERT(vp->v_size == ip->i_size);

	/* Write error overrides any inode update error.  */
	if (oerror)
		error = oerror;
	return error;
}
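
/*
 * Worked example of the error rewind above (a sketch): if a write of
 * resid == 1000 bytes fails after 600 have been copied, uio_resid is
 * 400 at the time of the error, so uio_offset is moved back by
 * 1000 - 400 = 600 and uio_resid is restored to 1000, leaving the uio
 * exactly as the caller passed it in while UFS_TRUNCATE() discards the
 * partially written data.
 */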