1 /*	$NetBSD: ufs_readwrite.c,v 1.128 2022/02/21 17:07:45 hannken Exp $	*/
2 
3 /*-
4  * Copyright (c) 1993
5  *	The Regents of the University of California.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. Neither the name of the University nor the names of its contributors
16  *    may be used to endorse or promote products derived from this software
17  *    without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  *
31  *	@(#)ufs_readwrite.c	8.11 (Berkeley) 5/8/95
32  */
33 
34 #include <sys/cdefs.h>
35 __KERNEL_RCSID(1, "$NetBSD: ufs_readwrite.c,v 1.128 2022/02/21 17:07:45 hannken Exp $");
36 
37 #define	FS			struct fs
38 #define	I_FS			i_fs
39 #define	READ			ffs_read
40 #define	READ_S			"ffs_read"
41 #define	WRITE			ffs_write
42 #define	WRITE_S			"ffs_write"
43 #define	BUFRD			ffs_bufrd
44 #define	BUFWR			ffs_bufwr
45 #define ufs_blkoff		ffs_blkoff
46 #define ufs_blksize		ffs_blksize
47 #define ufs_lblkno		ffs_lblkno
48 #define ufs_lblktosize		ffs_lblktosize
49 #define ufs_blkroundup		ffs_blkroundup
50 
51 static int	ufs_post_read_update(struct vnode *, int, int);
52 static int	ufs_post_write_update(struct vnode *, struct uio *, int,
53 		    kauth_cred_t, off_t, int, int);
54 
55 /*
56  * Vnode op for reading.
57  */
58 /* ARGSUSED */
59 int
60 READ(void *v)
61 {
62 	struct vop_read_args /* {
63 		struct vnode *a_vp;
64 		struct uio *a_uio;
65 		int a_ioflag;
66 		kauth_cred_t a_cred;
67 	} */ *ap = v;
68 	struct vnode *vp;
69 	struct inode *ip;
70 	struct uio *uio;
71 	struct ufsmount *ump;
72 	vsize_t bytelen;
73 	int error, ioflag, advice;
74 
75 	vp = ap->a_vp;
76 	ip = VTOI(vp);
77 	ump = ip->i_ump;
78 	uio = ap->a_uio;
79 	ioflag = ap->a_ioflag;
80 	error = 0;
81 
82 	KASSERT(uio->uio_rw == UIO_READ);
83 	KASSERT(vp->v_type == VREG || vp->v_type == VDIR);
84 
85 	/* XXX Eliminate me by refusing directory reads from userland.  */
86 	if (vp->v_type == VDIR)
87 		return BUFRD(vp, uio, ioflag, ap->a_cred);
88 	if ((u_int64_t)uio->uio_offset > ump->um_maxfilesize)
89 		return (EFBIG);
90 	if (uio->uio_resid == 0)
91 		return (0);
92 
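	/* Reads of a valid persistent snapshot go through the snapshot code. */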
93 	if ((ip->i_flags & (SF_SNAPSHOT | SF_SNAPINVAL)) == SF_SNAPSHOT)
94 		return ffs_snapshot_read(vp, uio, ioflag);
95 
96 	if (uio->uio_offset >= ip->i_size)
97 		goto out;
98 
99 	KASSERT(vp->v_type == VREG);
100 	advice = IO_ADV_DECODE(ap->a_ioflag);
101 	while (uio->uio_resid > 0) {
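		/*
		 * For direct I/O, try to transfer straight to the user
		 * buffer; anything not handled that way is copied
		 * through the page cache below.
		 */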
102 		if (ioflag & IO_DIRECT) {
103 			genfs_directio(vp, uio, ioflag);
104 		}
105 		bytelen = MIN(ip->i_size - uio->uio_offset, uio->uio_resid);
106 		if (bytelen == 0)
107 			break;
108 		error = ubc_uiomove(&vp->v_uobj, uio, bytelen, advice,
109 		    UBC_READ | UBC_PARTIALOK | UBC_VNODE_FLAGS(vp));
110 		if (error)
111 			break;
112 	}
113 
114  out:
115 	error = ufs_post_read_update(vp, ap->a_ioflag, error);
116 	return (error);
117 }
118 
119 /*
120  * UFS op for reading via the buffer cache
121  */
122 int
123 BUFRD(struct vnode *vp, struct uio *uio, int ioflag, kauth_cred_t cred)
124 {
125 	struct inode *ip;
126 	struct ufsmount *ump;
127 	FS *fs;
128 	struct buf *bp;
129 	daddr_t lbn, nextlbn;
130 	off_t bytesinfile;
131 	long size, xfersize, blkoffset;
132 	int error;
133 
134 	KASSERT(VOP_ISLOCKED(vp));
135 	KASSERT(vp->v_type == VDIR || vp->v_type == VLNK);
136 	KASSERT(uio->uio_rw == UIO_READ);
137 
138 	ip = VTOI(vp);
139 	ump = ip->i_ump;
140 	fs = ip->I_FS;
141 	error = 0;
142 
143 	KASSERT(vp->v_type != VLNK || ip->i_size >= ump->um_maxsymlinklen);
144 	KASSERT(vp->v_type != VLNK || ump->um_maxsymlinklen != 0 ||
145 	    DIP(ip, blocks) != 0);
146 
147 	if (uio->uio_offset > ump->um_maxfilesize)
148 		return EFBIG;
149 	if (uio->uio_resid == 0)
150 		return 0;
151 
152 	KASSERT(!ISSET(ip->i_flags, (SF_SNAPSHOT | SF_SNAPINVAL)));
153 
154 	if (uio->uio_offset >= ip->i_size)
155 		goto out;
156 
157 	for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {
158 		bytesinfile = ip->i_size - uio->uio_offset;
159 		if (bytesinfile <= 0)
160 			break;
161 		lbn = ufs_lblkno(fs, uio->uio_offset);
162 		nextlbn = lbn + 1;
163 		size = ufs_blksize(fs, ip, lbn);
164 		blkoffset = ufs_blkoff(fs, uio->uio_offset);
165 		xfersize = MIN(MIN(fs->fs_bsize - blkoffset, uio->uio_resid),
166 		    bytesinfile);
167 
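		/*
		 * Read the block, starting read-ahead of the next block
		 * unless this is the last block of the file.
		 */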
168 		if (ufs_lblktosize(fs, nextlbn) >= ip->i_size)
169 			error = bread(vp, lbn, size, 0, &bp);
170 		else {
171 			int nextsize = ufs_blksize(fs, ip, nextlbn);
172 			error = breadn(vp, lbn,
173 			    size, &nextlbn, &nextsize, 1, 0, &bp);
174 		}
175 		if (error)
176 			break;
177 
178 		/*
179 		 * We should only get non-zero b_resid when an I/O error
180 		 * has occurred, which should cause us to break above.
181 		 * However, if the short read did not cause an error,
182 		 * then we want to ensure that we do not uiomove bad
183 		 * or uninitialized data.
184 		 */
185 		size -= bp->b_resid;
186 		if (size < xfersize) {
187 			if (size == 0)
188 				break;
189 			xfersize = size;
190 		}
191 		error = uiomove((char *)bp->b_data + blkoffset, xfersize, uio);
192 		if (error)
193 			break;
194 		brelse(bp, 0);
195 	}
196 	if (bp != NULL)
197 		brelse(bp, 0);
198 
199  out:
200 	error = ufs_post_read_update(vp, ioflag, error);
201 	return (error);
202 }
203 
204 static int
205 ufs_post_read_update(struct vnode *vp, int ioflag, int oerror)
206 {
207 	struct inode *ip = VTOI(vp);
208 	int error = oerror;
209 
210 	if (!(vp->v_mount->mnt_flag & MNT_NOATIME)) {
211 		ip->i_flag |= IN_ACCESS;
212 		if ((ioflag & IO_SYNC) == IO_SYNC) {
213 			error = UFS_WAPBL_BEGIN(vp->v_mount);
214 			if (error)
215 				goto out;
216 			error = UFS_UPDATE(vp, NULL, NULL, UPDATE_WAIT);
217 			UFS_WAPBL_END(vp->v_mount);
218 		}
219 	}
220 
221 out:
222 	/* Read error overrides any inode update error.  */
223 	if (oerror)
224 		error = oerror;
225 	return error;
226 }
227 
228 /*
229  * Vnode op for writing.
230  */
231 int
232 WRITE(void *v)
233 {
234 	struct vop_write_args /* {
235 		struct vnode *a_vp;
236 		struct uio *a_uio;
237 		int a_ioflag;
238 		kauth_cred_t a_cred;
239 	} */ *ap = v;
240 	struct vnode *vp;
241 	struct uio *uio;
242 	struct inode *ip;
243 	FS *fs;
244 	kauth_cred_t cred;
245 	off_t osize, origoff, oldoff, preallocoff, endallocoff, nsize;
246 	int blkoffset, error, flags, ioflag, resid;
247 	int aflag;
248 	vsize_t bytelen;
249 	bool async;
250 	struct ufsmount *ump;
251 
252 	cred = ap->a_cred;
253 	ioflag = ap->a_ioflag;
254 	uio = ap->a_uio;
255 	vp = ap->a_vp;
256 	ip = VTOI(vp);
257 	ump = ip->i_ump;
258 
259 	KASSERT(vp->v_size == ip->i_size);
260 	KASSERT(uio->uio_rw == UIO_WRITE);
261 	KASSERT(vp->v_type == VREG);
262 	KASSERT(!ISSET(ioflag, IO_JOURNALLOCKED));
263 	UFS_WAPBL_JUNLOCK_ASSERT(vp->v_mount);
264 
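	/*
	 * O_APPEND writes start at the current end of file; an
	 * append-only file may only be written at end of file.
	 */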
265 	if (ioflag & IO_APPEND)
266 		uio->uio_offset = ip->i_size;
267 	if ((ip->i_flags & APPEND) && uio->uio_offset != ip->i_size)
268 		return (EPERM);
269 
270 	fs = ip->I_FS;
271 	if (uio->uio_offset < 0 ||
272 	    (u_int64_t)uio->uio_offset + uio->uio_resid > ump->um_maxfilesize)
273 		return (EFBIG);
274 	if (uio->uio_resid == 0)
275 		return (0);
276 
277 	flags = ioflag & IO_SYNC ? B_SYNC : 0;
278 	async = vp->v_mount->mnt_flag & MNT_ASYNC;
279 	origoff = uio->uio_offset;
280 	resid = uio->uio_resid;
281 	osize = ip->i_size;
282 	error = 0;
283 
284 	KASSERT(vp->v_type == VREG);
285 
286 	/*
287 	 * XXX The entire write operation must occur in a single WAPBL
288 	 * transaction because it may allocate disk blocks, if
289 	 * appending or filling holes, which is allowed to happen only
290 	 * if the write fully succeeds.
291 	 *
292 	 * If ubc_uiomove fails in the middle with EFAULT, we can clean
293 	 * up at the end with UFS_TRUNCATE.  But if the power fails in
294 	 * the middle, there would be nobody to deallocate the blocks,
295 	 * without an fsck to globally analyze the file system.
296 	 *
297 	 * If the increasingly inaccurately named WAPBL were augmented
298 	 * with rollback records for block allocations, then we could
299 	 * split this into multiple transactions and commit the
300 	 * allocations in the last one.
301 	 *
302 	 * But WAPBL doesn't have that notion now, so we'll have to
303 	 * live with gigantic transactions and WAPBL tentacles in
304 	 * genfs_getpages/putpages to cope with the possibility that
305 	 * the transaction may or may not be locked on entry to the
306 	 * page cache.
307 	 *
308 	 * And even if we added that notion to WAPBL, it wouldn't help
309 	 * us get rid of the tentacles in genfs_getpages/putpages
310 	 * because we'd have to interoperate with old implementations
311 	 * that assume they can replay the log without fsck.
312 	 */
313 	error = UFS_WAPBL_BEGIN(vp->v_mount);
314 	if (error) {
315 		return error;
316 	}
317 
318 
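	/*
	 * [preallocoff, endallocoff) delimits the part of the write that
	 * overwrites whole blocks: there the blocks can be allocated
	 * without reading or zeroing their pages first.
	 */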
319 	preallocoff = round_page(ufs_blkroundup(fs, MAX(osize, uio->uio_offset)));
320 	aflag = ioflag & IO_SYNC ? B_SYNC : 0;
321 	nsize = MAX(osize, uio->uio_offset + uio->uio_resid);
322 	endallocoff = nsize - ufs_blkoff(fs, nsize);
323 
324 	/*
325 	 * if we're increasing the file size, deal with expanding
326 	 * the fragment if there is one.
327 	 */
328 
329 	if (nsize > osize && ufs_lblkno(fs, osize) < UFS_NDADDR &&
330 	    ufs_lblkno(fs, osize) != ufs_lblkno(fs, nsize) &&
331 	    ufs_blkroundup(fs, osize) != osize) {
332 		off_t eob;
333 
334 		eob = ufs_blkroundup(fs, osize);
335 		uvm_vnp_setwritesize(vp, eob);
336 		error = ufs_balloc_range(vp, osize, eob - osize, cred, aflag);
337 		if (error)
338 			goto out;
339 		if (flags & B_SYNC) {
340 			rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
341 			VOP_PUTPAGES(vp, trunc_page(osize & fs->fs_bmask),
342 			    round_page(eob),
343 			    PGO_CLEANIT | PGO_SYNCIO | PGO_JOURNALLOCKED);
344 		}
345 	}
346 
347 	while (uio->uio_resid > 0) {
348 		int ubc_flags = UBC_WRITE;
349 		bool overwrite; /* if we're overwriting a whole block */
350 		off_t newoff;
351 
352 		if (ioflag & IO_DIRECT) {
353 			genfs_directio(vp, uio, ioflag | IO_JOURNALLOCKED);
354 		}
355 
356 		oldoff = uio->uio_offset;
357 		blkoffset = ufs_blkoff(fs, uio->uio_offset);
358 		bytelen = MIN(fs->fs_bsize - blkoffset, uio->uio_resid);
359 		if (bytelen == 0) {
360 			break;
361 		}
362 
363 		/*
364 		 * if we're filling in a hole, allocate the blocks now and
365 		 * initialize the pages first.  if we're extending the file,
366 		 * we can safely allocate blocks without initializing pages
367 		 * since the new blocks will be inaccessible until the write
368 		 * is complete.
369 		 */
370 		overwrite = uio->uio_offset >= preallocoff &&
371 		    uio->uio_offset < endallocoff;
372 		if (!overwrite && (vp->v_vflag & VV_MAPPED) == 0 &&
373 		    ufs_blkoff(fs, uio->uio_offset) == 0 &&
374 		    (uio->uio_offset & PAGE_MASK) == 0) {
375 			vsize_t len;
376 
377 			len = trunc_page(bytelen);
378 			len -= ufs_blkoff(fs, len);
379 			if (len > 0) {
380 				overwrite = true;
381 				bytelen = len;
382 			}
383 		}
384 
385 		newoff = oldoff + bytelen;
386 		if (vp->v_size < newoff) {
387 			uvm_vnp_setwritesize(vp, newoff);
388 		}
389 
390 		if (!overwrite) {
391 			error = ufs_balloc_range(vp, uio->uio_offset, bytelen,
392 			    cred, aflag);
393 			if (error)
394 				break;
395 		} else {
396 			genfs_node_wrlock(vp);
397 			error = GOP_ALLOC(vp, uio->uio_offset, bytelen,
398 			    aflag, cred);
399 			genfs_node_unlock(vp);
400 			if (error)
401 				break;
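			/*
			 * Whole blocks are being overwritten, so have
			 * ubc_uiomove fault in fresh pages rather than
			 * reading the old contents from disk.
			 */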
402 			ubc_flags |= UBC_FAULTBUSY;
403 		}
404 
405 		/*
406 		 * copy the data.
407 		 */
408 
409 		error = ubc_uiomove(&vp->v_uobj, uio, bytelen,
410 		    IO_ADV_DECODE(ioflag), ubc_flags | UBC_VNODE_FLAGS(vp));
411 
412 		/*
413 		 * update UVM's notion of the size now that we've
414 		 * copied the data into the vnode's pages.
415 		 *
416 		 * we should update the size even when uiomove failed.
417 		 */
418 
419 		if (vp->v_size < newoff) {
420 			uvm_vnp_setsize(vp, newoff);
421 		}
422 
423 		if (error)
424 			break;
425 
426 		/*
427 		 * flush what we just wrote if necessary.
428 		 * XXXUBC simplistic async flushing.
429 		 */
430 
431 		if (!async && oldoff >> 16 != uio->uio_offset >> 16) {
432 			rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
433 			error = VOP_PUTPAGES(vp, (oldoff >> 16) << 16,
434 			    (uio->uio_offset >> 16) << 16,
435 			    PGO_CLEANIT | PGO_JOURNALLOCKED | PGO_LAZY);
436 			if (error)
437 				break;
438 		}
439 	}
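	/* For sync writes, flush the range just written before returning. */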
440 	if (error == 0 && ioflag & IO_SYNC) {
441 		rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
442 		error = VOP_PUTPAGES(vp, trunc_page(origoff & fs->fs_bmask),
443 		    round_page(ufs_blkroundup(fs, uio->uio_offset)),
444 		    PGO_CLEANIT | PGO_SYNCIO | PGO_JOURNALLOCKED);
445 	}
446 
447 out:
448 	error = ufs_post_write_update(vp, uio, ioflag, cred, osize, resid,
449 	    error);
450 	UFS_WAPBL_END(vp->v_mount);
451 
452 	return (error);
453 }
454 
455 /*
456  * UFS op for writing via the buffer cache
457  */
458 int
459 BUFWR(struct vnode *vp, struct uio *uio, int ioflag, kauth_cred_t cred)
460 {
461 	struct inode *ip;
462 	struct ufsmount *ump;
463 	FS *fs;
464 	int flags;
465 	struct buf *bp;
466 	off_t osize;
467 	int resid, xfersize, size, blkoffset;
468 	daddr_t lbn;
469 	int error;
470 
471 	KASSERT(ISSET(ioflag, IO_NODELOCKED));
472 	KASSERT(VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
473 	KASSERT(vp->v_type == VDIR || vp->v_type == VLNK);
474 	KASSERT(vp->v_type != VDIR || ISSET(ioflag, IO_SYNC));
475 	KASSERT(uio->uio_rw == UIO_WRITE);
476 	KASSERT(ISSET(ioflag, IO_JOURNALLOCKED));
477 	UFS_WAPBL_JLOCK_ASSERT(vp->v_mount);
478 
479 	ip = VTOI(vp);
480 	ump = ip->i_ump;
481 	fs = ip->I_FS;
482 
483 	KASSERT(vp->v_size == ip->i_size);
484 
485 	if (uio->uio_offset < 0 ||
486 	    uio->uio_resid > ump->um_maxfilesize ||
487 	    uio->uio_offset > (ump->um_maxfilesize - uio->uio_resid))
488 		return EFBIG;
489 	if (uio->uio_resid == 0)
490 		return 0;
491 
492 	flags = ioflag & IO_SYNC ? B_SYNC : 0;
493 	resid = uio->uio_resid;
494 	osize = ip->i_size;
495 	error = 0;
496 
497 	KASSERT(vp->v_type != VREG);
498 
499 
500 	/* XXX Should never have pages cached here.  */
501 	KASSERT(vp->v_uobj.uo_npages == 0);
502 	while (uio->uio_resid > 0) {
503 		lbn = ufs_lblkno(fs, uio->uio_offset);
504 		blkoffset = ufs_blkoff(fs, uio->uio_offset);
505 		xfersize = MIN(fs->fs_bsize - blkoffset, uio->uio_resid);
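		/*
		 * If the write does not cover the whole block, have
		 * UFS_BALLOC read or clear the buffer first so the
		 * untouched part of the block holds valid data.
		 */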
506 		if (fs->fs_bsize > xfersize)
507 			flags |= B_CLRBUF;
508 		else
509 			flags &= ~B_CLRBUF;
510 
511 		error = UFS_BALLOC(vp, uio->uio_offset, xfersize, cred, flags,
512 		    &bp);
513 
514 		if (error)
515 			break;
516 		if (uio->uio_offset + xfersize > ip->i_size) {
517 			ip->i_size = uio->uio_offset + xfersize;
518 			DIP_ASSIGN(ip, size, ip->i_size);
519 			uvm_vnp_setsize(vp, ip->i_size);
520 		}
521 		size = ufs_blksize(fs, ip, lbn) - bp->b_resid;
522 		if (xfersize > size)
523 			xfersize = size;
524 
525 		error = uiomove((char *)bp->b_data + blkoffset, xfersize, uio);
526 
527 		/*
528 		 * if we didn't clear the block and the uiomove failed,
529 		 * the buf will now contain part of some other file,
530 		 * so we need to invalidate it.
531 		 */
532 		if (error && (flags & B_CLRBUF) == 0) {
533 			brelse(bp, BC_INVAL);
534 			break;
535 		}
536 		if (ioflag & IO_SYNC)
537 			(void)bwrite(bp);
538 		else if (xfersize + blkoffset == fs->fs_bsize)
539 			bawrite(bp);
540 		else
541 			bdwrite(bp);
542 		if (error || xfersize == 0)
543 			break;
544 	}
545 
546 	error = ufs_post_write_update(vp, uio, ioflag, cred, osize, resid,
547 	    error);
548 
549 	return (error);
550 }
551 
552 static int
553 ufs_post_write_update(struct vnode *vp, struct uio *uio, int ioflag,
554     kauth_cred_t cred, off_t osize, int resid, int oerror)
555 {
556 	struct inode *ip = VTOI(vp);
557 	int error = oerror;
558 
559 	/* Trigger ctime and mtime updates, and atime if MNT_RELATIME.  */
560 	ip->i_flag |= IN_CHANGE | IN_UPDATE;
561 	if (vp->v_mount->mnt_flag & MNT_RELATIME)
562 		ip->i_flag |= IN_ACCESS;
563 
564 	/*
565 	 * If we successfully wrote any data and we are not the superuser,
566 	 * we clear the setuid and setgid bits as a precaution against
567 	 * tampering.
568 	 */
569 	if (resid > uio->uio_resid && cred) {
570 		if (ip->i_mode & ISUID) {
571 			if (kauth_authorize_vnode(cred,
572 			    KAUTH_VNODE_RETAIN_SUID, vp, NULL, EPERM) != 0) {
573 				ip->i_mode &= ~ISUID;
574 				DIP_ASSIGN(ip, mode, ip->i_mode);
575 			}
576 		}
577 
578 		if (ip->i_mode & ISGID) {
579 			if (kauth_authorize_vnode(cred,
580 			    KAUTH_VNODE_RETAIN_SGID, vp, NULL, EPERM) != 0) {
581 				ip->i_mode &= ~ISGID;
582 				DIP_ASSIGN(ip, mode, ip->i_mode);
583 			}
584 		}
585 	}
586 
587 	/*
588 	 * Update the size on disk: truncate back to original size on
589 	 * error, or reflect the new size on success.
590 	 */
591 	if (error) {
592 		(void) UFS_TRUNCATE(vp, osize, ioflag & IO_SYNC, cred);
593 		uio->uio_offset -= resid - uio->uio_resid;
594 		uio->uio_resid = resid;
595 	} else if (resid > uio->uio_resid && (ioflag & IO_SYNC) == IO_SYNC)
596 		error = UFS_UPDATE(vp, NULL, NULL, UPDATE_WAIT);
597 	else
598 		UFS_WAPBL_UPDATE(vp, NULL, NULL, 0);
599 
600 	/* Make sure the vnode uvm size matches the inode file size.  */
601 	KASSERT(vp->v_size == ip->i_size);
602 
603 	/* Write error overrides any inode update error.  */
604 	if (oerror)
605 		error = oerror;
606 	return error;
607 }
608