/*
 * Copyright (c) 2000-2001, Boris Popov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by Boris Popov.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/fs/smbfs/smbfs_io.c,v 1.3.2.3 2003/01/17 08:20:26 tjr Exp $
 * $DragonFly: src/sys/vfs/smbfs/smbfs_io.c,v 1.22 2006/03/24 18:35:34 dillon Exp $
 *
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/resourcevar.h>	/* defines plimit structure in proc struct */
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/fcntl.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
/*
#include <sys/ioccom.h>
*/
#include <netproto/smb/smb.h>
#include <netproto/smb/smb_conn.h>
#include <netproto/smb/smb_subr.h>

#include "smbfs.h"
#include "smbfs_node.h"
#include "smbfs_subr.h"

#include <sys/buf.h>

#include <sys/thread2.h>

/*#define SMBFS_RWGENERIC*/

extern int smbfs_pbuf_freecnt;

static int smbfs_fastlookup = 1;

SYSCTL_DECL(_vfs_smbfs);
SYSCTL_INT(_vfs_smbfs, OID_AUTO, fastlookup, CTLFLAG_RW, &smbfs_fastlookup, 0, "");

/*
 * Emit directory entries for a readdir on an SMB directory vnode.
 * "." and ".." are synthesized locally; the remaining entries come from
 * an SMB search whose context is cached in n_dirseq and which is
 * reopened whenever the requested offset does not match the cached
 * search position.
 */
static int
smbfs_readvdir(struct vnode *vp, struct uio *uio, struct ucred *cred)
{
	struct smb_cred scred;
	struct smbfs_fctx *ctx;
	struct vnode *newvp;
	struct smbnode *np = VTOSMB(vp);
	int error, offset, retval/*, *eofflag = ap->a_eofflag*/;

	SMBVDEBUG("dirname='%s'\n", np->n_name);
	smb_makescred(&scred, uio->uio_td, cred);

	if (uio->uio_resid < 0 || uio->uio_offset < 0 ||
	    uio->uio_offset > INT_MAX)
		return(EINVAL);

	error = 0;
	offset = uio->uio_offset;

	if (uio->uio_resid > 0 && offset < 1) {
		if (vop_write_dirent(&error, uio, np->n_ino, DT_DIR, 1, "."))
			goto done;
		if (error)
			goto done;
		++offset;
	}

	if (uio->uio_resid > 0 && offset < 2) {
		if (vop_write_dirent(&error, uio,
		    np->n_parent ? VTOSMB(np->n_parent)->n_ino : 2,
		    DT_DIR, 2, ".."))
			goto done;
		if (error)
			goto done;
		++offset;
	}

	if (uio->uio_resid == 0)
		goto done;

	if (offset != np->n_dirofs || np->n_dirseq == NULL) {
		SMBVDEBUG("Reopening search %ld:%ld\n",
		    (long)offset, (long)np->n_dirofs);
		if (np->n_dirseq) {
			smbfs_findclose(np->n_dirseq, &scred);
			np->n_dirseq = NULL;
		}
		np->n_dirofs = 2;
		error = smbfs_findopen(np, "*", 1,
		    SMB_FA_SYSTEM | SMB_FA_HIDDEN | SMB_FA_DIR,
		    &scred, &ctx);
		if (error) {
			SMBVDEBUG("cannot open search, error = %d\n", error);
			return error;
		}
		np->n_dirseq = ctx;
	} else {
		ctx = np->n_dirseq;
	}
	while (np->n_dirofs < offset) {
		error = smbfs_findnext(ctx, offset - np->n_dirofs, &scred);
		++np->n_dirofs;
		if (error) {
			smbfs_findclose(np->n_dirseq, &scred);
			np->n_dirseq = NULL;
			return error == ENOENT ? 0 : error;
		}
	}
	error = 0;
	while (uio->uio_resid > 0 && !error) {
		/*
		 * Overestimate the size of a record a bit, doesn't really
		 * hurt to be wrong here.
		 */
		error = smbfs_findnext(ctx,
		    uio->uio_resid / _DIRENT_RECLEN(255) + 1, &scred);
		if (error)
			break;
		np->n_dirofs++;
		++offset;

		retval = vop_write_dirent(&error, uio, ctx->f_attr.fa_ino,
		    (ctx->f_attr.fa_attr & SMB_FA_DIR) ? DT_DIR : DT_REG,
		    ctx->f_nmlen, ctx->f_name);
		if (retval)
			break;
		if (smbfs_fastlookup && !error) {
			error = smbfs_nget(vp->v_mount, vp, ctx->f_name,
			    ctx->f_nmlen, &ctx->f_attr, &newvp);
			if (!error)
				vput(newvp);
		}
	}
	if (error == ENOENT)
		error = 0;
done:
	uio->uio_offset = offset;
	return error;
}
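
/*
 * Read from a file or directory vnode.  Directory reads are handed to
 * smbfs_readvdir() above.  For regular files the cached attributes are
 * revalidated first, so that a server-side change (detected through a
 * differing mtime) flushes stale cached buffers via smbfs_vinvalbuf()
 * before the data is fetched with smb_read().
 */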
int
smbfs_readvnode(struct vnode *vp, struct uio *uiop, struct ucred *cred)
{
	struct thread *td;
	struct smbmount *smp = VFSTOSMBFS(vp->v_mount);
	struct smbnode *np = VTOSMB(vp);
	struct vattr vattr;
	struct smb_cred scred;
	int error, lks;

	/*
	 * Protect against a transfer method that is not supported for now
	 */
	if (uiop->uio_segflg == UIO_NOCOPY)
		return EOPNOTSUPP;

	if (vp->v_type != VREG && vp->v_type != VDIR) {
		SMBFSERR("vn types other than VREG or VDIR are unsupported!\n");
		return EIO;
	}
	if (uiop->uio_resid == 0)
		return 0;
	if (uiop->uio_offset < 0)
		return EINVAL;
/*	if (uiop->uio_offset + uiop->uio_resid > smp->nm_maxfilesize)
		return EFBIG;*/
	td = uiop->uio_td;
	if (vp->v_type == VDIR) {
		lks = LK_EXCLUSIVE;	/* lockstatus(&vp->v_lock, td); */
		if (lks == LK_SHARED)
			vn_lock(vp, LK_UPGRADE | LK_RETRY, td);
		error = smbfs_readvdir(vp, uiop, cred);
		if (lks == LK_SHARED)
			vn_lock(vp, LK_DOWNGRADE | LK_RETRY, td);
		return error;
	}

/*	biosize = SSTOCN(smp->sm_share)->sc_txmax;*/
	if (np->n_flag & NMODIFIED) {
		smbfs_attr_cacheremove(vp);
		error = VOP_GETATTR(vp, &vattr, td);
		if (error)
			return error;
		np->n_mtime.tv_sec = vattr.va_mtime.tv_sec;
	} else {
		error = VOP_GETATTR(vp, &vattr, td);
		if (error)
			return error;
		if (np->n_mtime.tv_sec != vattr.va_mtime.tv_sec) {
			error = smbfs_vinvalbuf(vp, V_SAVE, td, 1);
			if (error)
				return error;
			np->n_mtime.tv_sec = vattr.va_mtime.tv_sec;
		}
	}
	smb_makescred(&scred, td, cred);
	return smb_read(smp->sm_share, np->n_fid, uiop, &scred);
}

/*
 * Write to a regular file vnode.  IO_APPEND and IO_SYNC writes flush
 * cached data first; a write that would exceed the process file size
 * limit raises SIGXFSZ and fails with EFBIG.
 */
int
smbfs_writevnode(struct vnode *vp, struct uio *uiop,
		 struct ucred *cred, int ioflag)
{
	struct thread *td;
	struct smbmount *smp = VTOSMBFS(vp);
	struct smbnode *np = VTOSMB(vp);
	struct smb_cred scred;
	int error = 0;

	if (vp->v_type != VREG) {
		SMBERROR("vn types other than VREG unsupported!\n");
		return EIO;
	}
	SMBVDEBUG("ofs=%d,resid=%d\n", (int)uiop->uio_offset, uiop->uio_resid);
	if (uiop->uio_offset < 0)
		return EINVAL;
/*	if (uiop->uio_offset + uiop->uio_resid > smp->nm_maxfilesize)
		return (EFBIG);*/
	td = uiop->uio_td;
	if (ioflag & (IO_APPEND | IO_SYNC)) {
		if (np->n_flag & NMODIFIED) {
			smbfs_attr_cacheremove(vp);
			error = smbfs_vinvalbuf(vp, V_SAVE, td, 1);
			if (error)
				return error;
		}
		if (ioflag & IO_APPEND) {
#ifdef notyet
			/*
			 * File size can be changed by another client
			 */
			smbfs_attr_cacheremove(vp);
			error = VOP_GETATTR(vp, &vattr, td);
			if (error)
				return (error);
#endif
			uiop->uio_offset = np->n_size;
		}
	}
	if (uiop->uio_resid == 0)
		return 0;
	if (td->td_proc &&
	    uiop->uio_offset + uiop->uio_resid >
	    td->td_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
		psignal(td->td_proc, SIGXFSZ);
		return EFBIG;
	}
	smb_makescred(&scred, td, cred);
	error = smb_write(smp->sm_share, np->n_fid, uiop, &scred);
	SMBVDEBUG("after: ofs=%d,resid=%d\n",
	    (int)uiop->uio_offset, uiop->uio_resid);
	if (!error) {
		if (uiop->uio_offset > np->n_size) {
			np->n_size = uiop->uio_offset;
			vnode_pager_setsize(vp, np->n_size);
		}
	}
	return error;
}
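
/*
 * Contract sketch for smbfs_doio() below (descriptive, not normative):
 * the caller hands in a bio whose buffer has B_READ set for reads and
 * clear for writes, with bio_offset giving the file offset; on return
 * bp->b_resid and bp->b_error reflect the outcome and biodone() has
 * already been called on the bio.
 */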
/*
 * Do an I/O operation to/from a cache block.
 */
int
smbfs_doio(struct vnode *vp, struct bio *bio, struct ucred *cr, struct thread *td)
{
	struct buf *bp = bio->bio_buf;
	struct smbmount *smp = VFSTOSMBFS(vp->v_mount);
	struct smbnode *np = VTOSMB(vp);
	struct uio uio, *uiop = &uio;
	struct iovec io;
	struct smb_cred scred;
	int error = 0;

	uiop->uio_iov = &io;
	uiop->uio_iovcnt = 1;
	uiop->uio_segflg = UIO_SYSSPACE;
	uiop->uio_td = td;

	smb_makescred(&scred, td, cr);

	if (bp->b_flags & B_READ) {
		io.iov_len = uiop->uio_resid = bp->b_bcount;
		io.iov_base = bp->b_data;
		uiop->uio_rw = UIO_READ;
		switch (vp->v_type) {
		case VREG:
			uiop->uio_offset = bio->bio_offset;
			error = smb_read(smp->sm_share, np->n_fid, uiop, &scred);
			if (error)
				break;
			if (uiop->uio_resid) {
				/*
				 * Zero the part of the buffer the server
				 * did not fill (a short read near EOF).
				 */
				int left = uiop->uio_resid;
				int nread = bp->b_bcount - left;
				if (left > 0)
					bzero((char *)bp->b_data + nread, left);
			}
			break;
		default:
			printf("smbfs_doio: type %x unexpected\n", vp->v_type);
			break;
		}
		if (error) {
			bp->b_error = error;
			bp->b_flags |= B_ERROR;
		}
	} else { /* write */
		if (bio->bio_offset + bp->b_dirtyend > np->n_size)
			bp->b_dirtyend = np->n_size - bio->bio_offset;

		if (bp->b_dirtyend > bp->b_dirtyoff) {
			io.iov_len = uiop->uio_resid =
			    bp->b_dirtyend - bp->b_dirtyoff;
			uiop->uio_offset = bio->bio_offset + bp->b_dirtyoff;
			io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
			uiop->uio_rw = UIO_WRITE;
			error = smb_write(smp->sm_share, np->n_fid, uiop, &scred);

			/*
			 * For an interrupted write, the buffer is still
			 * valid and the write hasn't been pushed to the
			 * server yet, so we can't set B_ERROR; instead the
			 * interruption is reported by setting B_EINTR.  For
			 * the B_ASYNC case, B_EINTR is not relevant, so the
			 * rpc attempt is essentially a noop.  For the case
			 * of a V3 write rpc not being committed to stable
			 * storage, the block is still dirty and requires
			 * either a commit rpc or another write rpc with
			 * iomode == NFSV3WRITE_FILESYNC before the block is
			 * reused.  This is indicated by setting the B_DELWRI
			 * and B_NEEDCOMMIT flags.  (This logic and comment
			 * were inherited from the NFS client code.)
			 */
			if (error == EINTR
			    || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
				crit_enter();
				bp->b_flags &= ~(B_INVAL|B_NOCACHE);
				if ((bp->b_flags & B_ASYNC) == 0)
					bp->b_flags |= B_EINTR;
				if ((bp->b_flags & B_PAGING) == 0) {
					bdirty(bp);
					bp->b_flags &= ~B_DONE;
				}
				crit_exit();
			} else {
				if (error) {
					bp->b_flags |= B_ERROR;
					bp->b_error = error;
				}
				bp->b_dirtyoff = bp->b_dirtyend = 0;
			}
		} else {
			bp->b_resid = 0;
			biodone(bio);
			return 0;
		}
	}
	bp->b_resid = uiop->uio_resid;
	biodone(bio);
	return error;
}

/*
 * Vnode op for VM getpages.
 * Wish list: get rid of the multiple I/O routines.
 *
 * smbfs_getpages(struct vnode *a_vp, vm_page_t *a_m, int a_count,
 *		  int a_reqpage, vm_ooffset_t a_offset)
 */
int
smbfs_getpages(struct vop_getpages_args *ap)
{
#ifdef SMBFS_RWGENERIC
	return vnode_pager_generic_getpages(ap->a_vp, ap->a_m, ap->a_count,
	    ap->a_reqpage);
#else
	int i, error, nextoff, size, toff, npages, count;
	int doclose;
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	struct vnode *vp;
	struct thread *td = curthread;	/* XXX */
	struct ucred *cred;
	struct smbmount *smp;
	struct smbnode *np;
	struct smb_cred scred;
	vm_page_t *pages;

	KKASSERT(td->td_proc);

	vp = ap->a_vp;
	cred = td->td_proc->p_ucred;
	np = VTOSMB(vp);
	smp = VFSTOSMBFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;

	if (vp->v_object == NULL) {
		printf("smbfs_getpages: called with non-merged cache vnode??\n");
		return VM_PAGER_ERROR;
	}
	smb_makescred(&scred, td, cred);

	bp = getpbuf(&smbfs_pbuf_freecnt);
	npages = btoc(count);
	kva = (vm_offset_t)bp->b_data;
	pmap_qenter(kva, pages, npages);

	iov.iov_base = (caddr_t)kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = td;

	/*
	 * This is kinda nasty.  Since smbfs is physically closing the
	 * fid on close(), we have to reopen it if necessary.  There are
	 * other races here too, such as if another process opens the
	 * same file while we are blocked in read. XXX
	 */
	error = 0;
	doclose = 0;
	if (np->n_opencount == 0) {
		error = smbfs_smb_open(np, SMB_AM_OPENREAD, &scred);
		if (error == 0)
			doclose = 1;
	}
	if (error == 0)
		error = smb_read(smp->sm_share, np->n_fid, &uio, &scred);
	if (doclose)
		smbfs_smb_close(smp->sm_share, np->n_fid, NULL, &scred);
	pmap_qremove(kva, npages);

	relpbuf(bp, &smbfs_pbuf_freecnt);

	if (error && (uio.uio_resid == count)) {
		printf("smbfs_getpages: error %d\n", error);
		for (i = 0; i < npages; i++) {
			if (ap->a_reqpage != i)
				vnode_pager_freepage(pages[i]);
		}
		return VM_PAGER_ERROR;
	}

	size = count - uio.uio_resid;

	for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
		vm_page_t m;
		nextoff = toff + PAGE_SIZE;
		m = pages[i];

		m->flags &= ~PG_ZERO;

		if (nextoff <= size) {
			m->valid = VM_PAGE_BITS_ALL;
			m->dirty = 0;
		} else {
			int nvalid = ((size + DEV_BSIZE - 1) - toff) &
			    ~(DEV_BSIZE - 1);
			vm_page_set_validclean(m, 0, nvalid);
		}

		if (i != ap->a_reqpage) {
			/*
			 * Whether or not to leave the page activated is up
			 * in the air, but we should put the page on a page
			 * queue somewhere (it already is in the object).
			 * Empirical results appear to show that
			 * deactivating pages is best.
			 */

			/*
			 * Just in case someone was asking for this page we
			 * now tell them that it is ok to use.
			 */
			if (!error) {
				if (m->flags & PG_WANTED)
					vm_page_activate(m);
				else
					vm_page_deactivate(m);
				vm_page_wakeup(m);
			} else {
				vnode_pager_freepage(m);
			}
		}
	}
	return 0;
#endif /* SMBFS_RWGENERIC */
}
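
/*
 * Worked example for the partial-page case in smbfs_getpages() above
 * (hypothetical numbers, assuming PAGE_SIZE = 4096 and DEV_BSIZE = 512):
 * if the read returned size = 5000 bytes, the second page (toff = 4096)
 * gets nvalid = ((5000 + 511) - 4096) & ~511 = 1024, i.e. the 904 bytes
 * of real data rounded up to the next DEV_BSIZE boundary are marked
 * valid and clean; the rest of the page stays invalid.
 */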
/*
 * Vnode op for VM putpages.
 * Possible bug: all I/O is done in synchronous mode.
 * Note that vop_close always invalidates pages before the close, so it
 * is not necessary to open the vnode.
 *
 * smbfs_putpages(struct vnode *a_vp, vm_page_t *a_m, int a_count, int a_sync,
 *		  int *a_rtvals, vm_ooffset_t a_offset)
 */
int
smbfs_putpages(struct vop_putpages_args *ap)
{
	int error;
	struct vnode *vp = ap->a_vp;
	struct thread *td = curthread;	/* XXX */
	struct ucred *cred;

#ifdef SMBFS_RWGENERIC
	KKASSERT(td->td_proc);
	cred = td->td_proc->p_ucred;
	VOP_OPEN(vp, FWRITE, cred, NULL, td);
	error = vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count,
	    ap->a_sync, ap->a_rtvals);
	VOP_CLOSE(vp, FWRITE, cred, td);
	return error;
#else
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	int i, npages, count;
	int doclose;
	int *rtvals;
	struct smbmount *smp;
	struct smbnode *np;
	struct smb_cred scred;
	vm_page_t *pages;

	KKASSERT(td->td_proc);
	cred = td->td_proc->p_ucred;
/*	VOP_OPEN(vp, FWRITE, cred, td);*/
	np = VTOSMB(vp);
	smp = VFSTOSMBFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;
	rtvals = ap->a_rtvals;
	npages = btoc(count);

	for (i = 0; i < npages; i++) {
		rtvals[i] = VM_PAGER_AGAIN;
	}

	bp = getpbuf(&smbfs_pbuf_freecnt);
	kva = (vm_offset_t)bp->b_data;
	pmap_qenter(kva, pages, npages);

	iov.iov_base = (caddr_t)kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	uio.uio_td = td;
	SMBVDEBUG("ofs=%d,resid=%d\n", (int)uio.uio_offset, uio.uio_resid);

	smb_makescred(&scred, td, cred);

	/*
	 * This is kinda nasty.  Since smbfs is physically closing the
	 * fid on close(), we have to reopen it if necessary.  There are
	 * other races here too, such as if another process opens the
	 * same file while we are blocked in read, or the file is open
	 * read-only. XXX
	 */
	error = 0;
	doclose = 0;
	if (np->n_opencount == 0) {
		error = smbfs_smb_open(np, SMB_AM_OPENRW, &scred);
		if (error == 0)
			doclose = 1;
	}
	if (error == 0)
		error = smb_write(smp->sm_share, np->n_fid, &uio, &scred);
	if (doclose)
		smbfs_smb_close(smp->sm_share, np->n_fid, NULL, &scred);
/*	VOP_CLOSE(vp, FWRITE, cred, td);*/
	SMBVDEBUG("paged write done: %d\n", error);

	pmap_qremove(kva, npages);
	relpbuf(bp, &smbfs_pbuf_freecnt);

	if (!error) {
		int nwritten = round_page(count - uio.uio_resid) / PAGE_SIZE;
		for (i = 0; i < nwritten; i++) {
			rtvals[i] = VM_PAGER_OK;
			pages[i]->dirty = 0;
		}
	}
	return rtvals[0];
#endif /* SMBFS_RWGENERIC */
}
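
/*
 * Note on the return convention of smbfs_putpages() above: every slot
 * of a_rtvals starts out as VM_PAGER_AGAIN and only pages actually
 * covered by the write are promoted to VM_PAGER_OK, so rtvals[0] also
 * serves as the overall status handed back to the pager.
 */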
/*
 * Flush and invalidate all dirty buffers.  If another process is already
 * doing the flush, just wait for completion.
 */
int
smbfs_vinvalbuf(struct vnode *vp, int flags, struct thread *td, int intrflg)
{
	struct smbnode *np = VTOSMB(vp);
	int error = 0, slpflag, slptimeo;

	if (vp->v_flag & VRECLAIMED)
		return 0;
	if (intrflg) {
		slpflag = PCATCH;
		slptimeo = 2 * hz;
	} else {
		slpflag = 0;
		slptimeo = 0;
	}
	while (np->n_flag & NFLUSHINPROG) {
		np->n_flag |= NFLUSHWANT;
		/*
		 * Wait for the flush already in progress (or for the
		 * timeout to expire); the tsleep() result is discarded
		 * and only a pending signal ends the wait early.
		 */
		error = tsleep((caddr_t)&np->n_flag, 0, "smfsvinv", slptimeo);
		error = smb_proc_intr(td);
		if (error == EINTR && intrflg)
			return EINTR;
	}
	np->n_flag |= NFLUSHINPROG;
	error = vinvalbuf(vp, flags, td, slpflag, 0);
	while (error) {
		if (intrflg && (error == ERESTART || error == EINTR)) {
			np->n_flag &= ~NFLUSHINPROG;
			if (np->n_flag & NFLUSHWANT) {
				np->n_flag &= ~NFLUSHWANT;
				wakeup((caddr_t)&np->n_flag);
			}
			return EINTR;
		}
		error = vinvalbuf(vp, flags, td, slpflag, 0);
	}
	np->n_flag &= ~(NMODIFIED | NFLUSHINPROG);
	if (np->n_flag & NFLUSHWANT) {
		np->n_flag &= ~NFLUSHWANT;
		wakeup((caddr_t)&np->n_flag);
	}
	return (error);
}
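
/*
 * Usage sketch for smbfs_vinvalbuf() (hypothetical caller, for
 * illustration only):
 *
 *	error = smbfs_vinvalbuf(vp, V_SAVE, td, 1);
 *	if (error)
 *		return error;
 *
 * This writes dirty buffers back, invalidates the rest, and returns
 * EINTR when interruption was allowed and a signal arrived; it is the
 * same pattern the read and write paths above use before touching data
 * that may be stale.
 */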