/*	$NetBSD: nfs_commonkrpc.c,v 1.2 2016/12/13 22:31:51 pgoyette Exp $	*/
/*-
 * Copyright (c) 1989, 1991, 1993, 1995
 *	The Regents of the University of California. All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
/* __FBSDID("FreeBSD: head/sys/fs/nfs/nfs_commonkrpc.c 304026 2016-08-12 22:44:59Z rmacklem "); */
__RCSID("$NetBSD: nfs_commonkrpc.c,v 1.2 2016/12/13 22:31:51 pgoyette Exp $");

/*
 * Socket operations for use by nfs
 */

#ifdef _KERNEL_OPT
#include "opt_dtrace.h"
#include "opt_newnfs.h"
#if 0
#include "opt_kgssapi.h"
#endif
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/vnode.h>

#include <rpc/rpc.h>
#include <fs/nfs/common/krpc.h>

#include <kgssapi/krb5/kcrypto.h>

#include <fs/nfs/common/nfsport.h>

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>

dtrace_nfsclient_nfs23_start_probe_func_t
		dtrace_nfscl_nfs234_start_probe;

dtrace_nfsclient_nfs23_done_probe_func_t
		dtrace_nfscl_nfs234_done_probe;

/*
 * Registered probes by RPC type.
 */
uint32_t nfscl_nfs2_start_probes[NFSV41_NPROCS + 1];
uint32_t nfscl_nfs2_done_probes[NFSV41_NPROCS + 1];

uint32_t nfscl_nfs3_start_probes[NFSV41_NPROCS + 1];
uint32_t nfscl_nfs3_done_probes[NFSV41_NPROCS + 1];

uint32_t nfscl_nfs4_start_probes[NFSV41_NPROCS + 1];
uint32_t nfscl_nfs4_done_probes[NFSV41_NPROCS + 1];
#endif

NFSSTATESPINLOCK;
NFSREQSPINLOCK;
NFSDLOCKMUTEX;
extern struct nfsstatsv1 nfsstatsv1;
extern struct nfsreqhead nfsd_reqq;
extern int nfscl_ticks;
extern void (*ncl_call_invalcaches)(struct vnode *);
extern int nfs_numnfscbd;
extern int nfscl_debuglevel;

SVCPOOL		*nfscbd_pool;
static int	nfsrv_gsscallbackson = 0;
static int	nfs_bufpackets = 4;
static int	nfs_reconnects;
static int	nfs3_jukebox_delay = 10;
static int	nfs_skip_wcc_data_onerr = 1;

SYSCTL_DECL(_vfs_nfs);

SYSCTL_INT(_vfs_nfs, OID_AUTO, bufpackets, CTLFLAG_RW, &nfs_bufpackets, 0,
    "Buffer reservation size 2 < x < 64");
SYSCTL_INT(_vfs_nfs, OID_AUTO, reconnects, CTLFLAG_RD, &nfs_reconnects, 0,
    "Number of times the nfs client has had to reconnect");
SYSCTL_INT(_vfs_nfs, OID_AUTO, nfs3_jukebox_delay, CTLFLAG_RW, &nfs3_jukebox_delay, 0,
    "Number of seconds to delay a retry after receiving EJUKEBOX");
SYSCTL_INT(_vfs_nfs, OID_AUTO, skip_wcc_data_onerr, CTLFLAG_RW, &nfs_skip_wcc_data_onerr, 0,
    "Disable weak cache consistency checking when server returns an error");

static void	nfs_down(struct nfsmount *, struct thread *, const char *,
    int, int);
static void	nfs_up(struct nfsmount *, struct thread *, const char *,
    int, int);
static int	nfs_msg(struct thread *, const char *, const char *, int);

struct nfs_cached_auth {
	int		ca_refs;	/* refcount, including 1 from the cache */
	uid_t		ca_uid;		/* uid that corresponds to this auth */
	AUTH		*ca_auth;	/* RPC auth handle */
};

static int nfsv2_procid[NFS_V3NPROCS] = {
	NFSV2PROC_NULL,
	NFSV2PROC_GETATTR,
	NFSV2PROC_SETATTR,
	NFSV2PROC_LOOKUP,
	NFSV2PROC_NOOP,
	NFSV2PROC_READLINK,
	NFSV2PROC_READ,
	NFSV2PROC_WRITE,
	NFSV2PROC_CREATE,
	NFSV2PROC_MKDIR,
	NFSV2PROC_SYMLINK,
	NFSV2PROC_CREATE,
	NFSV2PROC_REMOVE,
	NFSV2PROC_RMDIR,
	NFSV2PROC_RENAME,
	NFSV2PROC_LINK,
	NFSV2PROC_READDIR,
	NFSV2PROC_NOOP,
	NFSV2PROC_STATFS,
	NFSV2PROC_NOOP,
	NFSV2PROC_NOOP,
	NFSV2PROC_NOOP,
};

/*
 * Initialize sockets and congestion for a new NFS connection.
 * We do not free the sockaddr on error.
 */
int
newnfs_connect(struct nfsmount *nmp, struct nfssockreq *nrp,
    struct ucred *cred, NFSPROC_T *p, int callback_retry_mult)
{
	int rcvreserve, sndreserve;
	int pktscale;
	struct sockaddr *saddr;
	struct ucred *origcred;
	CLIENT *client;
	struct netconfig *nconf;
	struct socket *so;
	int one = 1, retries, error = 0;
	struct thread *td = curthread;
	SVCXPRT *xprt;
	struct timeval timo;

	/*
	 * We need to establish the socket using the credentials of
	 * the mountpoint. Some parts of this process (such as
	 * sobind() and soconnect()) will use the current thread's
	 * credential instead of the socket credential. To work
	 * around this, temporarily change the current thread's
	 * credential to that of the mountpoint.
	 *
	 * XXX: It would be better to explicitly pass the correct
	 * credential to sobind() and soconnect().
	 */
	origcred = td->td_ucred;

	/*
	 * Use the credential in nr_cred, if not NULL.
	 */
	if (nrp->nr_cred != NULL)
		td->td_ucred = nrp->nr_cred;
	else
		td->td_ucred = cred;
	saddr = nrp->nr_nam;

	if (saddr->sa_family == AF_INET)
		if (nrp->nr_sotype == SOCK_DGRAM)
			nconf = getnetconfigent("udp");
		else
			nconf = getnetconfigent("tcp");
	else
		if (nrp->nr_sotype == SOCK_DGRAM)
			nconf = getnetconfigent("udp6");
		else
			nconf = getnetconfigent("tcp6");

	pktscale = nfs_bufpackets;
	if (pktscale < 2)
		pktscale = 2;
	if (pktscale > 64)
		pktscale = 64;
	/*
	 * soreserve() can fail if sb_max is too small, so shrink pktscale
	 * and try again if there is an error.
	 * Print a log message suggesting increasing sb_max.
	 * Creating a socket and doing this is necessary since, if the
	 * reservation sizes are too large and will make soreserve() fail,
	 * the connection will work until a large send is attempted and
	 * then it will loop in the krpc code.
	 */
	so = NULL;
	saddr = NFSSOCKADDR(nrp->nr_nam, struct sockaddr *);
	error = socreate(saddr->sa_family, &so, nrp->nr_sotype,
	    nrp->nr_soproto, td->td_ucred, td);
	if (error) {
		td->td_ucred = origcred;
		goto out;
	}
	do {
		if (error != 0 && pktscale > 2)
			pktscale--;
		if (nrp->nr_sotype == SOCK_DGRAM) {
			if (nmp != NULL) {
				sndreserve = (NFS_MAXDGRAMDATA + NFS_MAXPKTHDR) *
				    pktscale;
				rcvreserve = (NFS_MAXDGRAMDATA + NFS_MAXPKTHDR) *
				    pktscale;
			} else {
				sndreserve = rcvreserve = 1024 * pktscale;
			}
		} else {
			if (nrp->nr_sotype != SOCK_STREAM)
				panic("nfscon sotype");
			if (nmp != NULL) {
				sndreserve = (NFS_MAXBSIZE + NFS_MAXPKTHDR +
				    sizeof (u_int32_t)) * pktscale;
				rcvreserve = (NFS_MAXBSIZE + NFS_MAXPKTHDR +
				    sizeof (u_int32_t)) * pktscale;
			} else {
				sndreserve = rcvreserve = 1024 * pktscale;
			}
		}
		error = soreserve(so, sndreserve, rcvreserve);
	} while (error != 0 && pktscale > 2);
	soclose(so);
	if (error) {
		td->td_ucred = origcred;
		goto out;
	}

	client = clnt_reconnect_create(nconf, saddr, nrp->nr_prog,
	    nrp->nr_vers, sndreserve, rcvreserve);
	CLNT_CONTROL(client, CLSET_WAITCHAN, "nfsreq");
	if (nmp != NULL) {
		if ((nmp->nm_flag & NFSMNT_INT))
			CLNT_CONTROL(client, CLSET_INTERRUPTIBLE, &one);
		if ((nmp->nm_flag & NFSMNT_RESVPORT))
			CLNT_CONTROL(client, CLSET_PRIVPORT, &one);
		if (NFSHASSOFT(nmp)) {
			if (nmp->nm_sotype == SOCK_DGRAM)
				/*
				 * For UDP, the large timeout for a reconnect
				 * will be set to "nm_retry * nm_timeo / 2", so
				 * we only want to do 2 reconnect timeout
				 * retries.
				 */
				retries = 2;
			else
				retries = nmp->nm_retry;
		} else
			retries = INT_MAX;
		if (NFSHASNFSV4N(nmp)) {
			/*
			 * Make sure the nfscbd_pool doesn't get destroyed
			 * while doing this.
			 */
			NFSD_LOCK();
			if (nfs_numnfscbd > 0) {
				nfs_numnfscbd++;
				NFSD_UNLOCK();
				xprt = svc_vc_create_backchannel(nfscbd_pool);
				CLNT_CONTROL(client, CLSET_BACKCHANNEL, xprt);
				NFSD_LOCK();
				nfs_numnfscbd--;
				if (nfs_numnfscbd == 0)
					wakeup(&nfs_numnfscbd);
			}
			NFSD_UNLOCK();
		}
	} else {
		/*
		 * Three cases:
		 * - Null RPC callback to client
		 * - Non-Null RPC callback to client, wait a little longer
		 * - upcalls to nfsuserd and gssd (clp == NULL)
		 */
		if (callback_retry_mult == 0) {
			retries = NFSV4_UPCALLRETRY;
			CLNT_CONTROL(client, CLSET_PRIVPORT, &one);
		} else {
			retries = NFSV4_CALLBACKRETRY * callback_retry_mult;
		}
	}
	CLNT_CONTROL(client, CLSET_RETRIES, &retries);

	if (nmp != NULL) {
		/*
		 * For UDP, there are 2 timeouts:
		 * - CLSET_RETRY_TIMEOUT sets the initial timeout for the timer
		 *   that does a retransmit of an RPC request using the same
		 *   socket and xid. This is what you normally want to do,
		 *   since NFS servers depend on "same xid" for their
		 *   Duplicate Request Cache.
		 * - timeout specified in CLNT_CALL_MBUF(), which specifies when
		 *   retransmits on the same socket should fail and a fresh
		 *   socket be created. Each of these timeouts counts as one
		 *   CLSET_RETRIES as set above.
		 * Set the initial retransmit timeout for UDP. This timeout
		 * doesn't exist for TCP and the following call just fails,
		 * which is ok.
		 */
		timo.tv_sec = nmp->nm_timeo / NFS_HZ;
		timo.tv_usec = (nmp->nm_timeo % NFS_HZ) * 1000000 / NFS_HZ;
		CLNT_CONTROL(client, CLSET_RETRY_TIMEOUT, &timo);
	}

	mtx_lock(&nrp->nr_mtx);
	if (nrp->nr_client != NULL) {
		mtx_unlock(&nrp->nr_mtx);
		/*
		 * Someone else already connected.
		 */
		CLNT_RELEASE(client);
	} else {
		nrp->nr_client = client;
		/*
		 * Protocols that do not require connections may be optionally
		 * left unconnected for servers that reply from a port other
		 * than NFS_PORT.
		 */
		if (nmp == NULL || (nmp->nm_flag & NFSMNT_NOCONN) == 0) {
			mtx_unlock(&nrp->nr_mtx);
			CLNT_CONTROL(client, CLSET_CONNECT, &one);
		} else
			mtx_unlock(&nrp->nr_mtx);
	}

	/* Restore current thread's credentials. */
	td->td_ucred = origcred;

out:
	NFSEXITCODE(error);
	return (error);
}

/*
 * NFS disconnect. Clean up and unlink.
 */
void
newnfs_disconnect(struct nfssockreq *nrp)
{
	CLIENT *client;

	mtx_lock(&nrp->nr_mtx);
	if (nrp->nr_client != NULL) {
		client = nrp->nr_client;
		nrp->nr_client = NULL;
		mtx_unlock(&nrp->nr_mtx);
		rpc_gss_secpurge_call(client);
		CLNT_CLOSE(client);
		CLNT_RELEASE(client);
	} else {
		mtx_unlock(&nrp->nr_mtx);
	}
}

/*
 * Acquire an RPC authenticator handle for the requested security flavour,
 * falling back to AUTH_SYS when RPCSEC_GSS cannot be set up.
 */
static AUTH *
nfs_getauth(struct nfssockreq *nrp, int secflavour, char *clnt_principal,
    char *srv_principal, gss_OID mech_oid, struct ucred *cred)
{
	rpc_gss_service_t svc;
	AUTH *auth;

	switch (secflavour) {
	case RPCSEC_GSS_KRB5:
	case RPCSEC_GSS_KRB5I:
	case RPCSEC_GSS_KRB5P:
		if (!mech_oid) {
			if (!rpc_gss_mech_to_oid_call("kerberosv5", &mech_oid))
				return (NULL);
		}
		if (secflavour == RPCSEC_GSS_KRB5)
			svc = rpc_gss_svc_none;
		else if (secflavour == RPCSEC_GSS_KRB5I)
			svc = rpc_gss_svc_integrity;
		else
			svc = rpc_gss_svc_privacy;

		if (clnt_principal == NULL)
			auth = rpc_gss_secfind_call(nrp->nr_client, cred,
			    srv_principal, mech_oid, svc);
		else {
			auth = rpc_gss_seccreate_call(nrp->nr_client, cred,
			    clnt_principal, srv_principal, "kerberosv5",
			    svc, NULL, NULL, NULL);
			return (auth);
		}
		if (auth != NULL)
			return (auth);
		/* fallthrough */
	case AUTH_SYS:
	default:
		return (authunix_create(cred));

	}
}

/*
 * Callback from the RPC code to generate up/down notifications.
 */

struct nfs_feedback_arg {
	struct nfsmount *nf_mount;
	int		nf_lastmsg;	/* last tprintf */
	int		nf_tprintfmsg;
	struct thread	*nf_td;
};

static void
nfs_feedback(int type, int proc, void *arg)
{
	struct nfs_feedback_arg *nf = (struct nfs_feedback_arg *) arg;
	struct nfsmount *nmp = nf->nf_mount;
	time_t now;

	switch (type) {
	case FEEDBACK_REXMIT2:
	case FEEDBACK_RECONNECT:
		now = NFSD_MONOSEC;
		if (nf->nf_lastmsg + nmp->nm_tprintf_delay < now) {
			nfs_down(nmp, nf->nf_td,
			    "not responding", 0, NFSSTA_TIMEO);
			nf->nf_tprintfmsg = TRUE;
			nf->nf_lastmsg = now;
		}
		break;

	case FEEDBACK_OK:
		nfs_up(nf->nf_mount, nf->nf_td,
		    "is alive again", NFSSTA_TIMEO, nf->nf_tprintfmsg);
		break;
	}
}

/*
 * newnfs_request - goes something like this
 *	- does the rpc by calling the krpc layer
 *	- break down rpc header and return with nfs reply
 * nb: always frees up nd_mreq mbuf list
 */
int
newnfs_request(struct nfsrv_descript *nd, struct nfsmount *nmp,
    struct nfsclient *clp, struct nfssockreq *nrp, vnode_t vp,
    struct thread *td, struct ucred *cred, u_int32_t prog, u_int32_t vers,
    u_char *retsum, int toplevel, u_int64_t *xidp, struct nfsclsession *sep)
{
	u_int32_t retseq, retval, *tl;
	time_t waituntil;
	int i = 0, j = 0, opcnt, set_sigset = 0, slot;
	int trycnt, error = 0, usegssname = 0, secflavour = AUTH_SYS;
	int freeslot, timeo;
	u_int16_t procnum;
	u_int trylater_delay = 1;
	struct nfs_feedback_arg nf;
	struct timeval timo;
	AUTH *auth;
	struct rpc_callextra ext;
	enum clnt_stat stat;
	struct nfsreq *rep = NULL;
	char *srv_principal = NULL, *clnt_principal = NULL;
	sigset_t oldset;
	struct ucred *authcred;

	if (xidp != NULL)
		*xidp = 0;
	/* Reject requests while attempting a forced unmount. */
	if (nmp != NULL && (nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF)) {
		m_freem(nd->nd_mreq);
		return (ESTALE);
	}

	/*
	 * Set authcred, which is used to acquire RPC credentials, to
	 * the cred argument by default. The crhold() should not be
	 * necessary, but will ensure that some future code change
	 * doesn't result in the credential being free'd prematurely.
	 */
	authcred = crhold(cred);

	/* For client side interruptible mounts, mask off the signals. */
	if (nmp != NULL && td != NULL && NFSHASINT(nmp)) {
		newnfs_set_sigmask(td, &oldset);
		set_sigset = 1;
	}

	/*
	 * XXX if not already connected call nfs_connect now. Longer
	 * term, change nfs_mount to call nfs_connect unconditionally
	 * and let clnt_reconnect_create handle reconnects.
	 */
	if (nrp->nr_client == NULL)
		newnfs_connect(nmp, nrp, cred, td, 0);

	/*
	 * For a client side mount, nmp is != NULL and clp == NULL. For
	 * server calls (callbacks or upcalls), nmp == NULL.
	 */
	if (clp != NULL) {
		NFSLOCKSTATE();
		if ((clp->lc_flags & LCL_GSS) && nfsrv_gsscallbackson) {
			secflavour = RPCSEC_GSS_KRB5;
			if (nd->nd_procnum != NFSPROC_NULL) {
				if (clp->lc_flags & LCL_GSSINTEGRITY)
					secflavour = RPCSEC_GSS_KRB5I;
				else if (clp->lc_flags & LCL_GSSPRIVACY)
					secflavour = RPCSEC_GSS_KRB5P;
			}
		}
		NFSUNLOCKSTATE();
	} else if (nmp != NULL && NFSHASKERB(nmp) &&
	    nd->nd_procnum != NFSPROC_NULL) {
		if (NFSHASALLGSSNAME(nmp) && nmp->nm_krbnamelen > 0)
			nd->nd_flag |= ND_USEGSSNAME;
		if ((nd->nd_flag & ND_USEGSSNAME) != 0) {
			/*
			 * If there is a client side host based credential,
			 * use that, otherwise use the system uid, if set.
			 * The system uid is in the nmp->nm_sockreq.nr_cred
			 * credentials.
			 */
			if (nmp->nm_krbnamelen > 0) {
				usegssname = 1;
				clnt_principal = nmp->nm_krbname;
			} else if (nmp->nm_uid != (uid_t)-1) {
				KASSERT(nmp->nm_sockreq.nr_cred != NULL,
				    ("newnfs_request: NULL nr_cred"));
				crfree(authcred);
				authcred = crhold(nmp->nm_sockreq.nr_cred);
			}
		} else if (nmp->nm_krbnamelen == 0 &&
		    nmp->nm_uid != (uid_t)-1 && cred->cr_uid == (uid_t)0) {
			/*
			 * If there is no host based principal name and
			 * the system uid is set and this is root, use the
			 * system uid, since root won't have user
			 * credentials in a credentials cache file.
			 * The system uid is in the nmp->nm_sockreq.nr_cred
			 * credentials.
			 */
			KASSERT(nmp->nm_sockreq.nr_cred != NULL,
			    ("newnfs_request: NULL nr_cred"));
			crfree(authcred);
			authcred = crhold(nmp->nm_sockreq.nr_cred);
		}
		if (NFSHASINTEGRITY(nmp))
			secflavour = RPCSEC_GSS_KRB5I;
		else if (NFSHASPRIVACY(nmp))
			secflavour = RPCSEC_GSS_KRB5P;
		else
			secflavour = RPCSEC_GSS_KRB5;
		srv_principal = NFSMNT_SRVKRBNAME(nmp);
	} else if (nmp != NULL && !NFSHASKERB(nmp) &&
	    nd->nd_procnum != NFSPROC_NULL &&
	    (nd->nd_flag & ND_USEGSSNAME) != 0) {
		/*
		 * Use the uid that did the mount when the RPC is doing
		 * NFSv4 system operations, as indicated by the
		 * ND_USEGSSNAME flag, for the AUTH_SYS case.
		 * The credentials in nm_sockreq.nr_cred were used for the
		 * mount.
		 */
		KASSERT(nmp->nm_sockreq.nr_cred != NULL,
		    ("newnfs_request: NULL nr_cred"));
		crfree(authcred);
		authcred = crhold(nmp->nm_sockreq.nr_cred);
	}

	if (nmp != NULL) {
		bzero(&nf, sizeof(struct nfs_feedback_arg));
		nf.nf_mount = nmp;
		nf.nf_td = td;
		nf.nf_lastmsg = NFSD_MONOSEC -
		    ((nmp->nm_tprintf_delay)-(nmp->nm_tprintf_initial_delay));
	}

	if (nd->nd_procnum == NFSPROC_NULL)
		auth = authnone_create();
	else if (usegssname) {
		/*
		 * For this case, the authenticator is held in the
		 * nfssockreq structure, so don't release the reference count
		 * held on it. --> Don't AUTH_DESTROY() it in this function.
		 */
		if (nrp->nr_auth == NULL)
			nrp->nr_auth = nfs_getauth(nrp, secflavour,
			    clnt_principal, srv_principal, NULL, authcred);
		else
			rpc_gss_refresh_auth_call(nrp->nr_auth);
		auth = nrp->nr_auth;
	} else
		auth = nfs_getauth(nrp, secflavour, NULL,
		    srv_principal, NULL, authcred);
	crfree(authcred);
	if (auth == NULL) {
		m_freem(nd->nd_mreq);
		if (set_sigset)
			newnfs_restore_sigmask(td, &oldset);
		return (EACCES);
	}
	bzero(&ext, sizeof(ext));
	ext.rc_auth = auth;
	if (nmp != NULL) {
		ext.rc_feedback = nfs_feedback;
		ext.rc_feedback_arg = &nf;
	}

	procnum = nd->nd_procnum;
	if ((nd->nd_flag & ND_NFSV4) &&
	    nd->nd_procnum != NFSPROC_NULL &&
	    nd->nd_procnum != NFSV4PROC_CBCOMPOUND)
		procnum = NFSV4PROC_COMPOUND;

	if (nmp != NULL) {
		NFSINCRGLOBAL(nfsstatsv1.rpcrequests);

		/* Map the procnum to the old NFSv2 one, as required. */
		if ((nd->nd_flag & ND_NFSV2) != 0) {
			if (nd->nd_procnum < NFS_V3NPROCS)
				procnum = nfsv2_procid[nd->nd_procnum];
			else
				procnum = NFSV2PROC_NOOP;
		}

		/*
		 * Now only used for the R_DONTRECOVER case, but until that is
		 * supported within the krpc code, I need to keep a queue of
		 * outstanding RPCs for nfsv4 client requests.
		 */
		if ((nd->nd_flag & ND_NFSV4) && procnum == NFSV4PROC_COMPOUND)
			MALLOC(rep, struct nfsreq *, sizeof(struct nfsreq),
			    M_NFSDREQ, M_WAITOK);
#ifdef KDTRACE_HOOKS
		if (dtrace_nfscl_nfs234_start_probe != NULL) {
			uint32_t probe_id;
			int probe_procnum;

			if (nd->nd_flag & ND_NFSV4) {
				probe_id =
				    nfscl_nfs4_start_probes[nd->nd_procnum];
				probe_procnum = nd->nd_procnum;
			} else if (nd->nd_flag & ND_NFSV3) {
				probe_id = nfscl_nfs3_start_probes[procnum];
				probe_procnum = procnum;
			} else {
				probe_id =
				    nfscl_nfs2_start_probes[nd->nd_procnum];
				probe_procnum = procnum;
			}
			if (probe_id != 0)
				(dtrace_nfscl_nfs234_start_probe)
				    (probe_id, vp, nd->nd_mreq, cred,
				    probe_procnum);
		}
#endif
	}
	trycnt = 0;
	freeslot = -1;		/* Set to slot that needs to be free'd */
tryagain:
	slot = -1;		/* Slot that needs a sequence# increment. */
	/*
	 * This timeout specifies when a new socket should be created,
	 * along with new xid values. For UDP, this should be done
	 * infrequently, since retransmits of RPC requests should normally
	 * use the same xid.
	 */
	if (nmp == NULL) {
		timo.tv_usec = 0;
		if (clp == NULL)
			timo.tv_sec = NFSV4_UPCALLTIMEO;
		else
			timo.tv_sec = NFSV4_CALLBACKTIMEO;
	} else {
		if (nrp->nr_sotype != SOCK_DGRAM) {
			timo.tv_usec = 0;
			if ((nmp->nm_flag & NFSMNT_NFSV4))
				timo.tv_sec = INT_MAX;
			else
				timo.tv_sec = NFS_TCPTIMEO;
		} else {
			if (NFSHASSOFT(nmp)) {
				/*
				 * CLSET_RETRIES is set to 2, so this should be
				 * half of the total timeout required.
				 */
				timeo = nmp->nm_retry * nmp->nm_timeo / 2;
				if (timeo < 1)
					timeo = 1;
				timo.tv_sec = timeo / NFS_HZ;
				timo.tv_usec = (timeo % NFS_HZ) * 1000000 /
				    NFS_HZ;
			} else {
				/* For UDP hard mounts, use a large value. */
				timo.tv_sec = NFS_MAXTIMEO / NFS_HZ;
				timo.tv_usec = 0;
			}
		}

		if (rep != NULL) {
			rep->r_flags = 0;
			rep->r_nmp = nmp;
			/*
			 * Chain request into list of outstanding requests.
			 */
			NFSLOCKREQ();
			TAILQ_INSERT_TAIL(&nfsd_reqq, rep, r_chain);
			NFSUNLOCKREQ();
		}
	}

	nd->nd_mrep = NULL;
	if (clp != NULL && sep != NULL)
		stat = clnt_bck_call(nrp->nr_client, &ext, procnum,
		    nd->nd_mreq, &nd->nd_mrep, timo, sep->nfsess_xprt);
	else
		stat = CLNT_CALL_MBUF(nrp->nr_client, &ext, procnum,
		    nd->nd_mreq, &nd->nd_mrep, timo);

	if (rep != NULL) {
		/*
		 * RPC done, unlink the request.
		 */
		NFSLOCKREQ();
		TAILQ_REMOVE(&nfsd_reqq, rep, r_chain);
		NFSUNLOCKREQ();
	}

	/*
	 * If there was a successful reply and a tprintf msg,
	 * tprintf a response.
	 */
	if (stat == RPC_SUCCESS) {
		error = 0;
	} else if (stat == RPC_TIMEDOUT) {
		NFSINCRGLOBAL(nfsstatsv1.rpctimeouts);
		error = ETIMEDOUT;
	} else if (stat == RPC_VERSMISMATCH) {
		NFSINCRGLOBAL(nfsstatsv1.rpcinvalid);
		error = EOPNOTSUPP;
	} else if (stat == RPC_PROGVERSMISMATCH) {
		NFSINCRGLOBAL(nfsstatsv1.rpcinvalid);
		error = EPROTONOSUPPORT;
	} else if (stat == RPC_INTR) {
		error = EINTR;
	} else {
		NFSINCRGLOBAL(nfsstatsv1.rpcinvalid);
		error = EACCES;
	}
	if (error) {
		m_freem(nd->nd_mreq);
		if (usegssname == 0)
			AUTH_DESTROY(auth);
		if (rep != NULL)
			FREE((caddr_t)rep, M_NFSDREQ);
		if (set_sigset)
			newnfs_restore_sigmask(td, &oldset);
		return (error);
	}

	KASSERT(nd->nd_mrep != NULL, ("mrep shouldn't be NULL if no error\n"));

	/*
	 * Search for any mbufs that are not a multiple of 4 bytes long
	 * or with m_data not longword aligned.
	 * These could cause pointer alignment problems, so copy them to
	 * well aligned mbufs.
	 */
	newnfs_realign(&nd->nd_mrep, M_WAITOK);
	nd->nd_md = nd->nd_mrep;
	nd->nd_dpos = NFSMTOD(nd->nd_md, caddr_t);
	nd->nd_repstat = 0;
	if (nd->nd_procnum != NFSPROC_NULL &&
	    nd->nd_procnum != NFSV4PROC_CBNULL) {
		/* If sep == NULL, set it to the default in nmp. */
		if (sep == NULL && nmp != NULL)
			sep = NFSMNT_MDSSESSION(nmp);
		/*
		 * and now the actual NFS xdr.
		 */
		NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
		nd->nd_repstat = fxdr_unsigned(u_int32_t, *tl);
		if (nd->nd_repstat >= 10000)
			NFSCL_DEBUG(1, "proc=%d reps=%d\n", (int)nd->nd_procnum,
			    (int)nd->nd_repstat);

		/*
		 * Get rid of the tag, return count and SEQUENCE result for
		 * NFSv4.
		 */
		if ((nd->nd_flag & ND_NFSV4) != 0) {
			NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
			i = fxdr_unsigned(int, *tl);
			error = nfsm_advance(nd, NFSM_RNDUP(i), -1);
			if (error)
				goto nfsmout;
			NFSM_DISSECT(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
			opcnt = fxdr_unsigned(int, *tl++);
			i = fxdr_unsigned(int, *tl++);
			j = fxdr_unsigned(int, *tl);
			if (j >= 10000)
				NFSCL_DEBUG(1, "fop=%d fst=%d\n", i, j);
			/*
			 * If the first op is Sequence, free up the slot.
			 */
			if ((nmp != NULL && i == NFSV4OP_SEQUENCE && j != 0) ||
			    (clp != NULL && i == NFSV4OP_CBSEQUENCE && j != 0))
				NFSCL_DEBUG(1, "failed seq=%d\n", j);
			if ((nmp != NULL && i == NFSV4OP_SEQUENCE && j == 0) ||
			    (clp != NULL && i == NFSV4OP_CBSEQUENCE && j == 0)
			    ) {
				if (i == NFSV4OP_SEQUENCE)
					NFSM_DISSECT(tl, uint32_t *,
					    NFSX_V4SESSIONID +
					    5 * NFSX_UNSIGNED);
				else
					NFSM_DISSECT(tl, uint32_t *,
					    NFSX_V4SESSIONID +
					    4 * NFSX_UNSIGNED);
				mtx_lock(&sep->nfsess_mtx);
				tl += NFSX_V4SESSIONID / NFSX_UNSIGNED;
				retseq = fxdr_unsigned(uint32_t, *tl++);
				slot = fxdr_unsigned(int, *tl++);
				freeslot = slot;
				if (retseq != sep->nfsess_slotseq[slot])
					printf("retseq diff 0x%x\n", retseq);
				retval = fxdr_unsigned(uint32_t, *++tl);
				if ((retval + 1) < sep->nfsess_foreslots)
					sep->nfsess_foreslots = (retval + 1);
				else if ((retval + 1) > sep->nfsess_foreslots)
					sep->nfsess_foreslots = (retval < 64) ?
					    (retval + 1) : 64;
				mtx_unlock(&sep->nfsess_mtx);

				/* Grab the op and status for the next one. */
				if (opcnt > 1) {
					NFSM_DISSECT(tl, uint32_t *,
					    2 * NFSX_UNSIGNED);
					i = fxdr_unsigned(int, *tl++);
					j = fxdr_unsigned(int, *tl);
				}
			}
		}
		if (nd->nd_repstat != 0) {
			if (((nd->nd_repstat == NFSERR_DELAY ||
			      nd->nd_repstat == NFSERR_GRACE) &&
			     (nd->nd_flag & ND_NFSV4) &&
			     nd->nd_procnum != NFSPROC_DELEGRETURN &&
			     nd->nd_procnum != NFSPROC_SETATTR &&
			     nd->nd_procnum != NFSPROC_READ &&
			     nd->nd_procnum != NFSPROC_READDS &&
			     nd->nd_procnum != NFSPROC_WRITE &&
			     nd->nd_procnum != NFSPROC_WRITEDS &&
			     nd->nd_procnum != NFSPROC_OPEN &&
			     nd->nd_procnum != NFSPROC_CREATE &&
			     nd->nd_procnum != NFSPROC_OPENCONFIRM &&
			     nd->nd_procnum != NFSPROC_OPENDOWNGRADE &&
			     nd->nd_procnum != NFSPROC_CLOSE &&
			     nd->nd_procnum != NFSPROC_LOCK &&
			     nd->nd_procnum != NFSPROC_LOCKU) ||
			    (nd->nd_repstat == NFSERR_DELAY &&
			     (nd->nd_flag & ND_NFSV4) == 0) ||
			    nd->nd_repstat == NFSERR_RESOURCE) {
				if (trylater_delay > NFS_TRYLATERDEL)
					trylater_delay = NFS_TRYLATERDEL;
				waituntil = NFSD_MONOSEC + trylater_delay;
				while (NFSD_MONOSEC < waituntil)
					(void) nfs_catnap(PZERO, 0, "nfstry");
				trylater_delay *= 2;
				if (slot != -1) {
					mtx_lock(&sep->nfsess_mtx);
					sep->nfsess_slotseq[slot]++;
					*nd->nd_slotseq = txdr_unsigned(
					    sep->nfsess_slotseq[slot]);
					mtx_unlock(&sep->nfsess_mtx);
				}
				m_freem(nd->nd_mrep);
				nd->nd_mrep = NULL;
				goto tryagain;
			}

			/*
			 * If the File Handle was stale, invalidate the
			 * lookup cache, just in case.
			 * (vp != NULL implies a client side call)
			 */
			if (nd->nd_repstat == ESTALE && vp != NULL) {
				cache_purge(vp);
				if (ncl_call_invalcaches != NULL)
					(*ncl_call_invalcaches)(vp);
			}
		}
		if ((nd->nd_flag & ND_NFSV4) != 0) {
			/* Free the slot, as required. */
			if (freeslot != -1)
				nfsv4_freeslot(sep, freeslot);
			/*
			 * If this op is Putfh, throw its results away.
			 */
			if (j >= 10000)
				NFSCL_DEBUG(1, "nop=%d nst=%d\n", i, j);
			if (nmp != NULL && i == NFSV4OP_PUTFH && j == 0) {
				NFSM_DISSECT(tl,u_int32_t *,2 * NFSX_UNSIGNED);
				i = fxdr_unsigned(int, *tl++);
				j = fxdr_unsigned(int, *tl);
				if (j >= 10000)
					NFSCL_DEBUG(1, "n2op=%d n2st=%d\n", i,
					    j);
				/*
				 * All Compounds that do an Op that must
				 * be in sequence consist of NFSV4OP_PUTFH
				 * followed by one of these. As such, we
				 * can determine if the seqid# should be
				 * incremented, here.
				 */
				if ((i == NFSV4OP_OPEN ||
				     i == NFSV4OP_OPENCONFIRM ||
				     i == NFSV4OP_OPENDOWNGRADE ||
				     i == NFSV4OP_CLOSE ||
				     i == NFSV4OP_LOCK ||
				     i == NFSV4OP_LOCKU) &&
				    (j == 0 ||
				     (j != NFSERR_STALECLIENTID &&
				      j != NFSERR_STALESTATEID &&
				      j != NFSERR_BADSTATEID &&
				      j != NFSERR_BADSEQID &&
				      j != NFSERR_BADXDR &&
				      j != NFSERR_RESOURCE &&
				      j != NFSERR_NOFILEHANDLE)))
					nd->nd_flag |= ND_INCRSEQID;
			}
			/*
			 * If this op's status is non-zero, mark
			 * that there is no more data to process.
			 */
			if (j)
				nd->nd_flag |= ND_NOMOREDATA;

			/*
			 * If R_DONTRECOVER is set, replace the stale error
			 * reply, so that recovery isn't initiated.
			 */
			if ((nd->nd_repstat == NFSERR_STALECLIENTID ||
			     nd->nd_repstat == NFSERR_BADSESSION ||
			     nd->nd_repstat == NFSERR_STALESTATEID) &&
			    rep != NULL && (rep->r_flags & R_DONTRECOVER))
				nd->nd_repstat = NFSERR_STALEDONTRECOVER;
		}
	}

#ifdef KDTRACE_HOOKS
	if (nmp != NULL && dtrace_nfscl_nfs234_done_probe != NULL) {
		uint32_t probe_id;
		int probe_procnum;

		if (nd->nd_flag & ND_NFSV4) {
			probe_id = nfscl_nfs4_done_probes[nd->nd_procnum];
			probe_procnum = nd->nd_procnum;
		} else if (nd->nd_flag & ND_NFSV3) {
			probe_id = nfscl_nfs3_done_probes[procnum];
			probe_procnum = procnum;
		} else {
			probe_id = nfscl_nfs2_done_probes[nd->nd_procnum];
			probe_procnum = procnum;
		}
		if (probe_id != 0)
			(dtrace_nfscl_nfs234_done_probe)(probe_id, vp,
			    nd->nd_mreq, cred, probe_procnum, 0);
	}
#endif

	m_freem(nd->nd_mreq);
	if (usegssname == 0)
		AUTH_DESTROY(auth);
	if (rep != NULL)
		FREE((caddr_t)rep, M_NFSDREQ);
	if (set_sigset)
		newnfs_restore_sigmask(td, &oldset);
	return (0);
nfsmout:
	mbuf_freem(nd->nd_mrep);
	mbuf_freem(nd->nd_mreq);
	if (usegssname == 0)
		AUTH_DESTROY(auth);
	if (rep != NULL)
		FREE((caddr_t)rep, M_NFSDREQ);
	if (set_sigset)
		newnfs_restore_sigmask(td, &oldset);
	return (error);
}

/*
 * Mark all of an nfs mount's outstanding requests with R_SOFTTERM and
 * wait for all requests to complete. This is used by forced unmounts
 * to terminate any outstanding RPCs.
 */
int
newnfs_nmcancelreqs(struct nfsmount *nmp)
{

	if (nmp->nm_sockreq.nr_client != NULL)
		CLNT_CLOSE(nmp->nm_sockreq.nr_client);
	return (0);
}

/*
 * Any signal that can interrupt an NFS operation in an intr mount
 * should be added to this set. SIGSTOP and SIGKILL cannot be masked.
 */
int newnfs_sig_set[] = {
	SIGINT,
	SIGTERM,
	SIGHUP,
	SIGKILL,
	SIGQUIT
};

/*
 * Check to see if one of the signals in our subset is pending on
 * the process (in an intr mount).
 */
static int
nfs_sig_pending(sigset_t set)
{
	int i;

	for (i = 0 ; i < nitems(newnfs_sig_set); i++)
		if (SIGISMEMBER(set, newnfs_sig_set[i]))
			return (1);
	return (0);
}

/*
 * The set/restore sigmask functions are used to (temporarily) overwrite
 * the thread td_sigmask during an RPC call (for example). These are also
 * used in other places in the NFS client that might tsleep().
 */
void
newnfs_set_sigmask(struct thread *td, sigset_t *oldset)
{
	sigset_t newset;
	int i;
	struct proc *p;

	SIGFILLSET(newset);
	if (td == NULL)
		td = curthread; /* XXX */
	p = td->td_proc;
	/* Remove the NFS set of signals from newset */
	PROC_LOCK(p);
	mtx_lock(&p->p_sigacts->ps_mtx);
	for (i = 0 ; i < nitems(newnfs_sig_set); i++) {
		/*
		 * But make sure we leave the ones already masked
		 * by the process, ie. remove the signal from the
		 * temporary signalmask only if it wasn't already
		 * in p_sigmask.
		 */
		if (!SIGISMEMBER(td->td_sigmask, newnfs_sig_set[i]) &&
		    !SIGISMEMBER(p->p_sigacts->ps_sigignore, newnfs_sig_set[i]))
			SIGDELSET(newset, newnfs_sig_set[i]);
	}
	mtx_unlock(&p->p_sigacts->ps_mtx);
	kern_sigprocmask(td, SIG_SETMASK, &newset, oldset,
	    SIGPROCMASK_PROC_LOCKED);
	PROC_UNLOCK(p);
}

void
newnfs_restore_sigmask(struct thread *td, sigset_t *set)
{
	if (td == NULL)
		td = curthread; /* XXX */
	kern_sigprocmask(td, SIG_SETMASK, set, NULL, 0);
}

/*
 * NFS wrapper to msleep(), that shoves a new p_sigmask and restores the
 * old one after msleep() returns.
 */
int
newnfs_msleep(struct thread *td, void *ident, struct mtx *mtx, int priority, char *wmesg, int timo)
{
	sigset_t oldset;
	int error;
	struct proc *p;

	if ((priority & PCATCH) == 0)
		return msleep(ident, mtx, priority, wmesg, timo);
	if (td == NULL)
		td = curthread; /* XXX */
	newnfs_set_sigmask(td, &oldset);
	error = msleep(ident, mtx, priority, wmesg, timo);
	newnfs_restore_sigmask(td, &oldset);
	p = td->td_proc;
	return (error);
}

/*
 * Test for a termination condition pending on the process.
 * This is used for NFSMNT_INT mounts.
 */
int
newnfs_sigintr(struct nfsmount *nmp, struct thread *td)
{
	struct proc *p;
	sigset_t tmpset;

	/* Terminate all requests while attempting a forced unmount. */
	if (nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF)
		return (EIO);
	if (!(nmp->nm_flag & NFSMNT_INT))
		return (0);
	if (td == NULL)
		return (0);
	p = td->td_proc;
	PROC_LOCK(p);
	tmpset = p->p_siglist;
	SIGSETOR(tmpset, td->td_siglist);
	SIGSETNAND(tmpset, td->td_sigmask);
	mtx_lock(&p->p_sigacts->ps_mtx);
	SIGSETNAND(tmpset, p->p_sigacts->ps_sigignore);
	mtx_unlock(&p->p_sigacts->ps_mtx);
	if ((SIGNOTEMPTY(p->p_siglist) || SIGNOTEMPTY(td->td_siglist))
	    && nfs_sig_pending(tmpset)) {
		PROC_UNLOCK(p);
		return (EINTR);
	}
	PROC_UNLOCK(p);
	return (0);
}

static int
nfs_msg(struct thread *td, const char *server, const char *msg, int error)
{
	struct proc *p;

	p = td ? td->td_proc : NULL;
	if (error) {
		tprintf(p, LOG_INFO, "nfs server %s: %s, error %d\n",
		    server, msg, error);
	} else {
		tprintf(p, LOG_INFO, "nfs server %s: %s\n", server, msg);
	}
	return (0);
}

/*
 * Mark the mount as not responding: set the timeout state, post the
 * VQ_NOTRESP/VQ_NOTRESPLOCK vfs events and log a message for the user.
 */
static void
nfs_down(struct nfsmount *nmp, struct thread *td, const char *msg,
    int error, int flags)
{
	if (nmp == NULL)
		return;
	mtx_lock(&nmp->nm_mtx);
	if ((flags & NFSSTA_TIMEO) && !(nmp->nm_state & NFSSTA_TIMEO)) {
		nmp->nm_state |= NFSSTA_TIMEO;
		mtx_unlock(&nmp->nm_mtx);
		vfs_event_signal(&nmp->nm_mountp->mnt_stat.f_fsid,
		    VQ_NOTRESP, 0);
	} else
		mtx_unlock(&nmp->nm_mtx);
	mtx_lock(&nmp->nm_mtx);
	if ((flags & NFSSTA_LOCKTIMEO) && !(nmp->nm_state & NFSSTA_LOCKTIMEO)) {
		nmp->nm_state |= NFSSTA_LOCKTIMEO;
		mtx_unlock(&nmp->nm_mtx);
		vfs_event_signal(&nmp->nm_mountp->mnt_stat.f_fsid,
		    VQ_NOTRESPLOCK, 0);
	} else
		mtx_unlock(&nmp->nm_mtx);
	nfs_msg(td, nmp->nm_mountp->mnt_stat.f_mntfromname, msg, error);
}

/*
 * Opposite of nfs_down: clear the timeout state, post the matching
 * vfs events and optionally log an "is alive again" message.
 */
static void
nfs_up(struct nfsmount *nmp, struct thread *td, const char *msg,
    int flags, int tprintfmsg)
{
	if (nmp == NULL)
		return;
	if (tprintfmsg) {
		nfs_msg(td, nmp->nm_mountp->mnt_stat.f_mntfromname, msg, 0);
	}

	mtx_lock(&nmp->nm_mtx);
	if ((flags & NFSSTA_TIMEO) && (nmp->nm_state & NFSSTA_TIMEO)) {
		nmp->nm_state &= ~NFSSTA_TIMEO;
		mtx_unlock(&nmp->nm_mtx);
		vfs_event_signal(&nmp->nm_mountp->mnt_stat.f_fsid,
		    VQ_NOTRESP, 1);
	} else
		mtx_unlock(&nmp->nm_mtx);

	mtx_lock(&nmp->nm_mtx);
	if ((flags & NFSSTA_LOCKTIMEO) && (nmp->nm_state & NFSSTA_LOCKTIMEO)) {
		nmp->nm_state &= ~NFSSTA_LOCKTIMEO;
		mtx_unlock(&nmp->nm_mtx);
		vfs_event_signal(&nmp->nm_mountp->mnt_stat.f_fsid,
		    VQ_NOTRESPLOCK, 1);
	} else
		mtx_unlock(&nmp->nm_mtx);
}