/*
 * Copyright (c) 1989, 1991 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)nfs_socket.c	7.33 (Berkeley) 07/10/92
 */

/*
 * Socket operations for use by nfs
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/vnode.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/syslog.h>
#include <sys/tprintf.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <nfs/rpcv2.h>
#include <nfs/nfsv2.h>
#include <nfs/nfs.h>
#include <nfs/xdr_subs.h>
#include <nfs/nfsm_subs.h>
#include <nfs/nfsmount.h>
#include <nfs/nfsnode.h>
#include <nfs/nfsrtt.h>
#include <nfs/nqnfs.h>

#define	TRUE	1
#define	FALSE	0

/*
 * Estimate rto for an nfs rpc sent via an unreliable datagram.
 * Use the mean and mean deviation of rtt for the appropriate type of rpc
 * for the frequent rpcs and a default for the others.
 * The justification for doing "other" this way is that these rpcs
 * happen so infrequently that timer est. would probably be stale.
 * Also, since many of these rpcs are
 * non-idempotent, a conservative timeout is desired.
 * getattr, lookup - A+2D
 * read, write - A+4D
 * other - nm_timeo
 */
#define	NFS_RTO(n, t) \
	((t) == 0 ? (n)->nm_timeo : \
	 ((t) < 3 ? \
	  (((((n)->nm_srtt[t-1] + 3) >> 2) + (n)->nm_sdrtt[t-1] + 1) >> 1) : \
	  ((((n)->nm_srtt[t-1] + 7) >> 3) + (n)->nm_sdrtt[t-1] + 1)))
#define	NFS_SRTT(r)	(r)->r_nmp->nm_srtt[proct[(r)->r_procnum] - 1]
#define	NFS_SDRTT(r)	(r)->r_nmp->nm_sdrtt[proct[(r)->r_procnum] - 1]
/*
 * External data, mostly RPC constants in XDR form
 */
extern u_long rpc_reply, rpc_msgdenied, rpc_mismatch, rpc_vers, rpc_auth_unix,
	rpc_msgaccepted, rpc_call, rpc_autherr, rpc_rejectedcred,
	rpc_auth_kerb;
extern u_long nfs_prog, nfs_vers, nqnfs_prog, nqnfs_vers;
extern time_t nqnfsstarttime;
extern int nonidempotent[NFS_NPROCS];

/*
 * Maps errno values to nfs error numbers.
 * Use NFSERR_IO as the catch all for ones not specifically defined in
 * RFC 1094.
 */
static int nfsrv_errmap[ELAST] = {
	NFSERR_PERM,	NFSERR_NOENT,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,
	NFSERR_NXIO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,
	NFSERR_IO,	NFSERR_IO,	NFSERR_ACCES,	NFSERR_IO,	NFSERR_IO,
	NFSERR_IO,	NFSERR_EXIST,	NFSERR_IO,	NFSERR_NODEV,	NFSERR_NOTDIR,
	NFSERR_ISDIR,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,
	NFSERR_IO,	NFSERR_FBIG,	NFSERR_NOSPC,	NFSERR_IO,	NFSERR_ROFS,
	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,
	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,
	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,
	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,
	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,
	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,
	NFSERR_IO,	NFSERR_IO,	NFSERR_NAMETOL,	NFSERR_IO,	NFSERR_IO,
	NFSERR_NOTEMPTY, NFSERR_IO,	NFSERR_IO,	NFSERR_DQUOT,	NFSERR_STALE,
	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,
	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,	NFSERR_IO,
	NFSERR_IO,
};
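/*
 * A minimal usage sketch for the table above (illustrative only, not
 * part of the original source); this mirrors how nfs_rephead() below
 * maps a non-zero errno "err" onto the wire:
 */
#ifdef notdef
	int err;	/* a kernel errno, assumed to be in 1..ELAST */
	int nfserr;	/* the NFS error number sent back to the client */

	nfserr = (err > 0 && err <= ELAST) ? nfsrv_errmap[err - 1] : NFSERR_IO;
#endif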
/*
 * Defines which timer to use for the procnum.
 * 0 - default
 * 1 - getattr
 * 2 - lookup
 * 3 - read
 * 4 - write
 */
static int proct[NFS_NPROCS] = {
	0, 1, 0, 0, 2, 3, 3, 0, 4, 0, 0, 0, 0, 0, 0, 0, 3, 0, 3, 0, 0, 0,
};

/*
 * There is a congestion window for outstanding rpcs maintained per mount
 * point. The cwnd size is adjusted in roughly the way that:
 * Van Jacobson, Congestion Avoidance and Control, In "Proceedings of
 * SIGCOMM '88". ACM, August 1988.
 * describes for TCP. The cwnd size is chopped in half on a retransmit timeout
 * and incremented by 1/cwnd when each rpc reply is received and a full cwnd
 * of rpcs is in progress.
 * (The sent count and cwnd are scaled for integer arith.)
 * Variants of "slow start" were tried and were found to be too much of a
 * performance hit (ave. rtt 3 times larger),
 * I suspect due to the large rtt that nfs rpcs have.
 */
#define	NFS_CWNDSCALE	256
#define	NFS_MAXCWND	(NFS_CWNDSCALE * 32)
static int nfs_backoff[8] = { 2, 4, 8, 16, 32, 64, 128, 256, };
int	nfs_sbwait();
void	nfs_disconnect(), nfs_realign(), nfsrv_wakenfsd(), nfs_sndunlock();
void	nfs_rcvunlock(), nqnfs_serverd();
struct mbuf *nfsm_rpchead();
int nfsrtton = 0;
struct nfsrtt nfsrtt;
struct nfsd nfsd_head;

int	nfsrv_null(),
	nfsrv_getattr(),
	nfsrv_setattr(),
	nfsrv_lookup(),
	nfsrv_readlink(),
	nfsrv_read(),
	nfsrv_write(),
	nfsrv_create(),
	nfsrv_remove(),
	nfsrv_rename(),
	nfsrv_link(),
	nfsrv_symlink(),
	nfsrv_mkdir(),
	nfsrv_rmdir(),
	nfsrv_readdir(),
	nfsrv_statfs(),
	nfsrv_noop(),
	nqnfsrv_readdirlook(),
	nqnfsrv_getlease(),
	nqnfsrv_vacated();

int (*nfsrv_procs[NFS_NPROCS])() = {
	nfsrv_null,
	nfsrv_getattr,
	nfsrv_setattr,
	nfsrv_noop,
	nfsrv_lookup,
	nfsrv_readlink,
	nfsrv_read,
	nfsrv_noop,
	nfsrv_write,
	nfsrv_create,
	nfsrv_remove,
	nfsrv_rename,
	nfsrv_link,
	nfsrv_symlink,
	nfsrv_mkdir,
	nfsrv_rmdir,
	nfsrv_readdir,
	nfsrv_statfs,
	nqnfsrv_readdirlook,
	nqnfsrv_getlease,
	nqnfsrv_vacated,
};

struct nfsreq nfsreqh;

/*
 * Initialize sockets and congestion for a new NFS connection.
 * We do not free the sockaddr if error.
 */
nfs_connect(nmp, rep)
	register struct nfsmount *nmp;
	struct nfsreq *rep;
{
	register struct socket *so;
	int s, error, rcvreserve, sndreserve;
	struct sockaddr *saddr;
	struct sockaddr_in *sin;
	struct mbuf *m;
	u_short tport;

	nmp->nm_so = (struct socket *)0;
	saddr = mtod(nmp->nm_nam, struct sockaddr *);
	if (error = socreate(saddr->sa_family,
	    &nmp->nm_so, nmp->nm_sotype, nmp->nm_soproto))
		goto bad;
	so = nmp->nm_so;
	nmp->nm_soflags = so->so_proto->pr_flags;
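	/*
	 * A note on the reserved-port dance below (not in the original
	 * comments): many NFS servers only trust requests whose source
	 * port is privileged (less than IPPORT_RESERVED, i.e. 1024),
	 * since only the superuser may bind such ports.  The loop scans
	 * downward from IPPORT_RESERVED - 1 while sobind() keeps
	 * returning EADDRINUSE, giving up halfway down the reserved range.
	 */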
	/*
	 * Some servers require that the client port be a reserved port number.
	 */
	if (saddr->sa_family == AF_INET && (nmp->nm_flag & NFSMNT_RESVPORT)) {
		MGET(m, M_WAIT, MT_SONAME);
		sin = mtod(m, struct sockaddr_in *);
		sin->sin_len = m->m_len = sizeof (struct sockaddr_in);
		sin->sin_family = AF_INET;
		sin->sin_addr.s_addr = INADDR_ANY;
		tport = IPPORT_RESERVED - 1;
		sin->sin_port = htons(tport);
		while ((error = sobind(so, m)) == EADDRINUSE &&
		    --tport > IPPORT_RESERVED / 2)
			sin->sin_port = htons(tport);
		m_freem(m);
		if (error)
			goto bad;
	}

	/*
	 * Protocols that do not require connections may be optionally left
	 * unconnected for servers that reply from a port other than NFS_PORT.
	 */
	if (nmp->nm_flag & NFSMNT_NOCONN) {
		if (nmp->nm_soflags & PR_CONNREQUIRED) {
			error = ENOTCONN;
			goto bad;
		}
	} else {
		if (error = soconnect(so, nmp->nm_nam))
			goto bad;

		/*
		 * Wait for the connection to complete. Cribbed from the
		 * connect system call but with the wait timing out so
		 * that interruptible mounts don't hang here for a long time.
		 */
		s = splnet();
		while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) {
			(void) tsleep((caddr_t)&so->so_timeo, PSOCK,
			    "nfscon", 2 * hz);
			if ((so->so_state & SS_ISCONNECTING) &&
			    so->so_error == 0 && rep &&
			    (error = nfs_sigintr(nmp, rep, rep->r_procp))) {
				so->so_state &= ~SS_ISCONNECTING;
				splx(s);
				goto bad;
			}
		}
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			splx(s);
			goto bad;
		}
		splx(s);
	}
	if (nmp->nm_flag & (NFSMNT_SOFT | NFSMNT_INT)) {
		so->so_rcv.sb_timeo = (5 * hz);
		so->so_snd.sb_timeo = (5 * hz);
	} else {
		so->so_rcv.sb_timeo = 0;
		so->so_snd.sb_timeo = 0;
	}
	if (nmp->nm_sotype == SOCK_DGRAM) {
		sndreserve = nmp->nm_wsize + NFS_MAXPKTHDR;
		rcvreserve = nmp->nm_rsize + NFS_MAXPKTHDR;
	} else if (nmp->nm_sotype == SOCK_SEQPACKET) {
		sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR) * 2;
		rcvreserve = (nmp->nm_rsize + NFS_MAXPKTHDR) * 2;
	} else {
		if (nmp->nm_sotype != SOCK_STREAM)
			panic("nfscon sotype");
		if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
			MGET(m, M_WAIT, MT_SOOPTS);
			*mtod(m, int *) = 1;
			m->m_len = sizeof(int);
			sosetopt(so, SOL_SOCKET, SO_KEEPALIVE, m);
		}
		if (so->so_proto->pr_protocol == IPPROTO_TCP) {
			MGET(m, M_WAIT, MT_SOOPTS);
			*mtod(m, int *) = 1;
			m->m_len = sizeof(int);
			sosetopt(so, IPPROTO_TCP, TCP_NODELAY, m);
		}
		sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR + sizeof (u_long))
		    * 2;
		rcvreserve = (nmp->nm_rsize + NFS_MAXPKTHDR + sizeof (u_long))
		    * 2;
	}
	if (error = soreserve(so, sndreserve, rcvreserve))
		goto bad;
	so->so_rcv.sb_flags |= SB_NOINTR;
	so->so_snd.sb_flags |= SB_NOINTR;

	/* Initialize other non-zero congestion variables */
	nmp->nm_srtt[0] = nmp->nm_srtt[1] = nmp->nm_srtt[2] = nmp->nm_srtt[3] =
	    nmp->nm_srtt[4] = (NFS_TIMEO << 3);
	nmp->nm_sdrtt[0] = nmp->nm_sdrtt[1] = nmp->nm_sdrtt[2] =
	    nmp->nm_sdrtt[3] = nmp->nm_sdrtt[4] = 0;
	nmp->nm_cwnd = NFS_MAXCWND / 2;	    /* Initial send window */
	nmp->nm_sent = 0;
	nmp->nm_timeouts = 0;
	return (0);

bad:
	nfs_disconnect(nmp);
	return (error);
}
/*
 * Reconnect routine:
 * Called when a connection is broken on a reliable protocol.
 * - clean up the old socket
 * - nfs_connect() again
 * - set R_MUSTRESEND for all outstanding requests on mount point
 * If this fails the mount point is DEAD!
 * nb: Must be called with the nfs_sndlock() set on the mount point.
 */
nfs_reconnect(rep)
	register struct nfsreq *rep;
{
	register struct nfsreq *rp;
	register struct nfsmount *nmp = rep->r_nmp;
	int error;

	nfs_disconnect(nmp);
	while (error = nfs_connect(nmp, rep)) {
		if (error == EINTR || error == ERESTART)
			return (EINTR);
		(void) tsleep((caddr_t)&lbolt, PSOCK, "nfscon", 0);
	}

	/*
	 * Loop through outstanding request list and fix up all requests
	 * on old socket.
	 */
	rp = nfsreqh.r_next;
	while (rp != &nfsreqh) {
		if (rp->r_nmp == nmp)
			rp->r_flags |= R_MUSTRESEND;
		rp = rp->r_next;
	}
	return (0);
}

/*
 * NFS disconnect. Clean up and unlink.
 */
void
nfs_disconnect(nmp)
	register struct nfsmount *nmp;
{
	register struct socket *so;

	if (nmp->nm_so) {
		so = nmp->nm_so;
		nmp->nm_so = (struct socket *)0;
		soshutdown(so, 2);
		soclose(so);
	}
}

/*
 * This is the nfs send routine. For connection based socket types, it
 * must be called with an nfs_sndlock() on the socket.
 * "rep == NULL" indicates that it has been called from a server.
 * For the client side:
 * - return EINTR if the RPC is terminated, 0 otherwise
 * - set R_MUSTRESEND if the send fails for any reason
 * - do any cleanup required by recoverable socket errors (???)
 * For the server side:
 * - return EINTR or ERESTART if interrupted by a signal
 * - return EPIPE if a connection is lost for connection based sockets (TCP...)
 * - do any cleanup required by recoverable socket errors (???)
 */
nfs_send(so, nam, top, rep)
	register struct socket *so;
	struct mbuf *nam;
	register struct mbuf *top;
	struct nfsreq *rep;
{
	struct mbuf *sendnam;
	int error, soflags, flags;

	if (rep) {
		if (rep->r_flags & R_SOFTTERM) {
			m_freem(top);
			return (EINTR);
		}
		if ((so = rep->r_nmp->nm_so) == NULL) {
			rep->r_flags |= R_MUSTRESEND;
			m_freem(top);
			return (0);
		}
		rep->r_flags &= ~R_MUSTRESEND;
		soflags = rep->r_nmp->nm_soflags;
	} else
		soflags = so->so_proto->pr_flags;
	if ((soflags & PR_CONNREQUIRED) || (so->so_state & SS_ISCONNECTED))
		sendnam = (struct mbuf *)0;
	else
		sendnam = nam;
	if (so->so_type == SOCK_SEQPACKET)
		flags = MSG_EOR;
	else
		flags = 0;

	error = sosend(so, sendnam, (struct uio *)0, top,
	    (struct mbuf *)0, flags);
	if (error) {
		if (rep) {
			log(LOG_INFO, "nfs send error %d for server %s\n", error,
			    rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname);
			/*
			 * Deal with errors for the client side.
			 */
			if (rep->r_flags & R_SOFTTERM)
				error = EINTR;
			else
				rep->r_flags |= R_MUSTRESEND;
		} else
			log(LOG_INFO, "nfsd send error %d\n", error);

		/*
		 * Handle any recoverable (soft) socket errors here. (???)
		 */
		if (error != EINTR && error != ERESTART &&
		    error != EWOULDBLOCK && error != EPIPE)
			error = 0;
	}
	return (error);
}
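/*
 * A note on the Sun RPC Record Mark handled by nfs_receive() below and
 * built in nfs_request() (a sketch, assuming the usual single-fragment
 * records this code sends; not part of the original source): the mark
 * is 4 bytes in network byte order, the high bit flagging the last
 * fragment of a record and the low 31 bits giving the fragment length.
 */
#ifdef notdef
	u_long mark, reclen;

	/* encode: or the last-fragment bit into the byte count */
	mark = htonl(0x80000000 | reclen);
	/* decode: strip the last-fragment bit to recover the length */
	reclen = ntohl(mark) & ~0x80000000;
#endif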
/*
 * Receive a Sun RPC Request/Reply. For SOCK_DGRAM, the work is all
 * done by soreceive(), but for SOCK_STREAM we must deal with the Record
 * Mark and consolidate the data into a new mbuf list.
 * nb: Sometimes TCP passes the data up to soreceive() in long lists of
 * small mbufs.
 * For SOCK_STREAM we must be very careful to read an entire record once
 * we have read any of it, even if the system call has been interrupted.
 */
nfs_receive(rep, aname, mp)
	register struct nfsreq *rep;
	struct mbuf **aname;
	struct mbuf **mp;
{
	register struct socket *so;
	struct uio auio;
	struct iovec aio;
	register struct mbuf *m;
	struct mbuf *control;
	u_long len;
	struct mbuf **getnam;
	int error, sotype, rcvflg;
	struct proc *p = curproc;	/* XXX */

	/*
	 * Set up arguments for soreceive()
	 */
	*mp = (struct mbuf *)0;
	*aname = (struct mbuf *)0;
	sotype = rep->r_nmp->nm_sotype;

	/*
	 * For reliable protocols, lock against other senders/receivers
	 * in case a reconnect is necessary.
	 * For SOCK_STREAM, first get the Record Mark to find out how much
	 * more there is to get.
	 * We must lock the socket against other receivers
	 * until we have an entire rpc request/reply.
	 */
	if (sotype != SOCK_DGRAM) {
		if (error = nfs_sndlock(&rep->r_nmp->nm_flag, rep))
			return (error);
tryagain:
		/*
		 * Check for fatal errors and resending request.
		 */
		/*
		 * Ugh: If a reconnect attempt just happened, nm_so
		 * would have changed. NULL indicates a failed
		 * attempt that has essentially shut down this
		 * mount point.
		 */
		if (rep->r_mrep || (rep->r_flags & R_SOFTTERM)) {
			nfs_sndunlock(&rep->r_nmp->nm_flag);
			return (EINTR);
		}
		if ((so = rep->r_nmp->nm_so) == NULL) {
			if (error = nfs_reconnect(rep)) {
				nfs_sndunlock(&rep->r_nmp->nm_flag);
				return (error);
			}
			goto tryagain;
		}
		while (rep->r_flags & R_MUSTRESEND) {
			m = m_copym(rep->r_mreq, 0, M_COPYALL, M_WAIT);
			nfsstats.rpcretries++;
			if (error = nfs_send(so, rep->r_nmp->nm_nam, m, rep)) {
				if (error == EINTR || error == ERESTART ||
				    (error = nfs_reconnect(rep))) {
					nfs_sndunlock(&rep->r_nmp->nm_flag);
					return (error);
				}
				goto tryagain;
			}
		}
		nfs_sndunlock(&rep->r_nmp->nm_flag);
		if (sotype == SOCK_STREAM) {
			aio.iov_base = (caddr_t) &len;
			aio.iov_len = sizeof(u_long);
			auio.uio_iov = &aio;
			auio.uio_iovcnt = 1;
			auio.uio_segflg = UIO_SYSSPACE;
			auio.uio_rw = UIO_READ;
			auio.uio_offset = 0;
			auio.uio_resid = sizeof(u_long);
			auio.uio_procp = p;
			do {
				rcvflg = MSG_WAITALL;
				error = soreceive(so, (struct mbuf **)0, &auio,
				    (struct mbuf **)0, (struct mbuf **)0,
				    &rcvflg);
				if (error == EWOULDBLOCK && rep) {
					if (rep->r_flags & R_SOFTTERM)
						return (EINTR);
				}
			} while (error == EWOULDBLOCK);
			if (!error && auio.uio_resid > 0) {
				log(LOG_INFO,
				    "short receive (%d/%d) from nfs server %s\n",
				    sizeof(u_long) - auio.uio_resid,
				    sizeof(u_long),
				    rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname);
				error = EPIPE;
			}
			if (error)
				goto errout;
			len = ntohl(len) & ~0x80000000;
			/*
			 * This is SERIOUS! We are out of sync with the sender
			 * and forcing a disconnect/reconnect is all I can do.
			 */
			if (len > NFS_MAXPACKET) {
				log(LOG_ERR, "%s (%d) from nfs server %s\n",
				    "impossible packet length",
				    len,
				    rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname);
				error = EFBIG;
				goto errout;
			}
			auio.uio_resid = len;
			do {
				rcvflg = MSG_WAITALL;
				error = soreceive(so, (struct mbuf **)0,
				    &auio, mp, (struct mbuf **)0, &rcvflg);
			} while (error == EWOULDBLOCK || error == EINTR ||
			    error == ERESTART);
			if (!error && auio.uio_resid > 0) {
				log(LOG_INFO,
				    "short receive (%d/%d) from nfs server %s\n",
				    len - auio.uio_resid, len,
				    rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname);
				error = EPIPE;
			}
		} else {
			/*
			 * NB: Since uio_resid is big, MSG_WAITALL is ignored
			 * and soreceive() will return when it has either a
			 * control msg or a data msg.
			 * We have no use for control msg., but must grab them
			 * and then throw them away so we know what is going
			 * on.
			 */
			auio.uio_resid = len = 100000000; /* Anything Big */
			auio.uio_procp = p;
			do {
				rcvflg = 0;
				error = soreceive(so, (struct mbuf **)0,
				    &auio, mp, &control, &rcvflg);
				if (control)
					m_freem(control);
				if (error == EWOULDBLOCK && rep) {
					if (rep->r_flags & R_SOFTTERM)
						return (EINTR);
				}
			} while (error == EWOULDBLOCK ||
			    (!error && *mp == NULL && control));
			if ((rcvflg & MSG_EOR) == 0)
				printf("Egad!!\n");
			if (!error && *mp == NULL)
				error = EPIPE;
			len -= auio.uio_resid;
		}
errout:
		if (error && error != EINTR && error != ERESTART) {
			m_freem(*mp);
			*mp = (struct mbuf *)0;
			if (error != EPIPE)
				log(LOG_INFO,
				    "receive error %d from nfs server %s\n",
				    error,
				    rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname);
			error = nfs_sndlock(&rep->r_nmp->nm_flag, rep);
			if (!error)
				error = nfs_reconnect(rep);
			if (!error)
				goto tryagain;
		}
	} else {
		if ((so = rep->r_nmp->nm_so) == NULL)
			return (EACCES);
		if (so->so_state & SS_ISCONNECTED)
			getnam = (struct mbuf **)0;
		else
			getnam = aname;
		auio.uio_resid = len = 1000000;
		auio.uio_procp = p;
		do {
			rcvflg = 0;
			error = soreceive(so, getnam, &auio, mp,
			    (struct mbuf **)0, &rcvflg);
			if (error == EWOULDBLOCK &&
			    (rep->r_flags & R_SOFTTERM))
				return (EINTR);
		} while (error == EWOULDBLOCK);
		len -= auio.uio_resid;
	}
	if (error) {
		m_freem(*mp);
		*mp = (struct mbuf *)0;
	}
	/*
	 * Search for any mbufs that are not a multiple of 4 bytes long
	 * or with m_data not longword aligned.
	 * These could cause pointer alignment problems, so copy them to
	 * well aligned mbufs.
	 */
	nfs_realign(*mp, 5 * NFSX_UNSIGNED);
	return (error);
}

/*
 * Implement receipt of reply on a socket.
 * We must search through the list of received datagrams matching them
 * with outstanding requests using the xid, until ours is found.
 */
/* ARGSUSED */
nfs_reply(myrep)
	struct nfsreq *myrep;
{
	register struct nfsreq *rep;
	register struct nfsmount *nmp = myrep->r_nmp;
	register long t1;
	struct mbuf *mrep, *nam, *md;
	u_long rxid, *tl;
	caddr_t dpos, cp2;
	int error;

	/*
	 * Loop around until we get our own reply
	 */
	for (;;) {
		/*
		 * Lock against other receivers so that I don't get stuck in
		 * sbwait() after someone else has received my reply for me.
		 * Also necessary for connection based protocols to avoid
		 * race conditions during a reconnect.
		 */
		if (error = nfs_rcvlock(myrep))
			return (error);
		/* Already received, bye bye */
		if (myrep->r_mrep != NULL) {
			nfs_rcvunlock(&nmp->nm_flag);
			return (0);
		}
		/*
		 * Get the next Rpc reply off the socket
		 */
		error = nfs_receive(myrep, &nam, &mrep);
		nfs_rcvunlock(&nmp->nm_flag);
		if (error) {
			printf("rcv err=%d\n", error);
			/*
			 * Ignore routing errors on connectionless protocols??
			 */
			if (NFSIGNORE_SOERROR(nmp->nm_soflags, error)) {
				nmp->nm_so->so_error = 0;
				continue;
			}
			return (error);
		}
		if (nam)
			m_freem(nam);

		/*
		 * Get the xid and check that it is an rpc reply
		 */
		md = mrep;
		dpos = mtod(md, caddr_t);
		nfsm_dissect(tl, u_long *, 2*NFSX_UNSIGNED);
		rxid = *tl++;
		if (*tl != rpc_reply) {
			if (nmp->nm_flag & NFSMNT_NQNFS) {
				if (nqnfs_callback(nmp, mrep, md, dpos))
					nfsstats.rpcinvalid++;
			} else {
				nfsstats.rpcinvalid++;
				m_freem(mrep);
			}
nfsmout:
			continue;
		}

		/*
		 * Loop through the request list to match up the reply
		 * Iff no match, just drop the datagram
		 */
		rep = nfsreqh.r_next;
		while (rep != &nfsreqh) {
			if (rep->r_mrep == NULL && rxid == rep->r_xid) {
				/* Found it.. */
				rep->r_mrep = mrep;
				rep->r_md = md;
				rep->r_dpos = dpos;
				if (nfsrtton) {
					struct rttl *rt;

					rt = &nfsrtt.rttl[nfsrtt.pos];
					rt->proc = rep->r_procnum;
					rt->rto = NFS_RTO(nmp, proct[rep->r_procnum]);
					rt->sent = nmp->nm_sent;
					rt->cwnd = nmp->nm_cwnd;
					rt->srtt = nmp->nm_srtt[proct[rep->r_procnum] - 1];
					rt->sdrtt = nmp->nm_sdrtt[proct[rep->r_procnum] - 1];
					rt->fsid = nmp->nm_mountp->mnt_stat.f_fsid;
					rt->tstamp = time;
					if (rep->r_flags & R_TIMING)
						rt->rtt = rep->r_rtt;
					else
						rt->rtt = 1000000;
					nfsrtt.pos = (nfsrtt.pos + 1) % NFSRTTLOGSIZ;
				}
				/*
				 * Update congestion window.
				 * Do the additive increase of
				 * one rpc/rtt.
				 */
				if (nmp->nm_cwnd <= nmp->nm_sent) {
					nmp->nm_cwnd +=
					    (NFS_CWNDSCALE * NFS_CWNDSCALE +
					    (nmp->nm_cwnd >> 1)) / nmp->nm_cwnd;
					if (nmp->nm_cwnd > NFS_MAXCWND)
						nmp->nm_cwnd = NFS_MAXCWND;
				}
				nmp->nm_sent -= NFS_CWNDSCALE;
				/*
				 * Update rtt using a gain of 0.125 on the mean
				 * and a gain of 0.25 on the deviation.
				 */
				if (rep->r_flags & R_TIMING) {
					/*
					 * Since the timer resolution of
					 * NFS_HZ is so coarse, it can often
					 * result in r_rtt == 0. Since
					 * r_rtt == N means that the actual
					 * rtt is between N+dt and N+2-dt ticks,
					 * add 1.
					 */
					t1 = rep->r_rtt + 1;
					t1 -= (NFS_SRTT(rep) >> 3);
					NFS_SRTT(rep) += t1;
					if (t1 < 0)
						t1 = -t1;
					t1 -= (NFS_SDRTT(rep) >> 2);
					NFS_SDRTT(rep) += t1;
				}
				nmp->nm_timeouts = 0;
				break;
			}
			rep = rep->r_next;
		}
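		/*
		 * A worked example of the scaled arithmetic above (an
		 * explanatory note, not in the original): with
		 * NFS_CWNDSCALE == 256, one rpc is 256 cwnd units.  At
		 * nm_cwnd == 2048 (8 rpcs in flight) each reply adds about
		 * (256 * 256) / 2048 == 32 units, so a full window of
		 * replies grows the window by roughly one rpc per round
		 * trip - the additive increase of the Van Jacobson paper
		 * cited above.  Likewise nm_srtt holds 8 times the smoothed
		 * mean and nm_sdrtt 4 times the smoothed deviation, which
		 * is how NFS_RTO() recovers A+2D as ((srtt>>2)+sdrtt)>>1
		 * and A+4D as (srtt>>3)+sdrtt.
		 */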
		/*
		 * If not matched to a request, drop it.
		 * If it's mine, get out.
		 */
		if (rep == &nfsreqh) {
			nfsstats.rpcunexpected++;
			m_freem(mrep);
		} else if (rep == myrep) {
			if (rep->r_mrep == NULL)
				panic("nfsreply nil");
			return (0);
		}
	}
}

/*
 * nfs_request - goes something like this
 *	- fill in request struct
 *	- links it into list
 *	- calls nfs_send() for first transmit
 *	- calls nfs_receive() to get reply
 *	- break down rpc header and return with nfs reply pointed to
 *	  by mrep or error
 * nb: always frees up mreq mbuf list
 */
nfs_request(vp, mrest, procnum, procp, cred, mrp, mdp, dposp)
	struct vnode *vp;
	struct mbuf *mrest;
	int procnum;
	struct proc *procp;
	struct ucred *cred;
	struct mbuf **mrp;
	struct mbuf **mdp;
	caddr_t *dposp;
{
	register struct mbuf *m, *mrep;
	register struct nfsreq *rep;
	register u_long *tl;
	register int i;
	struct nfsmount *nmp;
	struct mbuf *md, *mheadend;
	struct nfsreq *reph;
	struct nfsnode *tp, *np;
	time_t reqtime, waituntil;
	caddr_t dpos, cp2;
	int t1, nqlflag, cachable, s, error = 0, mrest_len, auth_len, auth_type;
	int trylater_delay = NQ_TRYLATERDEL, trylater_cnt = 0, failed_auth = 0;
	u_long xid;
	char *auth_str;

	nmp = VFSTONFS(vp->v_mount);
	MALLOC(rep, struct nfsreq *, sizeof(struct nfsreq), M_NFSREQ, M_WAITOK);
	rep->r_nmp = nmp;
	rep->r_vp = vp;
	rep->r_procp = procp;
	rep->r_procnum = procnum;
	i = 0;
	m = mrest;
	while (m) {
		i += m->m_len;
		m = m->m_next;
	}
	mrest_len = i;

	/*
	 * Get the RPC header with authorization.
	 */
kerbauth:
	auth_str = (char *)0;
	if (nmp->nm_flag & NFSMNT_KERB) {
		if (failed_auth) {
			error = nfs_getauth(nmp, rep, cred, &auth_type,
			    &auth_str, &auth_len);
			if (error) {
				free((caddr_t)rep, M_NFSREQ);
				m_freem(mrest);
				return (error);
			}
		} else {
			auth_type = RPCAUTH_UNIX;
			auth_len = 5 * NFSX_UNSIGNED;
		}
	} else {
		auth_type = RPCAUTH_UNIX;
		if (cred->cr_ngroups < 1)
			panic("nfsreq nogrps");
		auth_len = ((((cred->cr_ngroups - 1) > nmp->nm_numgrps) ?
		    nmp->nm_numgrps : (cred->cr_ngroups - 1)) << 2) +
		    5 * NFSX_UNSIGNED;
	}
	m = nfsm_rpchead(cred, (nmp->nm_flag & NFSMNT_NQNFS), procnum,
	    auth_type, auth_len, auth_str, mrest, mrest_len, &mheadend, &xid);
	if (auth_str)
		free(auth_str, M_TEMP);

	/*
	 * For stream protocols, insert a Sun RPC Record Mark.
	 */
	if (nmp->nm_sotype == SOCK_STREAM) {
		M_PREPEND(m, NFSX_UNSIGNED, M_WAIT);
		*mtod(m, u_long *) = htonl(0x80000000 |
		    (m->m_pkthdr.len - NFSX_UNSIGNED));
	}
	rep->r_mreq = m;
	rep->r_xid = xid;
tryagain:
	if (nmp->nm_flag & NFSMNT_SOFT)
		rep->r_retry = nmp->nm_retry;
	else
		rep->r_retry = NFS_MAXREXMIT + 1;	/* past clip limit */
	rep->r_rtt = rep->r_rexmit = 0;
	if (proct[procnum] > 0)
		rep->r_flags = R_TIMING;
	else
		rep->r_flags = 0;
	rep->r_mrep = NULL;

	/*
	 * Do the client side RPC.
	 */
	nfsstats.rpcrequests++;
	/*
	 * Chain request into list of outstanding requests. Be sure
	 * to put it LAST so timer finds oldest requests first.
	 */
	s = splsoftclock();
	reph = &nfsreqh;
	reph->r_prev->r_next = rep;
	rep->r_prev = reph->r_prev;
	reph->r_prev = rep;
	rep->r_next = reph;

	/* Get send time for nqnfs */
	reqtime = time.tv_sec;

	/*
	 * If backing off another request or avoiding congestion, don't
	 * send this one now but let timer do it. If not timing a request,
	 * do it now.
	 */
	if (nmp->nm_so && (nmp->nm_sotype != SOCK_DGRAM ||
	    (nmp->nm_flag & NFSMNT_DUMBTIMR) ||
	    nmp->nm_sent < nmp->nm_cwnd)) {
		splx(s);
		if (nmp->nm_soflags & PR_CONNREQUIRED)
			error = nfs_sndlock(&nmp->nm_flag, rep);
		if (!error) {
			m = m_copym(m, 0, M_COPYALL, M_WAIT);
			error = nfs_send(nmp->nm_so, nmp->nm_nam, m, rep);
			if (nmp->nm_soflags & PR_CONNREQUIRED)
				nfs_sndunlock(&nmp->nm_flag);
		}
		if (!error && (rep->r_flags & R_MUSTRESEND) == 0) {
			nmp->nm_sent += NFS_CWNDSCALE;
			rep->r_flags |= R_SENT;
		}
	} else {
		splx(s);
		rep->r_rtt = -1;
	}

	/*
	 * Wait for the reply from our send or the timer's.
	 */
	if (!error || error == EPIPE)
		error = nfs_reply(rep);

	/*
	 * RPC done, unlink the request.
	 */
	s = splsoftclock();
	rep->r_prev->r_next = rep->r_next;
	rep->r_next->r_prev = rep->r_prev;
	splx(s);

	/*
	 * If there was a successful reply and a tprintf msg.
	 * tprintf a response.
	 */
	if (!error && (rep->r_flags & R_TPRINTFMSG))
		nfs_msg(rep->r_procp, nmp->nm_mountp->mnt_stat.f_mntfromname,
		    "is alive again");
	mrep = rep->r_mrep;
	md = rep->r_md;
	dpos = rep->r_dpos;
	if (error) {
		m_freem(rep->r_mreq);
		free((caddr_t)rep, M_NFSREQ);
		return (error);
	}

	/*
	 * break down the rpc header and check if ok
	 */
	nfsm_dissect(tl, u_long *, 3*NFSX_UNSIGNED);
	if (*tl++ == rpc_msgdenied) {
		if (*tl == rpc_mismatch)
			error = EOPNOTSUPP;
		else if ((nmp->nm_flag & NFSMNT_KERB) && *tl++ == rpc_autherr) {
			if (*tl == rpc_rejectedcred && failed_auth == 0) {
				failed_auth++;
				mheadend->m_next = (struct mbuf *)0;
				m_freem(mrep);
				m_freem(rep->r_mreq);
				goto kerbauth;
			} else
				error = EAUTH;
		} else
			error = EACCES;
		m_freem(mrep);
		m_freem(rep->r_mreq);
		free((caddr_t)rep, M_NFSREQ);
		return (error);
	}

	/*
	 * skip over the auth_verf, someday we may want to cache auth_short's
	 * for nfs_reqhead(), but for now just dump it
	 */
	if (*++tl != 0) {
		i = nfsm_rndup(fxdr_unsigned(long, *tl));
		nfsm_adv(i);
	}
	nfsm_dissect(tl, u_long *, NFSX_UNSIGNED);
	/* 0 == ok */
	if (*tl == 0) {
		nfsm_dissect(tl, u_long *, NFSX_UNSIGNED);
		if (*tl != 0) {
			error = fxdr_unsigned(int, *tl);
			m_freem(mrep);
			if ((nmp->nm_flag & NFSMNT_NQNFS) &&
			    error == NQNFS_TRYLATER) {
				error = 0;
				waituntil = time.tv_sec + trylater_delay;
				while (time.tv_sec < waituntil)
					(void) tsleep((caddr_t)&lbolt,
					    PSOCK, "nqnfstry", 0);
				trylater_delay *= nfs_backoff[trylater_cnt];
				if (trylater_cnt < 7)
					trylater_cnt++;
				goto tryagain;
			}
			m_freem(rep->r_mreq);
			free((caddr_t)rep, M_NFSREQ);
			return (error);
		}

		/*
		 * For nqnfs, get any lease in reply
		 */
		if (nmp->nm_flag & NFSMNT_NQNFS) {
			nfsm_dissect(tl, u_long *, NFSX_UNSIGNED);
			if (*tl) {
				np = VTONFS(vp);
				nqlflag = fxdr_unsigned(int, *tl);
				nfsm_dissect(tl, u_long *, 4*NFSX_UNSIGNED);
				cachable = fxdr_unsigned(int, *tl++);
				reqtime += fxdr_unsigned(int, *tl++);
				if (reqtime > time.tv_sec) {
					/*
					 * Unlink np from the lease timer list
					 * if it is already on it; the nfsmount
					 * itself heads the list, so a cast of
					 * nmp marks the end.
					 */
					if (np->n_tnext) {
						if (np->n_tnext == (struct nfsnode *)nmp)
							nmp->nm_tprev = np->n_tprev;
						else
							np->n_tnext->n_tprev = np->n_tprev;
						if (np->n_tprev == (struct nfsnode *)nmp)
							nmp->nm_tnext = np->n_tnext;
						else
							np->n_tprev->n_tnext = np->n_tnext;
						if (nqlflag == NQL_WRITE)
							np->n_flag |= NQNFSWRITE;
					} else if (nqlflag == NQL_READ)
						np->n_flag &= ~NQNFSWRITE;
					else
						np->n_flag |= NQNFSWRITE;
					if (cachable)
						np->n_flag &= ~NQNFSNONCACHE;
					else
						np->n_flag |= NQNFSNONCACHE;
					np->n_expiry = reqtime;
					fxdr_hyper(tl, &np->n_lrev);
					/*
					 * Reinsert np, keeping the list sorted
					 * by increasing expiry time.
					 */
					tp = nmp->nm_tprev;
					while (tp != (struct nfsnode *)nmp &&
					    tp->n_expiry > np->n_expiry)
						tp = tp->n_tprev;
					if (tp == (struct nfsnode *)nmp) {
						np->n_tnext = nmp->nm_tnext;
						nmp->nm_tnext = np;
					} else {
						np->n_tnext = tp->n_tnext;
						tp->n_tnext = np;
					}
					np->n_tprev = tp;
					if (np->n_tnext == (struct nfsnode *)nmp)
						nmp->nm_tprev = np;
					else
						np->n_tnext->n_tprev = np;
				}
			}
		}
		*mrp = mrep;
		*mdp = md;
		*dposp = dpos;
		m_freem(rep->r_mreq);
		FREE((caddr_t)rep, M_NFSREQ);
		return (0);
	}
	m_freem(mrep);
	m_freem(rep->r_mreq);
	free((caddr_t)rep, M_NFSREQ);
	error = EPROTONOSUPPORT;
nfsmout:
	return (error);
}

/*
 * Generate the rpc reply header
 * siz arg. is used to decide if adding a cluster is worthwhile
 */
nfs_rephead(siz, nd, err, cache, frev, mrq, mbp, bposp)
	int siz;
	struct nfsd *nd;
	int err;
	int cache;
	u_quad_t *frev;
	struct mbuf **mrq;
	struct mbuf **mbp;
	caddr_t *bposp;
{
	register u_long *tl;
	register struct mbuf *mreq;
	caddr_t bpos;
	struct mbuf *mb, *mb2;

	MGETHDR(mreq, M_WAIT, MT_DATA);
	mb = mreq;
	/*
	 * If this is a big reply, use a cluster else
	 * try and leave leading space for the lower level headers.
	 */
	siz += RPC_REPLYSIZ;
	if (siz >= MINCLSIZE) {
		MCLGET(mreq, M_WAIT);
	} else
		mreq->m_data += max_hdr;
	tl = mtod(mreq, u_long *);
	mreq->m_len = 6*NFSX_UNSIGNED;
	bpos = ((caddr_t)tl) + mreq->m_len;
	*tl++ = nd->nd_retxid;
	*tl++ = rpc_reply;
	if (err == ERPCMISMATCH || err == NQNFS_AUTHERR) {
		*tl++ = rpc_msgdenied;
		if (err == NQNFS_AUTHERR) {
			*tl++ = rpc_autherr;
			*tl = rpc_rejectedcred;
			mreq->m_len -= NFSX_UNSIGNED;
			bpos -= NFSX_UNSIGNED;
		} else {
			*tl++ = rpc_mismatch;
			*tl++ = txdr_unsigned(2);
			*tl = txdr_unsigned(2);
		}
	} else {
		*tl++ = rpc_msgaccepted;
		*tl++ = 0;
		*tl++ = 0;
		switch (err) {
		case EPROGUNAVAIL:
			*tl = txdr_unsigned(RPC_PROGUNAVAIL);
			break;
		case EPROGMISMATCH:
			*tl = txdr_unsigned(RPC_PROGMISMATCH);
			nfsm_build(tl, u_long *, 2*NFSX_UNSIGNED);
			*tl++ = txdr_unsigned(2);
			*tl = txdr_unsigned(2);	/* someday 3 */
			break;
		case EPROCUNAVAIL:
			*tl = txdr_unsigned(RPC_PROCUNAVAIL);
			break;
		default:
			*tl = 0;
			if (err != VNOVAL) {
				nfsm_build(tl, u_long *, NFSX_UNSIGNED);
				if (err)
					*tl = txdr_unsigned(nfsrv_errmap[err - 1]);
				else
					*tl = 0;
			}
			break;
		}
	}
	/*
	 * For nqnfs, piggyback lease as requested.
	 */
	if (nd->nd_nqlflag != NQL_NOVAL && err == 0) {
		if (nd->nd_nqlflag) {
			nfsm_build(tl, u_long *, 5*NFSX_UNSIGNED);
			*tl++ = txdr_unsigned(nd->nd_nqlflag);
			*tl++ = txdr_unsigned(cache);
			*tl++ = txdr_unsigned(nd->nd_duration);
			txdr_hyper(frev, tl);
		} else {
			if (nd->nd_nqlflag != 0)
				panic("nqreph");
			nfsm_build(tl, u_long *, NFSX_UNSIGNED);
			*tl = 0;
		}
	}
	*mrq = mreq;
	*mbp = mb;
	*bposp = bpos;
	if (err != 0 && err != VNOVAL)
		nfsstats.srvrpc_errs++;
	return (0);
}

/*
 * Nfs timer routine
 * Scan the nfsreq list and retransmit any requests that have timed out
 * To avoid retransmission attempts on STREAM sockets (in the future) make
 * sure to set the r_retry field to 0 (implies nm_retry == 0).
 */
nfs_timer()
{
	register struct nfsreq *rep;
	register struct mbuf *m;
	register struct socket *so;
	register struct nfsmount *nmp;
	register int timeo;
	static long lasttime = 0;
	int s, error;

	s = splnet();
	for (rep = nfsreqh.r_next; rep != &nfsreqh; rep = rep->r_next) {
		nmp = rep->r_nmp;
		if (rep->r_mrep || (rep->r_flags & R_SOFTTERM))
			continue;
		if (nfs_sigintr(nmp, rep, rep->r_procp)) {
			rep->r_flags |= R_SOFTTERM;
			continue;
		}
		if (rep->r_rtt >= 0) {
			rep->r_rtt++;
			if (nmp->nm_flag & NFSMNT_DUMBTIMR)
				timeo = nmp->nm_timeo;
			else
				timeo = NFS_RTO(nmp, proct[rep->r_procnum]);
			if (nmp->nm_timeouts > 0)
				timeo *= nfs_backoff[nmp->nm_timeouts - 1];
			if (rep->r_rtt <= timeo)
				continue;
			if (nmp->nm_timeouts < 8)
				nmp->nm_timeouts++;
		}
		/*
		 * Check for server not responding
		 */
		if ((rep->r_flags & R_TPRINTFMSG) == 0 &&
		    rep->r_rexmit > nmp->nm_deadthresh) {
			nfs_msg(rep->r_procp,
			    nmp->nm_mountp->mnt_stat.f_mntfromname,
			    "not responding");
			rep->r_flags |= R_TPRINTFMSG;
		}
		if (rep->r_rexmit >= rep->r_retry) {	/* too many */
			nfsstats.rpctimeouts++;
			rep->r_flags |= R_SOFTTERM;
			continue;
		}
		if (nmp->nm_sotype != SOCK_DGRAM) {
			if (++rep->r_rexmit > NFS_MAXREXMIT)
				rep->r_rexmit = NFS_MAXREXMIT;
			continue;
		}
		if ((so = nmp->nm_so) == NULL)
			continue;

		/*
		 * If there is enough space and the window allows..
		 *	Resend it
		 * Set r_rtt to -1 in case we fail to send it now.
		 */
		rep->r_rtt = -1;
		if (sbspace(&so->so_snd) >= rep->r_mreq->m_pkthdr.len &&
		    ((nmp->nm_flag & NFSMNT_DUMBTIMR) ||
		     (rep->r_flags & R_SENT) ||
		     nmp->nm_sent < nmp->nm_cwnd) &&
		    (m = m_copym(rep->r_mreq, 0, M_COPYALL, M_DONTWAIT))) {
			if ((nmp->nm_flag & NFSMNT_NOCONN) == 0)
				error = (*so->so_proto->pr_usrreq)(so, PRU_SEND, m,
				    (struct mbuf *)0, (struct mbuf *)0);
			else
				error = (*so->so_proto->pr_usrreq)(so, PRU_SEND, m,
				    nmp->nm_nam, (struct mbuf *)0);
			if (error) {
				if (NFSIGNORE_SOERROR(nmp->nm_soflags, error))
					so->so_error = 0;
			} else {
				/*
				 * Iff first send, start timing
				 * else turn timing off, backoff timer
				 * and divide congestion window by 2.
				 */
				if (rep->r_flags & R_SENT) {
					rep->r_flags &= ~R_TIMING;
					if (++rep->r_rexmit > NFS_MAXREXMIT)
						rep->r_rexmit = NFS_MAXREXMIT;
					nmp->nm_cwnd >>= 1;
					if (nmp->nm_cwnd < NFS_CWNDSCALE)
						nmp->nm_cwnd = NFS_CWNDSCALE;
					nfsstats.rpcretries++;
				} else {
					rep->r_flags |= R_SENT;
					nmp->nm_sent += NFS_CWNDSCALE;
				}
				rep->r_rtt = 0;
			}
		}
	}

	/*
	 * Call the nqnfs server timer once a second to handle leases.
	 */
	if (lasttime != time.tv_sec) {
		lasttime = time.tv_sec;
		nqnfs_serverd();
	}
	splx(s);
	timeout(nfs_timer, (caddr_t)0, hz / NFS_HZ);
}

/*
 * Test for a termination condition pending on the process.
 * This is used for NFSMNT_INT mounts.
 */
nfs_sigintr(nmp, rep, p)
	struct nfsmount *nmp;
	struct nfsreq *rep;
	register struct proc *p;
{

	if (rep && (rep->r_flags & R_SOFTTERM))
		return (EINTR);
	if (!(nmp->nm_flag & NFSMNT_INT))
		return (0);
	if (p && p->p_sig && (((p->p_sig &~ p->p_sigmask) &~ p->p_sigignore) &
	    NFSINT_SIGMASK))
		return (EINTR);
	return (0);
}

/*
 * Lock a socket against others.
 * Necessary for STREAM sockets to ensure you get an entire rpc request/reply
 * and also to avoid race conditions between the processes with nfs requests
 * in progress when a reconnect is necessary.
 */
nfs_sndlock(flagp, rep)
	register int *flagp;
	struct nfsreq *rep;
{
	struct proc *p;

	if (rep)
		p = rep->r_procp;
	else
		p = (struct proc *)0;
	while (*flagp & NFSMNT_SNDLOCK) {
		/*
		 * rep may be NULL when called on behalf of a server, so
		 * guard the dereference before checking for an interrupt.
		 */
		if (rep && nfs_sigintr(rep->r_nmp, rep, p))
			return (EINTR);
		*flagp |= NFSMNT_WANTSND;
		(void) tsleep((caddr_t)flagp, PZERO - 1, "nfsndlck", 0);
	}
	*flagp |= NFSMNT_SNDLOCK;
	return (0);
}

/*
 * Unlock the stream socket for others.
 */
void
nfs_sndunlock(flagp)
	register int *flagp;
{

	if ((*flagp & NFSMNT_SNDLOCK) == 0)
		panic("nfs sndunlock");
	*flagp &= ~NFSMNT_SNDLOCK;
	if (*flagp & NFSMNT_WANTSND) {
		*flagp &= ~NFSMNT_WANTSND;
		wakeup((caddr_t)flagp);
	}
}

nfs_rcvlock(rep)
	register struct nfsreq *rep;
{
	register int *flagp = &rep->r_nmp->nm_flag;

	while (*flagp & NFSMNT_RCVLOCK) {
		if (nfs_sigintr(rep->r_nmp, rep, rep->r_procp))
			return (EINTR);
		*flagp |= NFSMNT_WANTRCV;
		(void) tsleep((caddr_t)flagp, PZERO - 1, "nfsrcvlck", 0);
	}
	*flagp |= NFSMNT_RCVLOCK;
	return (0);
}

/*
 * Unlock the stream socket for others.
 */
void
nfs_rcvunlock(flagp)
	register int *flagp;
{

	if ((*flagp & NFSMNT_RCVLOCK) == 0)
		panic("nfs rcvunlock");
	*flagp &= ~NFSMNT_RCVLOCK;
	if (*flagp & NFSMNT_WANTRCV) {
		*flagp &= ~NFSMNT_WANTRCV;
		wakeup((caddr_t)flagp);
	}
}
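/*
 * Why realignment matters (an explanatory note, not in the original):
 * the XDR dissection macros such as nfsm_dissect() walk mbuf data by
 * casting it to u_long *, so both the start address and the length of
 * each mbuf must be longword (4 byte) multiples or strict-alignment
 * machines would take unaligned-access faults.  nfs_realign() below
 * copies offending data up to the aligned start of each mbuf's buffer.
 */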
/*
 * Check for badly aligned mbuf data areas and
 * realign data in an mbuf list by copying the data areas up, as required.
 */
void
nfs_realign(m, hsiz)
	register struct mbuf *m;
	int hsiz;
{
	register struct mbuf *m2;
	register int siz, mlen, olen;
	register caddr_t tcp, fcp;
	struct mbuf *mnew;

	while (m) {
		/*
		 * This never happens for UDP, rarely happens for TCP
		 * but frequently happens for iso transport.
		 */
		if ((m->m_len & 0x3) || (mtod(m, int) & 0x3)) {
			olen = m->m_len;
			fcp = mtod(m, caddr_t);
			m->m_flags &= ~M_PKTHDR;
			if (m->m_flags & M_EXT)
				m->m_data = m->m_ext.ext_buf;
			else
				m->m_data = m->m_dat;
			m->m_len = 0;
			tcp = mtod(m, caddr_t);
			mnew = m;
			m2 = m->m_next;

			/*
			 * If possible, only put the first invariant part
			 * of the RPC header in the first mbuf.
			 */
			if (olen <= hsiz)
				mlen = hsiz;
			else
				mlen = M_TRAILINGSPACE(m);

			/*
			 * Loop through the mbuf list consolidating data.
			 */
			while (m) {
				while (olen > 0) {
					if (mlen == 0) {
						m2->m_flags &= ~M_PKTHDR;
						if (m2->m_flags & M_EXT)
							m2->m_data = m2->m_ext.ext_buf;
						else
							m2->m_data = m2->m_dat;
						m2->m_len = 0;
						mlen = M_TRAILINGSPACE(m2);
						tcp = mtod(m2, caddr_t);
						mnew = m2;
						m2 = m2->m_next;
					}
					siz = MIN(mlen, olen);
					if (tcp != fcp)
						bcopy(fcp, tcp, siz);
					mnew->m_len += siz;
					mlen -= siz;
					olen -= siz;
					tcp += siz;
					fcp += siz;
				}
				m = m->m_next;
				if (m) {
					olen = m->m_len;
					fcp = mtod(m, caddr_t);
				}
			}

			/*
			 * Finally, set m_len == 0 for any trailing mbufs
			 * that have been copied out of.
			 */
			while (m2) {
				m2->m_len = 0;
				m2 = m2->m_next;
			}
			return;
		}
		m = m->m_next;
	}
}

/*
 * Socket upcall routine for the nfsd sockets.
 * The caddr_t arg is a pointer to the "struct nfssvc_sock".
 * Essentially do as much as possible non-blocking, else punt and it will
 * be called with M_WAIT from an nfsd.
 */
void
nfsrv_rcv(so, arg, waitflag)
	struct socket *so;
	caddr_t arg;
	int waitflag;
{
	register struct nfssvc_sock *slp = (struct nfssvc_sock *)arg;
	register struct mbuf *m;
	struct mbuf *mp, *nam;
	struct uio auio;
	int flags, error;

	if ((slp->ns_flag & SLP_VALID) == 0)
		return;
#ifdef notdef
	/*
	 * Define this to test for nfsds handling this under heavy load.
	 */
	if (waitflag == M_DONTWAIT) {
		slp->ns_flag |= SLP_NEEDQ;
		goto dorecs;
	}
#endif
	auio.uio_procp = NULL;
	if (so->so_type == SOCK_STREAM) {
		/*
		 * If there are already records on the queue, defer soreceive()
		 * to an nfsd so that there is feedback to the TCP layer that
		 * the nfs servers are heavily loaded.
		 */
		if (slp->ns_rec && waitflag == M_DONTWAIT) {
			slp->ns_flag |= SLP_NEEDQ;
			goto dorecs;
		}

		/*
		 * Do soreceive().
		 */
		auio.uio_resid = 1000000000;
		flags = MSG_DONTWAIT;
		error = soreceive(so, &nam, &auio, &mp,
		    (struct mbuf **)0, &flags);
		if (error || mp == (struct mbuf *)0) {
			if (error == EWOULDBLOCK)
				slp->ns_flag |= SLP_NEEDQ;
			else
				slp->ns_flag |= SLP_DISCONN;
			goto dorecs;
		}
		m = mp;
		if (slp->ns_rawend) {
			slp->ns_rawend->m_next = m;
			slp->ns_cc += 1000000000 - auio.uio_resid;
		} else {
			slp->ns_raw = m;
			slp->ns_cc = 1000000000 - auio.uio_resid;
		}
		while (m->m_next)
			m = m->m_next;
		slp->ns_rawend = m;
		/*
		 * Now try and parse record(s) out of the raw stream data.
		 */
		if (error = nfsrv_getstream(slp, waitflag)) {
			if (error == EPERM)
				slp->ns_flag |= SLP_DISCONN;
			else
				slp->ns_flag |= SLP_NEEDQ;
		}
	} else {
		do {
			auio.uio_resid = 1000000000;
			flags = MSG_DONTWAIT;
			error = soreceive(so, &nam, &auio, &mp,
			    (struct mbuf **)0, &flags);
			if (mp) {
				nfs_realign(mp, 10 * NFSX_UNSIGNED);
				if (nam) {
					m = nam;
					m->m_next = mp;
				} else
					m = mp;
				if (slp->ns_recend)
					slp->ns_recend->m_nextpkt = m;
				else
					slp->ns_rec = m;
				slp->ns_recend = m;
				m->m_nextpkt = (struct mbuf *)0;
			}
			if (error) {
				if ((so->so_proto->pr_flags & PR_CONNREQUIRED)
				    && error != EWOULDBLOCK) {
					slp->ns_flag |= SLP_DISCONN;
					goto dorecs;
				}
			}
		} while (mp);
	}

	/*
	 * Now try and process the request records, non-blocking.
	 */
dorecs:
	if (waitflag == M_DONTWAIT &&
	    (slp->ns_rec || (slp->ns_flag & (SLP_NEEDQ | SLP_DISCONN))))
		nfsrv_wakenfsd(slp);
}

/*
 * Try and extract an RPC request from the mbuf data list received on a
 * stream socket. The "waitflag" argument indicates whether or not it
 * can sleep.
 */
nfsrv_getstream(slp, waitflag)
	register struct nfssvc_sock *slp;
	int waitflag;
{
	register struct mbuf *m;
	register char *cp1, *cp2;
	register int len;
	struct mbuf *om, *m2, *recm;
	u_long recmark;

	if (slp->ns_flag & SLP_GETSTREAM)
		panic("nfs getstream");
	slp->ns_flag |= SLP_GETSTREAM;
	for (;;) {
		if (slp->ns_reclen == 0) {
			if (slp->ns_cc < NFSX_UNSIGNED) {
				slp->ns_flag &= ~SLP_GETSTREAM;
				return (0);
			}
			m = slp->ns_raw;
			if (m->m_len >= NFSX_UNSIGNED) {
				bcopy(mtod(m, caddr_t), (caddr_t)&recmark,
				    NFSX_UNSIGNED);
				m->m_data += NFSX_UNSIGNED;
				m->m_len -= NFSX_UNSIGNED;
			} else {
				cp1 = (caddr_t)&recmark;
				cp2 = mtod(m, caddr_t);
				while (cp1 < ((caddr_t)&recmark) + NFSX_UNSIGNED) {
					while (m->m_len == 0) {
						m = m->m_next;
						cp2 = mtod(m, caddr_t);
					}
					*cp1++ = *cp2++;
					m->m_data++;
					m->m_len--;
				}
			}
			slp->ns_cc -= NFSX_UNSIGNED;
			slp->ns_reclen = ntohl(recmark) & ~0x80000000;
			if (slp->ns_reclen < NFS_MINPACKET ||
			    slp->ns_reclen > NFS_MAXPACKET) {
				slp->ns_flag &= ~SLP_GETSTREAM;
				return (EPERM);
			}
		}
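		/*
		 * At this point (an explanatory note, not in the original):
		 * ns_reclen holds the length of the current record and ns_cc
		 * counts the raw bytes queued on ns_raw.  Three cases follow:
		 * the queue holds exactly one record, more than one record
		 * (the mbuf chain must be split), or only part of a record
		 * (return and wait for more data).
		 */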
		/*
		 * Now get the record part.
		 */
		if (slp->ns_cc == slp->ns_reclen) {
			recm = slp->ns_raw;
			slp->ns_raw = slp->ns_rawend = (struct mbuf *)0;
			slp->ns_cc = slp->ns_reclen = 0;
		} else if (slp->ns_cc > slp->ns_reclen) {
			len = 0;
			m = slp->ns_raw;
			om = (struct mbuf *)0;
			while (len < slp->ns_reclen) {
				if ((len + m->m_len) > slp->ns_reclen) {
					m2 = m_copym(m, 0, slp->ns_reclen - len,
					    waitflag);
					if (m2) {
						if (om) {
							om->m_next = m2;
							recm = slp->ns_raw;
						} else
							recm = m2;
						m->m_data += slp->ns_reclen - len;
						m->m_len -= slp->ns_reclen - len;
						len = slp->ns_reclen;
					} else {
						slp->ns_flag &= ~SLP_GETSTREAM;
						return (EWOULDBLOCK);
					}
				} else if ((len + m->m_len) == slp->ns_reclen) {
					om = m;
					len += m->m_len;
					m = m->m_next;
					recm = slp->ns_raw;
					om->m_next = (struct mbuf *)0;
				} else {
					om = m;
					len += m->m_len;
					m = m->m_next;
				}
			}
			slp->ns_raw = m;
			slp->ns_cc -= len;
			slp->ns_reclen = 0;
		} else {
			slp->ns_flag &= ~SLP_GETSTREAM;
			return (0);
		}
		nfs_realign(recm, 10 * NFSX_UNSIGNED);
		if (slp->ns_recend)
			slp->ns_recend->m_nextpkt = recm;
		else
			slp->ns_rec = recm;
		slp->ns_recend = recm;
	}
}

/*
 * Parse an RPC header.
 */
nfsrv_dorec(slp, nd)
	register struct nfssvc_sock *slp;
	register struct nfsd *nd;
{
	register struct mbuf *m;
	int error;

	if ((slp->ns_flag & SLP_VALID) == 0 ||
	    (m = slp->ns_rec) == (struct mbuf *)0)
		return (ENOBUFS);
	if (slp->ns_rec = m->m_nextpkt)
		m->m_nextpkt = (struct mbuf *)0;
	else
		slp->ns_recend = (struct mbuf *)0;
	if (m->m_type == MT_SONAME) {
		nd->nd_nam = m;
		nd->nd_md = nd->nd_mrep = m->m_next;
		m->m_next = (struct mbuf *)0;
	} else {
		nd->nd_nam = (struct mbuf *)0;
		nd->nd_md = nd->nd_mrep = m;
	}
	nd->nd_dpos = mtod(nd->nd_md, caddr_t);
	if (error = nfs_getreq(nd, TRUE)) {
		m_freem(nd->nd_nam);
		return (error);
	}
	return (0);
}
/*
 * Parse an RPC request
 * - verify it
 * - fill in the cred struct.
 */
nfs_getreq(nd, has_header)
	register struct nfsd *nd;
	int has_header;
{
	register int len, i;
	register u_long *tl;
	register long t1;
	struct uio uio;
	struct iovec iov;
	caddr_t dpos, cp2;
	u_long nfsvers, auth_type;
	int error = 0, nqnfs = 0;
	struct mbuf *mrep, *md;

	mrep = nd->nd_mrep;
	md = nd->nd_md;
	dpos = nd->nd_dpos;
	if (has_header) {
		nfsm_dissect(tl, u_long *, 10*NFSX_UNSIGNED);
		nd->nd_retxid = *tl++;
		if (*tl++ != rpc_call) {
			m_freem(mrep);
			return (EBADRPC);
		}
	} else {
		nfsm_dissect(tl, u_long *, 8*NFSX_UNSIGNED);
	}
	nd->nd_repstat = 0;
	if (*tl++ != rpc_vers) {
		nd->nd_repstat = ERPCMISMATCH;
		nd->nd_procnum = NFSPROC_NOOP;
		return (0);
	}
	nfsvers = nfs_vers;
	if (*tl != nfs_prog) {
		if (*tl == nqnfs_prog) {
			nqnfs++;
			nfsvers = nqnfs_vers;
		} else {
			nd->nd_repstat = EPROGUNAVAIL;
			nd->nd_procnum = NFSPROC_NOOP;
			return (0);
		}
	}
	tl++;
	if (*tl++ != nfsvers) {
		nd->nd_repstat = EPROGMISMATCH;
		nd->nd_procnum = NFSPROC_NOOP;
		return (0);
	}
	nd->nd_procnum = fxdr_unsigned(u_long, *tl++);
	if (nd->nd_procnum == NFSPROC_NULL)
		return (0);
	if (nd->nd_procnum >= NFS_NPROCS ||
	    (!nqnfs && nd->nd_procnum > NFSPROC_STATFS) ||
	    (*tl != rpc_auth_unix && *tl != rpc_auth_kerb)) {
		nd->nd_repstat = EPROCUNAVAIL;
		nd->nd_procnum = NFSPROC_NOOP;
		return (0);
	}
	auth_type = *tl++;
	len = fxdr_unsigned(int, *tl++);
	if (len < 0 || len > RPCAUTH_MAXSIZ) {
		m_freem(mrep);
		return (EBADRPC);
	}

	/*
	 * Handle auth_unix or auth_kerb.
	 */
	if (auth_type == rpc_auth_unix) {
		len = fxdr_unsigned(int, *++tl);
		if (len < 0 || len > NFS_MAXNAMLEN) {
			m_freem(mrep);
			return (EBADRPC);
		}
		nfsm_adv(nfsm_rndup(len));
		nfsm_dissect(tl, u_long *, 3*NFSX_UNSIGNED);
		nd->nd_cr.cr_uid = fxdr_unsigned(uid_t, *tl++);
		nd->nd_cr.cr_gid = fxdr_unsigned(gid_t, *tl++);
		len = fxdr_unsigned(int, *tl);
		if (len < 0 || len > RPCAUTH_UNIXGIDS) {
			m_freem(mrep);
			return (EBADRPC);
		}
		nfsm_dissect(tl, u_long *, (len + 2)*NFSX_UNSIGNED);
		for (i = 1; i <= len; i++)
			if (i < NGROUPS)
				nd->nd_cr.cr_groups[i] = fxdr_unsigned(gid_t, *tl++);
			else
				tl++;
		nd->nd_cr.cr_ngroups = (len >= NGROUPS) ? NGROUPS : (len + 1);
	} else if (auth_type == rpc_auth_kerb) {
		nd->nd_cr.cr_uid = fxdr_unsigned(uid_t, *tl++);
		nd->nd_authlen = fxdr_unsigned(int, *tl);
		iov.iov_len = uio.uio_resid = nfsm_rndup(nd->nd_authlen);
		if (uio.uio_resid > (len - 2*NFSX_UNSIGNED)) {
			m_freem(mrep);
			return (EBADRPC);
		}
		uio.uio_offset = 0;
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_segflg = UIO_SYSSPACE;
		iov.iov_base = (caddr_t)nd->nd_authstr;
		nfsm_mtouio(&uio, uio.uio_resid);
		nfsm_dissect(tl, u_long *, 2*NFSX_UNSIGNED);
		nd->nd_flag |= NFSD_NEEDAUTH;
	}
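	/*
	 * For reference (an explanatory note, not in the original): the
	 * AUTH_UNIX credential body parsed above is laid out on the wire
	 * per RFC 1057 as: stamp, machine name (a counted opaque string),
	 * uid, gid, and then a counted array of auxiliary gids.  The code
	 * skips the stamp and machine name, keeps uid/gid, and folds the
	 * auxiliary gids into cr_groups[1..NGROUPS-1], discarding any
	 * overflow.
	 */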
	/*
	 * Do we have any use for the verifier.
	 * According to the "Remote Procedure Call Protocol Spec." it
	 * should be AUTH_NULL, but some clients make it AUTH_UNIX?
	 * For now, just skip over it
	 */
	len = fxdr_unsigned(int, *++tl);
	if (len < 0 || len > RPCAUTH_MAXSIZ) {
		m_freem(mrep);
		return (EBADRPC);
	}
	if (len > 0) {
		nfsm_adv(nfsm_rndup(len));
	}

	/*
	 * For nqnfs, get piggybacked lease request.
	 */
	if (nqnfs && nd->nd_procnum != NQNFSPROC_EVICTED) {
		nfsm_dissect(tl, u_long *, NFSX_UNSIGNED);
		nd->nd_nqlflag = fxdr_unsigned(int, *tl);
		if (nd->nd_nqlflag) {
			nfsm_dissect(tl, u_long *, NFSX_UNSIGNED);
			nd->nd_duration = fxdr_unsigned(int, *tl);
		} else
			nd->nd_duration = NQ_MINLEASE;
	} else {
		nd->nd_nqlflag = NQL_NOVAL;
		nd->nd_duration = NQ_MINLEASE;
	}
	nd->nd_md = md;
	nd->nd_dpos = dpos;
	return (0);
nfsmout:
	return (error);
}

/*
 * Search for a sleeping nfsd and wake it up.
 * SIDE EFFECT: If none found, set NFSD_CHECKSLP flag, so that one of the
 * running nfsds will go look for the work in the nfssvc_sock list.
 */
void
nfsrv_wakenfsd(slp)
	struct nfssvc_sock *slp;
{
	register struct nfsd *nd = nfsd_head.nd_next;

	if ((slp->ns_flag & SLP_VALID) == 0)
		return;
	while (nd != (struct nfsd *)&nfsd_head) {
		if (nd->nd_flag & NFSD_WAITING) {
			nd->nd_flag &= ~NFSD_WAITING;
			if (nd->nd_slp)
				panic("nfsd wakeup");
			slp->ns_sref++;
			nd->nd_slp = slp;
			wakeup((caddr_t)nd);
			return;
		}
		nd = nd->nd_next;
	}
	slp->ns_flag |= SLP_DOREC;
	nfsd_head.nd_flag |= NFSD_CHECKSLP;
}

nfs_msg(p, server, msg)
	struct proc *p;
	char *server, *msg;
{
	tpr_t tpr;

	if (p)
		tpr = tprintf_open(p);
	else
		tpr = NULL;
	tprintf(tpr, "nfs server %s: %s\n", server, msg);
	tprintf_close(tpr);
}