1 /* 2 * Copyright (c) 1989, 1991, 1993, 1995 3 * The Regents of the University of California. All rights reserved. 4 * 5 * This code is derived from software contributed to Berkeley by 6 * Rick Macklem at The University of Guelph. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. Neither the name of the University nor the names of its contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 30 * SUCH DAMAGE. 31 * 32 * @(#)nfs_socket.c 8.5 (Berkeley) 3/30/95 33 * $FreeBSD: src/sys/nfs/nfs_socket.c,v 1.60.2.6 2003/03/26 01:44:46 alfred Exp $ 34 */ 35 36 /* 37 * Socket operations for use by nfs 38 */ 39 40 #include <sys/param.h> 41 #include <sys/systm.h> 42 #include <sys/proc.h> 43 #include <sys/malloc.h> 44 #include <sys/mount.h> 45 #include <sys/kernel.h> 46 #include <sys/mbuf.h> 47 #include <sys/vnode.h> 48 #include <sys/fcntl.h> 49 #include <sys/protosw.h> 50 #include <sys/resourcevar.h> 51 #include <sys/socket.h> 52 #include <sys/socketvar.h> 53 #include <sys/socketops.h> 54 #include <sys/syslog.h> 55 #include <sys/thread.h> 56 #include <sys/tprintf.h> 57 #include <sys/sysctl.h> 58 #include <sys/signalvar.h> 59 60 #include <sys/signal2.h> 61 #include <sys/mutex2.h> 62 #include <sys/socketvar2.h> 63 64 #include <netinet/in.h> 65 #include <netinet/tcp.h> 66 #include <sys/thread2.h> 67 68 #include "rpcv2.h" 69 #include "nfsproto.h" 70 #include "nfs.h" 71 #include "xdr_subs.h" 72 #include "nfsm_subs.h" 73 #include "nfsmount.h" 74 #include "nfsnode.h" 75 #include "nfsrtt.h" 76 77 #define TRUE 1 78 #define FALSE 0 79 80 /* 81 * RTT calculations are scaled by 256 (8 bits). A proper fractional 82 * RTT will still be calculated even with a slow NFS timer. 83 */ 84 #define NFS_SRTT(r) (r)->r_nmp->nm_srtt[proct[(r)->r_procnum]] 85 #define NFS_SDRTT(r) (r)->r_nmp->nm_sdrtt[proct[(r)->r_procnum]] 86 #define NFS_RTT_SCALE_BITS 8 /* bits */ 87 #define NFS_RTT_SCALE 256 /* value */ 88 89 /* 90 * Defines which timer to use for the procnum. 
91 * 0 - default 92 * 1 - getattr 93 * 2 - lookup 94 * 3 - read 95 * 4 - write 96 */ 97 static int proct[NFS_NPROCS] = { 98 0, 1, 0, 2, 1, 3, 3, 4, 0, 0, /* 00-09 */ 99 0, 0, 0, 0, 0, 0, 3, 3, 0, 0, /* 10-19 */ 100 0, 5, 0, 0, 0, 0, /* 20-29 */ 101 }; 102 103 static int multt[NFS_NPROCS] = { 104 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 00-09 */ 105 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 10-19 */ 106 1, 2, 1, 1, 1, 1, /* 20-29 */ 107 }; 108 109 static int nfs_backoff[8] = { 2, 3, 5, 8, 13, 21, 34, 55 }; 110 static int nfs_realign_test; 111 static int nfs_realign_count; 112 static int nfs_showrtt; 113 static int nfs_showrexmit; 114 int nfs_maxasyncbio = NFS_MAXASYNCBIO; 115 116 SYSCTL_DECL(_vfs_nfs); 117 118 SYSCTL_INT(_vfs_nfs, OID_AUTO, realign_test, CTLFLAG_RW, &nfs_realign_test, 0, 119 "Number of times mbufs have been tested for bad alignment"); 120 SYSCTL_INT(_vfs_nfs, OID_AUTO, realign_count, CTLFLAG_RW, &nfs_realign_count, 0, 121 "Number of realignments for badly aligned mbuf data"); 122 SYSCTL_INT(_vfs_nfs, OID_AUTO, showrtt, CTLFLAG_RW, &nfs_showrtt, 0, 123 "Show round trip time output"); 124 SYSCTL_INT(_vfs_nfs, OID_AUTO, showrexmit, CTLFLAG_RW, &nfs_showrexmit, 0, 125 "Show retransmits info"); 126 SYSCTL_INT(_vfs_nfs, OID_AUTO, maxasyncbio, CTLFLAG_RW, &nfs_maxasyncbio, 0, 127 "Max number of asynchronous bio's"); 128 129 static int nfs_request_setup(nfsm_info_t info); 130 static int nfs_request_auth(struct nfsreq *rep); 131 static int nfs_request_try(struct nfsreq *rep); 132 static int nfs_request_waitreply(struct nfsreq *rep); 133 static int nfs_request_processreply(nfsm_info_t info, int); 134 135 int nfsrtton = 0; 136 struct nfsrtt nfsrtt; 137 struct callout nfs_timer_handle; 138 139 static int nfs_msg (struct thread *,char *,char *); 140 static int nfs_rcvlock (struct nfsmount *nmp, struct nfsreq *myreq); 141 static void nfs_rcvunlock (struct nfsmount *nmp); 142 static void nfs_realign (struct mbuf **pm, int hsiz); 143 static int nfs_receive (struct nfsmount *nmp, struct nfsreq *rep, 144 struct sockaddr **aname, struct mbuf **mp); 145 static void nfs_softterm (struct nfsreq *rep, int islocked); 146 static void nfs_hardterm (struct nfsreq *rep, int islocked); 147 static int nfs_reconnect (struct nfsmount *nmp, struct nfsreq *rep); 148 #ifndef NFS_NOSERVER 149 static int nfsrv_getstream (struct nfssvc_sock *, int, int *); 150 static void nfs_timer_req(struct nfsreq *req); 151 static void nfs_checkpkt(struct mbuf *m, int len); 152 153 int (*nfsrv3_procs[NFS_NPROCS]) (struct nfsrv_descript *nd, 154 struct nfssvc_sock *slp, 155 struct thread *td, 156 struct mbuf **mreqp) = { 157 nfsrv_null, 158 nfsrv_getattr, 159 nfsrv_setattr, 160 nfsrv_lookup, 161 nfsrv3_access, 162 nfsrv_readlink, 163 nfsrv_read, 164 nfsrv_write, 165 nfsrv_create, 166 nfsrv_mkdir, 167 nfsrv_symlink, 168 nfsrv_mknod, 169 nfsrv_remove, 170 nfsrv_rmdir, 171 nfsrv_rename, 172 nfsrv_link, 173 nfsrv_readdir, 174 nfsrv_readdirplus, 175 nfsrv_statfs, 176 nfsrv_fsinfo, 177 nfsrv_pathconf, 178 nfsrv_commit, 179 nfsrv_noop, 180 nfsrv_noop, 181 nfsrv_noop, 182 nfsrv_noop 183 }; 184 #endif /* NFS_NOSERVER */ 185 186 /* 187 * Initialize sockets and congestion for a new NFS connection. 188 * We do not free the sockaddr if error. 
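 *
 * In outline: create the socket, bind it to a reserved port when
 * NFSMNT_RESVPORT is set, connect it unless NFSMNT_NOCONN, enable
 * keepalive and TCP_NODELAY for stream mounts, reserve the socket
 * buffers, and seed the scaled RTT/congestion state.  nm_so is
 * assigned last because nfs_timer() starts using the socket the
 * moment it is set.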
189 */ 190 int 191 nfs_connect(struct nfsmount *nmp, struct nfsreq *rep) 192 { 193 struct socket *so; 194 int error; 195 struct sockaddr *saddr; 196 struct sockaddr_in *sin; 197 struct thread *td = &thread0; /* only used for socreate and sobind */ 198 199 nmp->nm_so = so = NULL; 200 if (nmp->nm_flag & NFSMNT_FORCE) 201 return (EINVAL); 202 saddr = nmp->nm_nam; 203 error = socreate(saddr->sa_family, &so, nmp->nm_sotype, 204 nmp->nm_soproto, td); 205 if (error) 206 goto bad; 207 nmp->nm_soflags = so->so_proto->pr_flags; 208 209 /* 210 * Some servers require that the client port be a reserved port number. 211 */ 212 if (saddr->sa_family == AF_INET && (nmp->nm_flag & NFSMNT_RESVPORT)) { 213 struct sockopt sopt; 214 int ip; 215 struct sockaddr_in ssin; 216 217 bzero(&sopt, sizeof sopt); 218 ip = IP_PORTRANGE_LOW; 219 sopt.sopt_level = IPPROTO_IP; 220 sopt.sopt_name = IP_PORTRANGE; 221 sopt.sopt_val = (void *)&ip; 222 sopt.sopt_valsize = sizeof(ip); 223 sopt.sopt_td = NULL; 224 error = sosetopt(so, &sopt); 225 if (error) 226 goto bad; 227 bzero(&ssin, sizeof ssin); 228 sin = &ssin; 229 sin->sin_len = sizeof (struct sockaddr_in); 230 sin->sin_family = AF_INET; 231 sin->sin_addr.s_addr = INADDR_ANY; 232 sin->sin_port = htons(0); 233 error = sobind(so, (struct sockaddr *)sin, td); 234 if (error) 235 goto bad; 236 bzero(&sopt, sizeof sopt); 237 ip = IP_PORTRANGE_DEFAULT; 238 sopt.sopt_level = IPPROTO_IP; 239 sopt.sopt_name = IP_PORTRANGE; 240 sopt.sopt_val = (void *)&ip; 241 sopt.sopt_valsize = sizeof(ip); 242 sopt.sopt_td = NULL; 243 error = sosetopt(so, &sopt); 244 if (error) 245 goto bad; 246 } 247 248 /* 249 * Protocols that do not require connections may be optionally left 250 * unconnected for servers that reply from a port other than NFS_PORT. 251 */ 252 if (nmp->nm_flag & NFSMNT_NOCONN) { 253 if (nmp->nm_soflags & PR_CONNREQUIRED) { 254 error = ENOTCONN; 255 goto bad; 256 } 257 } else { 258 error = soconnect(so, nmp->nm_nam, td, TRUE); 259 if (error) 260 goto bad; 261 262 /* 263 * Wait for the connection to complete. Cribbed from the 264 * connect system call but with the wait timing out so 265 * that interruptible mounts don't hang here for a long time. 266 */ 267 crit_enter(); 268 while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) { 269 (void) tsleep((caddr_t)&so->so_timeo, 0, 270 "nfscon", 2 * hz); 271 if ((so->so_state & SS_ISCONNECTING) && 272 so->so_error == 0 && rep && 273 (error = nfs_sigintr(nmp, rep, rep->r_td)) != 0){ 274 soclrstate(so, SS_ISCONNECTING); 275 crit_exit(); 276 goto bad; 277 } 278 } 279 if (so->so_error) { 280 error = so->so_error; 281 so->so_error = 0; 282 crit_exit(); 283 goto bad; 284 } 285 crit_exit(); 286 } 287 so->so_rcv.ssb_timeo = (5 * hz); 288 so->so_snd.ssb_timeo = (5 * hz); 289 290 /* 291 * Get buffer reservation size from sysctl, but impose reasonable 292 * limits. 
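 * (The reservation size, nfs_soreserve, is applied to both the send
 * and receive buffers; it appears to be the vfs.nfs.soreserve value
 * referred to in the send-space warning in nfs_send().)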
293 */ 294 if (nmp->nm_sotype == SOCK_STREAM) { 295 if (so->so_proto->pr_flags & PR_CONNREQUIRED) { 296 struct sockopt sopt; 297 int val; 298 299 bzero(&sopt, sizeof sopt); 300 sopt.sopt_level = SOL_SOCKET; 301 sopt.sopt_name = SO_KEEPALIVE; 302 sopt.sopt_val = &val; 303 sopt.sopt_valsize = sizeof val; 304 val = 1; 305 sosetopt(so, &sopt); 306 } 307 if (so->so_proto->pr_protocol == IPPROTO_TCP) { 308 struct sockopt sopt; 309 int val; 310 311 bzero(&sopt, sizeof sopt); 312 sopt.sopt_level = IPPROTO_TCP; 313 sopt.sopt_name = TCP_NODELAY; 314 sopt.sopt_val = &val; 315 sopt.sopt_valsize = sizeof val; 316 val = 1; 317 sosetopt(so, &sopt); 318 319 bzero(&sopt, sizeof sopt); 320 sopt.sopt_level = IPPROTO_TCP; 321 sopt.sopt_name = TCP_FASTKEEP; 322 sopt.sopt_val = &val; 323 sopt.sopt_valsize = sizeof val; 324 val = 1; 325 sosetopt(so, &sopt); 326 } 327 } 328 error = soreserve(so, nfs_soreserve, nfs_soreserve, NULL); 329 if (error) 330 goto bad; 331 atomic_set_int(&so->so_rcv.ssb_flags, SSB_NOINTR); 332 atomic_set_int(&so->so_snd.ssb_flags, SSB_NOINTR); 333 334 /* 335 * Clear AUTOSIZE, otherwise the socket buffer could be reduced 336 * to the point where rpc's cannot be queued using the mbuf 337 * interface. 338 */ 339 atomic_clear_int(&so->so_rcv.ssb_flags, SSB_AUTOSIZE); 340 atomic_clear_int(&so->so_snd.ssb_flags, SSB_AUTOSIZE); 341 342 /* Initialize other non-zero congestion variables */ 343 nmp->nm_srtt[0] = nmp->nm_srtt[1] = nmp->nm_srtt[2] = 344 nmp->nm_srtt[3] = (NFS_TIMEO << NFS_RTT_SCALE_BITS); 345 nmp->nm_sdrtt[0] = nmp->nm_sdrtt[1] = nmp->nm_sdrtt[2] = 346 nmp->nm_sdrtt[3] = 0; 347 nmp->nm_maxasync_scaled = NFS_MINASYNC_SCALED; 348 nmp->nm_timeouts = 0; 349 350 /* 351 * Assign nm_so last. The moment nm_so is assigned the nfs_timer() 352 * can mess with the socket. 353 */ 354 nmp->nm_so = so; 355 return (0); 356 357 bad: 358 if (so) { 359 soshutdown(so, SHUT_RDWR); 360 soclose(so, FNONBLOCK); 361 } 362 return (error); 363 } 364 365 /* 366 * Reconnect routine: 367 * Called when a connection is broken on a reliable protocol. 368 * - clean up the old socket 369 * - nfs_connect() again 370 * - set R_NEEDSXMIT for all outstanding requests on mount point 371 * If this fails the mount point is DEAD! 372 * nb: Must be called with the nfs_sndlock() set on the mount point. 373 */ 374 static int 375 nfs_reconnect(struct nfsmount *nmp, struct nfsreq *rep) 376 { 377 struct nfsreq *req; 378 int error; 379 380 nfs_disconnect(nmp); 381 if (nmp->nm_rxstate >= NFSSVC_STOPPING) 382 return (EINTR); 383 while ((error = nfs_connect(nmp, rep)) != 0) { 384 if (error == EINTR || error == ERESTART) 385 return (EINTR); 386 if (error == EINVAL) 387 return (error); 388 if (nmp->nm_rxstate >= NFSSVC_STOPPING) 389 return (EINTR); 390 (void) tsleep((caddr_t)&lbolt, 0, "nfscon", 0); 391 } 392 393 /* 394 * Loop through outstanding request list and fix up all requests 395 * on old socket. 396 */ 397 crit_enter(); 398 TAILQ_FOREACH(req, &nmp->nm_reqq, r_chain) { 399 KKASSERT(req->r_nmp == nmp); 400 req->r_flags |= R_NEEDSXMIT; 401 } 402 crit_exit(); 403 return (0); 404 } 405 406 /* 407 * NFS disconnect. Clean up and unlink. 
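 * nm_so is cleared before the socket is shut down and closed,
 * presumably so that code checking nm_so sees NULL rather than a
 * socket in mid-teardown.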
408 */ 409 void 410 nfs_disconnect(struct nfsmount *nmp) 411 { 412 struct socket *so; 413 414 if (nmp->nm_so) { 415 so = nmp->nm_so; 416 nmp->nm_so = NULL; 417 soshutdown(so, SHUT_RDWR); 418 soclose(so, FNONBLOCK); 419 } 420 } 421 422 void 423 nfs_safedisconnect(struct nfsmount *nmp) 424 { 425 int error; 426 427 error = nfs_rcvlock(nmp, NULL); 428 nfs_disconnect(nmp); 429 if (error == 0) 430 nfs_rcvunlock(nmp); 431 } 432 433 /* 434 * This is the nfs send routine. For connection based socket types, it 435 * must be called with an nfs_sndlock() on the socket. 436 * "rep == NULL" indicates that it has been called from a server. 437 * For the client side: 438 * - return EINTR if the RPC is terminated, 0 otherwise 439 * - set R_NEEDSXMIT if the send fails for any reason 440 * - do any cleanup required by recoverable socket errors (?) 441 * For the server side: 442 * - return EINTR or ERESTART if interrupted by a signal 443 * - return EPIPE if a connection is lost for connection based sockets (TCP...) 444 * - do any cleanup required by recoverable socket errors (?) 445 */ 446 int 447 nfs_send(struct socket *so, struct sockaddr *nam, struct mbuf *top, 448 struct nfsreq *rep) 449 { 450 struct sockaddr *sendnam; 451 int error, soflags, flags; 452 453 if (rep) { 454 if (rep->r_flags & R_SOFTTERM) { 455 m_freem(top); 456 return (EINTR); 457 } 458 if ((so = rep->r_nmp->nm_so) == NULL) { 459 rep->r_flags |= R_NEEDSXMIT; 460 m_freem(top); 461 return (0); 462 } 463 rep->r_flags &= ~R_NEEDSXMIT; 464 soflags = rep->r_nmp->nm_soflags; 465 } else { 466 soflags = so->so_proto->pr_flags; 467 } 468 if ((soflags & PR_CONNREQUIRED) || (so->so_state & SS_ISCONNECTED)) 469 sendnam = NULL; 470 else 471 sendnam = nam; 472 if (so->so_type == SOCK_SEQPACKET) 473 flags = MSG_EOR; 474 else 475 flags = 0; 476 477 /* 478 * calls pru_sosend -> sosend -> so_pru_send -> netrpc 479 */ 480 error = so_pru_sosend(so, sendnam, NULL, top, NULL, flags, 481 curthread /*XXX*/); 482 483 /* 484 * ENOBUFS for dgram sockets is transient and non fatal. 485 * No need to log, and no need to break a soft mount. 486 */ 487 if (error == ENOBUFS && so->so_type == SOCK_DGRAM) { 488 error = 0; 489 /* 490 * do backoff retransmit on client 491 */ 492 if (rep) { 493 if ((rep->r_nmp->nm_state & NFSSTA_SENDSPACE) == 0) { 494 rep->r_nmp->nm_state |= NFSSTA_SENDSPACE; 495 kprintf("Warning: NFS: Insufficient sendspace " 496 "(%lu),\n" 497 "\t You must increase vfs.nfs.soreserve" 498 "or decrease vfs.nfs.maxasyncbio\n", 499 so->so_snd.ssb_hiwat); 500 } 501 rep->r_flags |= R_NEEDSXMIT; 502 } 503 } 504 505 if (error) { 506 if (rep) { 507 log(LOG_INFO, "nfs send error %d for server %s\n",error, 508 rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname); 509 /* 510 * Deal with errors for the client side. 511 */ 512 if (rep->r_flags & R_SOFTTERM) 513 error = EINTR; 514 else 515 rep->r_flags |= R_NEEDSXMIT; 516 } else { 517 log(LOG_INFO, "nfsd send error %d\n", error); 518 } 519 520 /* 521 * Handle any recoverable (soft) socket errors here. (?) 522 */ 523 if (error != EINTR && error != ERESTART && 524 error != EWOULDBLOCK && error != EPIPE) 525 error = 0; 526 } 527 return (error); 528 } 529 530 /* 531 * Receive a Sun RPC Request/Reply. For SOCK_DGRAM, the work is all 532 * done by soreceive(), but for SOCK_STREAM we must deal with the Record 533 * Mark and consolidate the data into a new mbuf list. 534 * nb: Sometimes TCP passes the data up to soreceive() in long lists of 535 * small mbufs. 
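 * The record mark itself is a 4-byte big-endian word whose low 31 bits
 * give the fragment length and whose high bit flags the last fragment
 * of the record (standard RPC-over-TCP record marking, cf. RFC 1831).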
536 * For SOCK_STREAM we must be very careful to read an entire record once 537 * we have read any of it, even if the system call has been interrupted. 538 */ 539 static int 540 nfs_receive(struct nfsmount *nmp, struct nfsreq *rep, 541 struct sockaddr **aname, struct mbuf **mp) 542 { 543 struct socket *so; 544 struct sockbuf sio; 545 struct uio auio; 546 struct iovec aio; 547 struct mbuf *m; 548 struct mbuf *control; 549 u_int32_t len; 550 struct sockaddr **getnam; 551 int error, sotype, rcvflg; 552 struct thread *td = curthread; /* XXX */ 553 554 /* 555 * Set up arguments for soreceive() 556 */ 557 *mp = NULL; 558 *aname = NULL; 559 sotype = nmp->nm_sotype; 560 561 /* 562 * For reliable protocols, lock against other senders/receivers 563 * in case a reconnect is necessary. 564 * For SOCK_STREAM, first get the Record Mark to find out how much 565 * more there is to get. 566 * We must lock the socket against other receivers 567 * until we have an entire rpc request/reply. 568 */ 569 if (sotype != SOCK_DGRAM) { 570 error = nfs_sndlock(nmp, rep); 571 if (error) 572 return (error); 573 tryagain: 574 /* 575 * Check for fatal errors and resending request. 576 */ 577 /* 578 * Ugh: If a reconnect attempt just happened, nm_so 579 * would have changed. NULL indicates a failed 580 * attempt that has essentially shut down this 581 * mount point. 582 */ 583 if (rep && (rep->r_mrep || (rep->r_flags & R_SOFTTERM))) { 584 nfs_sndunlock(nmp); 585 return (EINTR); 586 } 587 so = nmp->nm_so; 588 if (so == NULL) { 589 error = nfs_reconnect(nmp, rep); 590 if (error) { 591 nfs_sndunlock(nmp); 592 return (error); 593 } 594 goto tryagain; 595 } 596 while (rep && (rep->r_flags & R_NEEDSXMIT)) { 597 m = m_copym(rep->r_mreq, 0, M_COPYALL, M_WAITOK); 598 nfsstats.rpcretries++; 599 error = nfs_send(so, rep->r_nmp->nm_nam, m, rep); 600 if (error) { 601 if (error == EINTR || error == ERESTART || 602 (error = nfs_reconnect(nmp, rep)) != 0) { 603 nfs_sndunlock(nmp); 604 return (error); 605 } 606 goto tryagain; 607 } 608 } 609 nfs_sndunlock(nmp); 610 if (sotype == SOCK_STREAM) { 611 /* 612 * Get the length marker from the stream 613 */ 614 aio.iov_base = (caddr_t)&len; 615 aio.iov_len = sizeof(u_int32_t); 616 auio.uio_iov = &aio; 617 auio.uio_iovcnt = 1; 618 auio.uio_segflg = UIO_SYSSPACE; 619 auio.uio_rw = UIO_READ; 620 auio.uio_offset = 0; 621 auio.uio_resid = sizeof(u_int32_t); 622 auio.uio_td = td; 623 do { 624 rcvflg = MSG_WAITALL; 625 error = so_pru_soreceive(so, NULL, &auio, NULL, 626 NULL, &rcvflg); 627 if (error == EWOULDBLOCK && rep) { 628 if (rep->r_flags & R_SOFTTERM) 629 return (EINTR); 630 } 631 } while (error == EWOULDBLOCK); 632 633 if (error == 0 && auio.uio_resid > 0) { 634 /* 635 * Only log short packets if not EOF 636 */ 637 if (auio.uio_resid != sizeof(u_int32_t)) { 638 log(LOG_INFO, 639 "short receive (%d/%d) from nfs server %s\n", 640 (int)(sizeof(u_int32_t) - auio.uio_resid), 641 (int)sizeof(u_int32_t), 642 nmp->nm_mountp->mnt_stat.f_mntfromname); 643 } 644 error = EPIPE; 645 } 646 if (error) 647 goto errout; 648 len = ntohl(len) & ~0x80000000; 649 /* 650 * This is SERIOUS! We are out of sync with the sender 651 * and forcing a disconnect/reconnect is all I can do. 
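 * A record mark length greater than NFS_MAXPACKET means we have almost
 * certainly lost the record boundary, so fail with EFBIG and let the
 * errout path below reconnect the socket.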
652 */ 653 if (len > NFS_MAXPACKET) { 654 log(LOG_ERR, "%s (%d) from nfs server %s\n", 655 "impossible packet length", 656 len, 657 nmp->nm_mountp->mnt_stat.f_mntfromname); 658 error = EFBIG; 659 goto errout; 660 } 661 662 /* 663 * Get the rest of the packet as an mbuf chain 664 */ 665 sbinit(&sio, len); 666 do { 667 rcvflg = MSG_WAITALL; 668 error = so_pru_soreceive(so, NULL, NULL, &sio, 669 NULL, &rcvflg); 670 } while (error == EWOULDBLOCK || error == EINTR || 671 error == ERESTART); 672 if (error == 0 && sio.sb_cc != len) { 673 if (sio.sb_cc != 0) { 674 log(LOG_INFO, 675 "short receive (%zu/%d) from nfs server %s\n", 676 (size_t)len - auio.uio_resid, len, 677 nmp->nm_mountp->mnt_stat.f_mntfromname); 678 } 679 error = EPIPE; 680 } 681 *mp = sio.sb_mb; 682 } else { 683 /* 684 * Non-stream, so get the whole packet by not 685 * specifying MSG_WAITALL and by specifying a large 686 * length. 687 * 688 * We have no use for control msg., but must grab them 689 * and then throw them away so we know what is going 690 * on. 691 */ 692 sbinit(&sio, 100000000); 693 do { 694 rcvflg = 0; 695 error = so_pru_soreceive(so, NULL, NULL, &sio, 696 &control, &rcvflg); 697 if (control) 698 m_freem(control); 699 if (error == EWOULDBLOCK && rep) { 700 if (rep->r_flags & R_SOFTTERM) { 701 m_freem(sio.sb_mb); 702 return (EINTR); 703 } 704 } 705 } while (error == EWOULDBLOCK || 706 (error == 0 && sio.sb_mb == NULL && control)); 707 if ((rcvflg & MSG_EOR) == 0) 708 kprintf("Egad!!\n"); 709 if (error == 0 && sio.sb_mb == NULL) 710 error = EPIPE; 711 len = sio.sb_cc; 712 *mp = sio.sb_mb; 713 } 714 errout: 715 if (error && error != EINTR && error != ERESTART) { 716 m_freem(*mp); 717 *mp = NULL; 718 if (error != EPIPE) { 719 log(LOG_INFO, 720 "receive error %d from nfs server %s\n", 721 error, 722 nmp->nm_mountp->mnt_stat.f_mntfromname); 723 } 724 error = nfs_sndlock(nmp, rep); 725 if (!error) { 726 error = nfs_reconnect(nmp, rep); 727 if (!error) 728 goto tryagain; 729 else 730 nfs_sndunlock(nmp); 731 } 732 } 733 } else { 734 if ((so = nmp->nm_so) == NULL) 735 return (EACCES); 736 if (so->so_state & SS_ISCONNECTED) 737 getnam = NULL; 738 else 739 getnam = aname; 740 sbinit(&sio, 100000000); 741 do { 742 rcvflg = 0; 743 error = so_pru_soreceive(so, getnam, NULL, &sio, 744 NULL, &rcvflg); 745 if (error == EWOULDBLOCK && rep && 746 (rep->r_flags & R_SOFTTERM)) { 747 m_freem(sio.sb_mb); 748 return (EINTR); 749 } 750 } while (error == EWOULDBLOCK); 751 752 len = sio.sb_cc; 753 *mp = sio.sb_mb; 754 755 /* 756 * A shutdown may result in no error and no mbuf. 757 * Convert to EPIPE. 758 */ 759 if (*mp == NULL && error == 0) 760 error = EPIPE; 761 } 762 if (error) { 763 m_freem(*mp); 764 *mp = NULL; 765 } 766 767 /* 768 * Search for any mbufs that are not a multiple of 4 bytes long 769 * or with m_data not longword aligned. 770 * These could cause pointer alignment problems, so copy them to 771 * well aligned mbufs. 772 */ 773 nfs_realign(mp, 5 * NFSX_UNSIGNED); 774 return (error); 775 } 776 777 /* 778 * Implement receipt of reply on a socket. 779 * 780 * We must search through the list of received datagrams matching them 781 * with outstanding requests using the xid, until ours is found. 782 * 783 * If myrep is NULL we process packets on the socket until 784 * interrupted or until nm_reqrxq is non-empty. 
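 *
 * Returns 0 once myrep's reply has been filled in, or when another
 * thread already received it (nfs_rcvlock() returned EALREADY).
 * Returns EWOULDBLOCK for the helper-thread case above, otherwise an
 * error from the lock or receive path.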
785 */ 786 /* ARGSUSED */ 787 int 788 nfs_reply(struct nfsmount *nmp, struct nfsreq *myrep) 789 { 790 struct nfsreq *rep; 791 struct sockaddr *nam; 792 u_int32_t rxid; 793 u_int32_t *tl; 794 int error; 795 struct nfsm_info info; 796 797 /* 798 * Loop around until we get our own reply 799 */ 800 for (;;) { 801 /* 802 * Lock against other receivers so that I don't get stuck in 803 * sbwait() after someone else has received my reply for me. 804 * Also necessary for connection based protocols to avoid 805 * race conditions during a reconnect. 806 * 807 * If nfs_rcvlock() returns EALREADY, that means that 808 * the reply has already been recieved by another 809 * process and we can return immediately. In this 810 * case, the lock is not taken to avoid races with 811 * other processes. 812 */ 813 info.mrep = NULL; 814 815 error = nfs_rcvlock(nmp, myrep); 816 if (error == EALREADY) 817 return (0); 818 if (error) 819 return (error); 820 821 /* 822 * If myrep is NULL we are the receiver helper thread. 823 * Stop waiting for incoming replies if there are 824 * messages sitting on reqrxq that we need to process, 825 * or if a shutdown request is pending. 826 */ 827 if (myrep == NULL && (TAILQ_FIRST(&nmp->nm_reqrxq) || 828 nmp->nm_rxstate > NFSSVC_PENDING)) { 829 nfs_rcvunlock(nmp); 830 return(EWOULDBLOCK); 831 } 832 833 /* 834 * Get the next Rpc reply off the socket 835 * 836 * We cannot release the receive lock until we've 837 * filled in rep->r_mrep, otherwise a waiting 838 * thread may deadlock in soreceive with no incoming 839 * packets expected. 840 */ 841 error = nfs_receive(nmp, myrep, &nam, &info.mrep); 842 if (error) { 843 /* 844 * Ignore routing errors on connectionless protocols?? 845 */ 846 nfs_rcvunlock(nmp); 847 if (NFSIGNORE_SOERROR(nmp->nm_soflags, error)) { 848 if (nmp->nm_so == NULL) 849 return (error); 850 nmp->nm_so->so_error = 0; 851 continue; 852 } 853 return (error); 854 } 855 if (nam) 856 kfree(nam, M_SONAME); 857 858 /* 859 * Get the xid and check that it is an rpc reply 860 */ 861 info.md = info.mrep; 862 info.dpos = mtod(info.md, caddr_t); 863 NULLOUT(tl = nfsm_dissect(&info, 2*NFSX_UNSIGNED)); 864 rxid = *tl++; 865 if (*tl != rpc_reply) { 866 nfsstats.rpcinvalid++; 867 m_freem(info.mrep); 868 info.mrep = NULL; 869 nfsmout: 870 nfs_rcvunlock(nmp); 871 continue; 872 } 873 874 /* 875 * Loop through the request list to match up the reply 876 * Iff no match, just drop the datagram. On match, set 877 * r_mrep atomically to prevent the timer from messing 878 * around with the request after we have exited the critical 879 * section. 880 */ 881 crit_enter(); 882 TAILQ_FOREACH(rep, &nmp->nm_reqq, r_chain) { 883 if (rep->r_mrep == NULL && rxid == rep->r_xid) 884 break; 885 } 886 887 /* 888 * Fill in the rest of the reply if we found a match. 889 * 890 * Deal with duplicate responses if there was no match. 891 */ 892 if (rep) { 893 rep->r_md = info.md; 894 rep->r_dpos = info.dpos; 895 if (nfsrtton) { 896 struct rttl *rt; 897 898 rt = &nfsrtt.rttl[nfsrtt.pos]; 899 rt->proc = rep->r_procnum; 900 rt->rto = 0; 901 rt->sent = 0; 902 rt->cwnd = nmp->nm_maxasync_scaled; 903 rt->srtt = nmp->nm_srtt[proct[rep->r_procnum] - 1]; 904 rt->sdrtt = nmp->nm_sdrtt[proct[rep->r_procnum] - 1]; 905 rt->fsid = nmp->nm_mountp->mnt_stat.f_fsid; 906 getmicrotime(&rt->tstamp); 907 if (rep->r_flags & R_TIMING) 908 rt->rtt = rep->r_rtt; 909 else 910 rt->rtt = 1000000; 911 nfsrtt.pos = (nfsrtt.pos + 1) % NFSRTTLOGSIZ; 912 } 913 914 /* 915 * New congestion control is based only on async 916 * requests. 
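 * Each reply received bumps nm_maxasync_scaled by one, up to
 * NFS_MAXASYNC_SCALED; a retransmit in nfs_timer_req() halves it,
 * but not below NFS_MINASYNC_SCALED.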
917 */ 918 if (nmp->nm_maxasync_scaled < NFS_MAXASYNC_SCALED) 919 ++nmp->nm_maxasync_scaled; 920 if (rep->r_flags & R_SENT) { 921 rep->r_flags &= ~R_SENT; 922 } 923 /* 924 * Update rtt using a gain of 0.125 on the mean 925 * and a gain of 0.25 on the deviation. 926 * 927 * NOTE SRTT/SDRTT are only good if R_TIMING is set. 928 */ 929 if ((rep->r_flags & R_TIMING) && rep->r_rexmit == 0) { 930 /* 931 * Since the timer resolution of 932 * NFS_HZ is so course, it can often 933 * result in r_rtt == 0. Since 934 * r_rtt == N means that the actual 935 * rtt is between N+dt and N+2-dt ticks, 936 * add 1. 937 */ 938 int n; 939 int d; 940 941 #define NFSRSB NFS_RTT_SCALE_BITS 942 n = ((NFS_SRTT(rep) * 7) + 943 (rep->r_rtt << NFSRSB)) >> 3; 944 d = n - NFS_SRTT(rep); 945 NFS_SRTT(rep) = n; 946 947 /* 948 * Don't let the jitter calculation decay 949 * too quickly, but we want a fast rampup. 950 */ 951 if (d < 0) 952 d = -d; 953 d <<= NFSRSB; 954 if (d < NFS_SDRTT(rep)) 955 n = ((NFS_SDRTT(rep) * 15) + d) >> 4; 956 else 957 n = ((NFS_SDRTT(rep) * 3) + d) >> 2; 958 NFS_SDRTT(rep) = n; 959 #undef NFSRSB 960 } 961 nmp->nm_timeouts = 0; 962 rep->r_mrep = info.mrep; 963 nfs_hardterm(rep, 0); 964 } else { 965 /* 966 * Extract vers, prog, nfsver, procnum. A duplicate 967 * response means we didn't wait long enough so 968 * we increase the SRTT to avoid future spurious 969 * timeouts. 970 */ 971 u_int procnum = nmp->nm_lastreprocnum; 972 int n; 973 974 if (procnum < NFS_NPROCS && proct[procnum]) { 975 if (nfs_showrexmit) 976 kprintf("D"); 977 n = nmp->nm_srtt[proct[procnum]]; 978 n += NFS_ASYSCALE * NFS_HZ; 979 if (n < NFS_ASYSCALE * NFS_HZ * 10) 980 n = NFS_ASYSCALE * NFS_HZ * 10; 981 nmp->nm_srtt[proct[procnum]] = n; 982 } 983 } 984 nfs_rcvunlock(nmp); 985 crit_exit(); 986 987 /* 988 * If not matched to a request, drop it. 989 * If it's mine, get out. 990 */ 991 if (rep == NULL) { 992 nfsstats.rpcunexpected++; 993 m_freem(info.mrep); 994 info.mrep = NULL; 995 } else if (rep == myrep) { 996 if (rep->r_mrep == NULL) 997 panic("nfsreply nil"); 998 return (0); 999 } 1000 } 1001 } 1002 1003 /* 1004 * Run the request state machine until the target state is reached 1005 * or a fatal error occurs. The target state is not run. Specifying 1006 * a target of NFSM_STATE_DONE runs the state machine until the rpc 1007 * is complete. 1008 * 1009 * EINPROGRESS is returned for all states other then the DONE state, 1010 * indicating that the rpc is still in progress. 1011 */ 1012 int 1013 nfs_request(struct nfsm_info *info, nfsm_state_t bstate, nfsm_state_t estate) 1014 { 1015 struct nfsreq *req; 1016 1017 while (info->state >= bstate && info->state < estate) { 1018 switch(info->state) { 1019 case NFSM_STATE_SETUP: 1020 /* 1021 * Setup the nfsreq. Any error which occurs during 1022 * this state is fatal. 1023 */ 1024 info->error = nfs_request_setup(info); 1025 if (info->error) { 1026 info->state = NFSM_STATE_DONE; 1027 return (info->error); 1028 } else { 1029 req = info->req; 1030 req->r_mrp = &info->mrep; 1031 req->r_mdp = &info->md; 1032 req->r_dposp = &info->dpos; 1033 info->state = NFSM_STATE_AUTH; 1034 } 1035 break; 1036 case NFSM_STATE_AUTH: 1037 /* 1038 * Authenticate the nfsreq. Any error which occurs 1039 * during this state is fatal. 1040 */ 1041 info->error = nfs_request_auth(info->req); 1042 if (info->error) { 1043 info->state = NFSM_STATE_DONE; 1044 return (info->error); 1045 } else { 1046 info->state = NFSM_STATE_TRY; 1047 } 1048 break; 1049 case NFSM_STATE_TRY: 1050 /* 1051 * Transmit or retransmit attempt. 
An error in this 1052 * state is ignored and we always move on to the 1053 * next state. 1054 * 1055 * This can trivially race the receiver if the 1056 * request is asynchronous. nfs_request_try() 1057 * will thus set the state for us and we 1058 * must also return immediately if we are 1059 * running an async state machine, because 1060 * info can become invalid due to races after 1061 * try() returns. 1062 */ 1063 if (info->req->r_flags & R_ASYNC) { 1064 nfs_request_try(info->req); 1065 if (estate == NFSM_STATE_WAITREPLY) 1066 return (EINPROGRESS); 1067 } else { 1068 nfs_request_try(info->req); 1069 info->state = NFSM_STATE_WAITREPLY; 1070 } 1071 break; 1072 case NFSM_STATE_WAITREPLY: 1073 /* 1074 * Wait for a reply or timeout and move on to the 1075 * next state. The error returned by this state 1076 * is passed to the processing code in the next 1077 * state. 1078 */ 1079 info->error = nfs_request_waitreply(info->req); 1080 info->state = NFSM_STATE_PROCESSREPLY; 1081 break; 1082 case NFSM_STATE_PROCESSREPLY: 1083 /* 1084 * Process the reply or timeout. Errors which occur 1085 * in this state may cause the state machine to 1086 * go back to an earlier state, and are fatal 1087 * otherwise. 1088 */ 1089 info->error = nfs_request_processreply(info, 1090 info->error); 1091 switch(info->error) { 1092 case ENEEDAUTH: 1093 info->state = NFSM_STATE_AUTH; 1094 break; 1095 case EAGAIN: 1096 info->state = NFSM_STATE_TRY; 1097 break; 1098 default: 1099 /* 1100 * Operation complete, with or without an 1101 * error. We are done. 1102 */ 1103 info->req = NULL; 1104 info->state = NFSM_STATE_DONE; 1105 return (info->error); 1106 } 1107 break; 1108 case NFSM_STATE_DONE: 1109 /* 1110 * Shouldn't be reached 1111 */ 1112 return (info->error); 1113 /* NOT REACHED */ 1114 } 1115 } 1116 1117 /* 1118 * If we are done return the error code (if any). 1119 * Otherwise return EINPROGRESS. 1120 */ 1121 if (info->state == NFSM_STATE_DONE) 1122 return (info->error); 1123 return (EINPROGRESS); 1124 } 1125 1126 /* 1127 * nfs_request - goes something like this 1128 * - fill in request struct 1129 * - links it into list 1130 * - calls nfs_send() for first transmit 1131 * - calls nfs_receive() to get reply 1132 * - break down rpc header and return with nfs reply pointed to 1133 * by mrep or error 1134 * nb: always frees up mreq mbuf list 1135 */ 1136 static int 1137 nfs_request_setup(nfsm_info_t info) 1138 { 1139 struct nfsreq *req; 1140 struct nfsmount *nmp; 1141 struct mbuf *m; 1142 int i; 1143 1144 /* 1145 * Reject requests while attempting a forced unmount. 1146 */ 1147 if (info->vp->v_mount->mnt_kern_flag & MNTK_UNMOUNTF) { 1148 m_freem(info->mreq); 1149 info->mreq = NULL; 1150 return (EIO); 1151 } 1152 nmp = VFSTONFS(info->vp->v_mount); 1153 req = kmalloc(sizeof(struct nfsreq), M_NFSREQ, M_WAITOK); 1154 req->r_nmp = nmp; 1155 req->r_vp = info->vp; 1156 req->r_td = info->td; 1157 req->r_procnum = info->procnum; 1158 req->r_mreq = NULL; 1159 req->r_cred = info->cred; 1160 1161 i = 0; 1162 m = info->mreq; 1163 while (m) { 1164 i += m->m_len; 1165 m = m->m_next; 1166 } 1167 req->r_mrest = info->mreq; 1168 req->r_mrest_len = i; 1169 1170 /* 1171 * The presence of a non-NULL r_info in req indicates 1172 * async completion via our helper threads. See the receiver 1173 * code. 
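 * (r_info is taken from the caller's nfsm_info only when info->bio is
 * set, i.e. for BIO-driven requests, so a non-NULL info->bio is what
 * selects R_ASYNC here.)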
1174 */ 1175 if (info->bio) { 1176 req->r_info = info; 1177 req->r_flags = R_ASYNC; 1178 } else { 1179 req->r_info = NULL; 1180 req->r_flags = 0; 1181 } 1182 info->req = req; 1183 return(0); 1184 } 1185 1186 static int 1187 nfs_request_auth(struct nfsreq *rep) 1188 { 1189 struct nfsmount *nmp = rep->r_nmp; 1190 struct mbuf *m; 1191 char nickv[RPCX_NICKVERF]; 1192 int error = 0, auth_len, auth_type; 1193 int verf_len; 1194 u_int32_t xid; 1195 char *auth_str, *verf_str; 1196 struct ucred *cred; 1197 1198 cred = rep->r_cred; 1199 rep->r_failed_auth = 0; 1200 1201 /* 1202 * Get the RPC header with authorization. 1203 */ 1204 verf_str = auth_str = NULL; 1205 if (nmp->nm_flag & NFSMNT_KERB) { 1206 verf_str = nickv; 1207 verf_len = sizeof (nickv); 1208 auth_type = RPCAUTH_KERB4; 1209 bzero((caddr_t)rep->r_key, sizeof(rep->r_key)); 1210 if (rep->r_failed_auth || 1211 nfs_getnickauth(nmp, cred, &auth_str, &auth_len, 1212 verf_str, verf_len)) { 1213 error = nfs_getauth(nmp, rep, cred, &auth_str, 1214 &auth_len, verf_str, &verf_len, rep->r_key); 1215 if (error) { 1216 m_freem(rep->r_mrest); 1217 rep->r_mrest = NULL; 1218 kfree((caddr_t)rep, M_NFSREQ); 1219 return (error); 1220 } 1221 } 1222 } else { 1223 auth_type = RPCAUTH_UNIX; 1224 if (cred->cr_ngroups < 1) 1225 panic("nfsreq nogrps"); 1226 auth_len = ((((cred->cr_ngroups - 1) > nmp->nm_numgrps) ? 1227 nmp->nm_numgrps : (cred->cr_ngroups - 1)) << 2) + 1228 5 * NFSX_UNSIGNED; 1229 } 1230 if (rep->r_mrest) 1231 nfs_checkpkt(rep->r_mrest, rep->r_mrest_len); 1232 m = nfsm_rpchead(cred, nmp->nm_flag, rep->r_procnum, auth_type, 1233 auth_len, auth_str, verf_len, verf_str, 1234 rep->r_mrest, rep->r_mrest_len, &rep->r_mheadend, &xid); 1235 rep->r_mrest = NULL; 1236 if (auth_str) 1237 kfree(auth_str, M_TEMP); 1238 1239 /* 1240 * For stream protocols, insert a Sun RPC Record Mark. 1241 */ 1242 if (nmp->nm_sotype == SOCK_STREAM) { 1243 M_PREPEND(m, NFSX_UNSIGNED, M_WAITOK); 1244 if (m == NULL) { 1245 kfree(rep, M_NFSREQ); 1246 return (ENOBUFS); 1247 } 1248 *mtod(m, u_int32_t *) = htonl(0x80000000 | 1249 (m->m_pkthdr.len - NFSX_UNSIGNED)); 1250 } 1251 1252 nfs_checkpkt(m, m->m_pkthdr.len); 1253 1254 rep->r_mreq = m; 1255 rep->r_xid = xid; 1256 return (0); 1257 } 1258 1259 static int 1260 nfs_request_try(struct nfsreq *rep) 1261 { 1262 struct nfsmount *nmp = rep->r_nmp; 1263 struct mbuf *m2; 1264 int error; 1265 1266 /* 1267 * Request is not on any queue, only the owner has access to it 1268 * so it should not be locked by anyone atm. 1269 * 1270 * Interlock to prevent races. While locked the only remote 1271 * action possible is for r_mrep to be set (once we enqueue it). 1272 */ 1273 if (rep->r_flags == 0xdeadc0de) { 1274 print_backtrace(-1); 1275 panic("flags nbad"); 1276 } 1277 KKASSERT((rep->r_flags & (R_LOCKED | R_ONREQQ)) == 0); 1278 if (nmp->nm_flag & NFSMNT_SOFT) 1279 rep->r_retry = nmp->nm_retry; 1280 else 1281 rep->r_retry = NFS_MAXREXMIT + 1; /* past clip limit */ 1282 rep->r_rtt = rep->r_rexmit = 0; 1283 if (proct[rep->r_procnum] > 0) 1284 rep->r_flags |= R_TIMING | R_LOCKED; 1285 else 1286 rep->r_flags |= R_LOCKED; 1287 rep->r_mrep = NULL; 1288 1289 nfsstats.rpcrequests++; 1290 1291 if (nmp->nm_flag & NFSMNT_FORCE) { 1292 rep->r_flags |= R_SOFTTERM; 1293 rep->r_flags &= ~R_LOCKED; 1294 if (rep->r_info) 1295 rep->r_info->error = EINTR; 1296 return (0); 1297 } 1298 rep->r_flags |= R_NEEDSXMIT; /* in case send lock races us */ 1299 1300 /* 1301 * Do the client side RPC. 1302 * 1303 * Chain request into list of outstanding requests. 
Be sure 1304 * to put it LAST so timer finds oldest requests first. Note 1305 * that our control of R_LOCKED prevents the request from 1306 * getting ripped out from under us or transmitted by the 1307 * timer code. 1308 * 1309 * For requests with info structures we must atomically set the 1310 * info's state because the structure could become invalid upon 1311 * return due to races (i.e., if async) 1312 */ 1313 crit_enter(); 1314 mtx_link_init(&rep->r_link); 1315 KKASSERT((rep->r_flags & R_ONREQQ) == 0); 1316 TAILQ_INSERT_TAIL(&nmp->nm_reqq, rep, r_chain); 1317 rep->r_flags |= R_ONREQQ; 1318 ++nmp->nm_reqqlen; 1319 if (rep->r_flags & R_ASYNC) 1320 rep->r_info->state = NFSM_STATE_WAITREPLY; 1321 crit_exit(); 1322 1323 error = 0; 1324 1325 /* 1326 * Send if we can. Congestion control is not handled here any more 1327 * becausing trying to defer the initial send based on the nfs_timer 1328 * requires having a very fast nfs_timer, which is silly. 1329 */ 1330 if (nmp->nm_so) { 1331 if (nmp->nm_soflags & PR_CONNREQUIRED) 1332 error = nfs_sndlock(nmp, rep); 1333 if (error == 0 && (rep->r_flags & R_NEEDSXMIT)) { 1334 m2 = m_copym(rep->r_mreq, 0, M_COPYALL, M_WAITOK); 1335 error = nfs_send(nmp->nm_so, nmp->nm_nam, m2, rep); 1336 rep->r_flags &= ~R_NEEDSXMIT; 1337 if ((rep->r_flags & R_SENT) == 0) { 1338 rep->r_flags |= R_SENT; 1339 } 1340 if (nmp->nm_soflags & PR_CONNREQUIRED) 1341 nfs_sndunlock(nmp); 1342 } 1343 } else { 1344 rep->r_rtt = -1; 1345 } 1346 if (error == EPIPE) 1347 error = 0; 1348 1349 /* 1350 * Release the lock. The only remote action that may have occurred 1351 * would have been the setting of rep->r_mrep. If this occured 1352 * and the request was async we have to move it to the reader 1353 * thread's queue for action. 1354 * 1355 * For async requests also make sure the reader is woken up so 1356 * it gets on the socket to read responses. 1357 */ 1358 crit_enter(); 1359 if (rep->r_flags & R_ASYNC) { 1360 if (rep->r_mrep) 1361 nfs_hardterm(rep, 1); 1362 rep->r_flags &= ~R_LOCKED; 1363 nfssvc_iod_reader_wakeup(nmp); 1364 } else { 1365 rep->r_flags &= ~R_LOCKED; 1366 } 1367 if (rep->r_flags & R_WANTED) { 1368 rep->r_flags &= ~R_WANTED; 1369 wakeup(rep); 1370 } 1371 crit_exit(); 1372 return (error); 1373 } 1374 1375 /* 1376 * This code is only called for synchronous requests. Completed synchronous 1377 * requests are left on reqq and we remove them before moving on to the 1378 * processing state. 1379 */ 1380 static int 1381 nfs_request_waitreply(struct nfsreq *rep) 1382 { 1383 struct nfsmount *nmp = rep->r_nmp; 1384 int error; 1385 1386 KKASSERT((rep->r_flags & R_ASYNC) == 0); 1387 1388 /* 1389 * Wait until the request is finished. 1390 */ 1391 error = nfs_reply(nmp, rep); 1392 1393 /* 1394 * RPC done, unlink the request, but don't rip it out from under 1395 * the callout timer. 1396 * 1397 * Once unlinked no other receiver or the timer will have 1398 * visibility, so we do not have to set R_LOCKED. 1399 */ 1400 crit_enter(); 1401 while (rep->r_flags & R_LOCKED) { 1402 rep->r_flags |= R_WANTED; 1403 tsleep(rep, 0, "nfstrac", 0); 1404 } 1405 KKASSERT(rep->r_flags & R_ONREQQ); 1406 TAILQ_REMOVE(&nmp->nm_reqq, rep, r_chain); 1407 rep->r_flags &= ~R_ONREQQ; 1408 --nmp->nm_reqqlen; 1409 if (TAILQ_FIRST(&nmp->nm_bioq) && 1410 nmp->nm_reqqlen <= nfs_maxasyncbio * 2 / 3) { 1411 nfssvc_iod_writer_wakeup(nmp); 1412 } 1413 crit_exit(); 1414 1415 /* 1416 * Decrement the outstanding request count. 
1417 */ 1418 if (rep->r_flags & R_SENT) { 1419 rep->r_flags &= ~R_SENT; 1420 } 1421 return (error); 1422 } 1423 1424 /* 1425 * Process reply with error returned from nfs_requet_waitreply(). 1426 * 1427 * Returns EAGAIN if it wants us to loop up to nfs_request_try() again. 1428 * Returns ENEEDAUTH if it wants us to loop up to nfs_request_auth() again. 1429 */ 1430 static int 1431 nfs_request_processreply(nfsm_info_t info, int error) 1432 { 1433 struct nfsreq *req = info->req; 1434 struct nfsmount *nmp = req->r_nmp; 1435 u_int32_t *tl; 1436 int verf_type; 1437 int i; 1438 1439 /* 1440 * If there was a successful reply and a tprintf msg. 1441 * tprintf a response. 1442 */ 1443 if (error == 0 && (req->r_flags & R_TPRINTFMSG)) { 1444 nfs_msg(req->r_td, 1445 nmp->nm_mountp->mnt_stat.f_mntfromname, 1446 "is alive again"); 1447 } 1448 1449 /* 1450 * Assign response and handle any pre-process error. Response 1451 * fields can be NULL if an error is already pending. 1452 */ 1453 info->mrep = req->r_mrep; 1454 info->md = req->r_md; 1455 info->dpos = req->r_dpos; 1456 1457 if (error) { 1458 m_freem(req->r_mreq); 1459 req->r_mreq = NULL; 1460 kfree(req, M_NFSREQ); 1461 info->req = NULL; 1462 return (error); 1463 } 1464 1465 /* 1466 * break down the rpc header and check if ok 1467 */ 1468 NULLOUT(tl = nfsm_dissect(info, 3 * NFSX_UNSIGNED)); 1469 if (*tl++ == rpc_msgdenied) { 1470 if (*tl == rpc_mismatch) { 1471 error = EOPNOTSUPP; 1472 } else if ((nmp->nm_flag & NFSMNT_KERB) && 1473 *tl++ == rpc_autherr) { 1474 if (req->r_failed_auth == 0) { 1475 req->r_failed_auth++; 1476 req->r_mheadend->m_next = NULL; 1477 m_freem(info->mrep); 1478 info->mrep = NULL; 1479 m_freem(req->r_mreq); 1480 req->r_mreq = NULL; 1481 return (ENEEDAUTH); 1482 } else { 1483 error = EAUTH; 1484 } 1485 } else { 1486 error = EACCES; 1487 } 1488 m_freem(info->mrep); 1489 info->mrep = NULL; 1490 m_freem(req->r_mreq); 1491 req->r_mreq = NULL; 1492 kfree(req, M_NFSREQ); 1493 info->req = NULL; 1494 return (error); 1495 } 1496 1497 /* 1498 * Grab any Kerberos verifier, otherwise just throw it away. 1499 */ 1500 verf_type = fxdr_unsigned(int, *tl++); 1501 i = fxdr_unsigned(int32_t, *tl); 1502 if ((nmp->nm_flag & NFSMNT_KERB) && verf_type == RPCAUTH_KERB4) { 1503 error = nfs_savenickauth(nmp, req->r_cred, i, req->r_key, 1504 &info->md, &info->dpos, info->mrep); 1505 if (error) 1506 goto nfsmout; 1507 } else if (i > 0) { 1508 ERROROUT(nfsm_adv(info, nfsm_rndup(i))); 1509 } 1510 NULLOUT(tl = nfsm_dissect(info, NFSX_UNSIGNED)); 1511 /* 0 == ok */ 1512 if (*tl == 0) { 1513 NULLOUT(tl = nfsm_dissect(info, NFSX_UNSIGNED)); 1514 if (*tl != 0) { 1515 error = fxdr_unsigned(int, *tl); 1516 1517 /* 1518 * Does anyone even implement this? Just impose 1519 * a 1-second delay. 1520 */ 1521 if ((nmp->nm_flag & NFSMNT_NFSV3) && 1522 error == NFSERR_TRYLATER) { 1523 m_freem(info->mrep); 1524 info->mrep = NULL; 1525 error = 0; 1526 1527 tsleep((caddr_t)&lbolt, 0, "nqnfstry", 0); 1528 return (EAGAIN); /* goto tryagain */ 1529 } 1530 1531 #if 0 1532 /* 1533 * XXX We can't do this here any more because the 1534 * caller may be holding a shared lock on the 1535 * namecache entry. 1536 * 1537 * If the File Handle was stale, invalidate the 1538 * lookup cache, just in case. 1539 * 1540 * To avoid namecache<->vnode deadlocks we must 1541 * release the vnode lock if we hold it. 
1542 */ 1543 if (error == ESTALE) { 1544 struct vnode *vp = req->r_vp; 1545 int ltype; 1546 1547 ltype = lockstatus(&vp->v_lock, curthread); 1548 if (ltype == LK_EXCLUSIVE || ltype == LK_SHARED) 1549 lockmgr(&vp->v_lock, LK_RELEASE); 1550 cache_inval_vp(vp, CINV_CHILDREN); 1551 if (ltype == LK_EXCLUSIVE || ltype == LK_SHARED) 1552 lockmgr(&vp->v_lock, ltype); 1553 } 1554 #endif 1555 if (nmp->nm_flag & NFSMNT_NFSV3) { 1556 KKASSERT(*req->r_mrp == info->mrep); 1557 KKASSERT(*req->r_mdp == info->md); 1558 KKASSERT(*req->r_dposp == info->dpos); 1559 error |= NFSERR_RETERR; 1560 } else { 1561 m_freem(info->mrep); 1562 info->mrep = NULL; 1563 } 1564 m_freem(req->r_mreq); 1565 req->r_mreq = NULL; 1566 kfree(req, M_NFSREQ); 1567 info->req = NULL; 1568 return (error); 1569 } 1570 1571 KKASSERT(*req->r_mrp == info->mrep); 1572 KKASSERT(*req->r_mdp == info->md); 1573 KKASSERT(*req->r_dposp == info->dpos); 1574 m_freem(req->r_mreq); 1575 req->r_mreq = NULL; 1576 kfree(req, M_NFSREQ); 1577 return (0); 1578 } 1579 m_freem(info->mrep); 1580 info->mrep = NULL; 1581 error = EPROTONOSUPPORT; 1582 nfsmout: 1583 m_freem(req->r_mreq); 1584 req->r_mreq = NULL; 1585 kfree(req, M_NFSREQ); 1586 info->req = NULL; 1587 return (error); 1588 } 1589 1590 #ifndef NFS_NOSERVER 1591 /* 1592 * Generate the rpc reply header 1593 * siz arg. is used to decide if adding a cluster is worthwhile 1594 */ 1595 int 1596 nfs_rephead(int siz, struct nfsrv_descript *nd, struct nfssvc_sock *slp, 1597 int err, struct mbuf **mrq, struct mbuf **mbp, caddr_t *bposp) 1598 { 1599 u_int32_t *tl; 1600 struct nfsm_info info; 1601 1602 siz += RPC_REPLYSIZ; 1603 info.mb = m_getl(max_hdr + siz, M_WAITOK, MT_DATA, M_PKTHDR, NULL); 1604 info.mreq = info.mb; 1605 info.mreq->m_pkthdr.len = 0; 1606 /* 1607 * If this is not a cluster, try and leave leading space 1608 * for the lower level headers. 1609 */ 1610 if ((max_hdr + siz) < MINCLSIZE) 1611 info.mreq->m_data += max_hdr; 1612 tl = mtod(info.mreq, u_int32_t *); 1613 info.mreq->m_len = 6 * NFSX_UNSIGNED; 1614 info.bpos = ((caddr_t)tl) + info.mreq->m_len; 1615 *tl++ = txdr_unsigned(nd->nd_retxid); 1616 *tl++ = rpc_reply; 1617 if (err == ERPCMISMATCH || (err & NFSERR_AUTHERR)) { 1618 *tl++ = rpc_msgdenied; 1619 if (err & NFSERR_AUTHERR) { 1620 *tl++ = rpc_autherr; 1621 *tl = txdr_unsigned(err & ~NFSERR_AUTHERR); 1622 info.mreq->m_len -= NFSX_UNSIGNED; 1623 info.bpos -= NFSX_UNSIGNED; 1624 } else { 1625 *tl++ = rpc_mismatch; 1626 *tl++ = txdr_unsigned(RPC_VER2); 1627 *tl = txdr_unsigned(RPC_VER2); 1628 } 1629 } else { 1630 *tl++ = rpc_msgaccepted; 1631 1632 /* 1633 * For Kerberos authentication, we must send the nickname 1634 * verifier back, otherwise just RPCAUTH_NULL. 1635 */ 1636 if (nd->nd_flag & ND_KERBFULL) { 1637 struct nfsuid *nuidp; 1638 struct timeval ktvout; 1639 1640 for (nuidp = NUIDHASH(slp, nd->nd_cr.cr_uid)->lh_first; 1641 nuidp != NULL; nuidp = nuidp->nu_hash.le_next) { 1642 if (nuidp->nu_cr.cr_uid == nd->nd_cr.cr_uid && 1643 (!nd->nd_nam2 || netaddr_match(NU_NETFAM(nuidp), 1644 &nuidp->nu_haddr, nd->nd_nam2))) 1645 break; 1646 } 1647 if (nuidp) { 1648 /* 1649 * Encrypt the timestamp in ecb mode using the 1650 * session key. 
1651 */ 1652 #ifdef NFSKERB 1653 XXX 1654 #else 1655 ktvout.tv_sec = 0; 1656 ktvout.tv_usec = 0; 1657 #endif 1658 1659 *tl++ = rpc_auth_kerb; 1660 *tl++ = txdr_unsigned(3 * NFSX_UNSIGNED); 1661 *tl = ktvout.tv_sec; 1662 tl = nfsm_build(&info, 3 * NFSX_UNSIGNED); 1663 *tl++ = ktvout.tv_usec; 1664 *tl++ = txdr_unsigned(nuidp->nu_cr.cr_uid); 1665 } else { 1666 *tl++ = 0; 1667 *tl++ = 0; 1668 } 1669 } else { 1670 *tl++ = 0; 1671 *tl++ = 0; 1672 } 1673 switch (err) { 1674 case EPROGUNAVAIL: 1675 *tl = txdr_unsigned(RPC_PROGUNAVAIL); 1676 break; 1677 case EPROGMISMATCH: 1678 *tl = txdr_unsigned(RPC_PROGMISMATCH); 1679 tl = nfsm_build(&info, 2 * NFSX_UNSIGNED); 1680 *tl++ = txdr_unsigned(2); 1681 *tl = txdr_unsigned(3); 1682 break; 1683 case EPROCUNAVAIL: 1684 *tl = txdr_unsigned(RPC_PROCUNAVAIL); 1685 break; 1686 case EBADRPC: 1687 *tl = txdr_unsigned(RPC_GARBAGE); 1688 break; 1689 default: 1690 *tl = 0; 1691 if (err != NFSERR_RETVOID) { 1692 tl = nfsm_build(&info, NFSX_UNSIGNED); 1693 if (err) 1694 *tl = txdr_unsigned(nfsrv_errmap(nd, err)); 1695 else 1696 *tl = 0; 1697 } 1698 break; 1699 } 1700 } 1701 1702 if (mrq != NULL) 1703 *mrq = info.mreq; 1704 *mbp = info.mb; 1705 *bposp = info.bpos; 1706 if (err != 0 && err != NFSERR_RETVOID) 1707 nfsstats.srvrpc_errs++; 1708 return (0); 1709 } 1710 1711 1712 #endif /* NFS_NOSERVER */ 1713 1714 /* 1715 * Nfs timer routine. 1716 * 1717 * Scan the nfsreq list and retranmit any requests that have timed out 1718 * To avoid retransmission attempts on STREAM sockets (in the future) make 1719 * sure to set the r_retry field to 0 (implies nm_retry == 0). 1720 * 1721 * Requests with attached responses, terminated requests, and 1722 * locked requests are ignored. Locked requests will be picked up 1723 * in a later timer call. 1724 */ 1725 void 1726 nfs_timer_callout(void *arg /* never used */) 1727 { 1728 struct nfsmount *nmp; 1729 struct nfsreq *req; 1730 #ifndef NFS_NOSERVER 1731 struct nfssvc_sock *slp; 1732 u_quad_t cur_usec; 1733 #endif /* NFS_NOSERVER */ 1734 1735 lwkt_gettoken(&nfs_token); 1736 TAILQ_FOREACH(nmp, &nfs_mountq, nm_entry) { 1737 lwkt_gettoken(&nmp->nm_token); 1738 TAILQ_FOREACH(req, &nmp->nm_reqq, r_chain) { 1739 KKASSERT(nmp == req->r_nmp); 1740 if (req->r_mrep) 1741 continue; 1742 if (req->r_flags & (R_SOFTTERM | R_LOCKED)) 1743 continue; 1744 1745 /* 1746 * Handle timeout/retry. Be sure to process r_mrep 1747 * for async requests that completed while we had 1748 * the request locked or they will hang in the reqq 1749 * forever. 1750 */ 1751 req->r_flags |= R_LOCKED; 1752 if (nfs_sigintr(nmp, req, req->r_td)) { 1753 nfs_softterm(req, 1); 1754 req->r_flags &= ~R_LOCKED; 1755 } else { 1756 nfs_timer_req(req); 1757 if (req->r_flags & R_ASYNC) { 1758 if (req->r_mrep) 1759 nfs_hardterm(req, 1); 1760 req->r_flags &= ~R_LOCKED; 1761 nfssvc_iod_reader_wakeup(nmp); 1762 } else { 1763 req->r_flags &= ~R_LOCKED; 1764 } 1765 } 1766 if (req->r_flags & R_WANTED) { 1767 req->r_flags &= ~R_WANTED; 1768 wakeup(req); 1769 } 1770 } 1771 lwkt_reltoken(&nmp->nm_token); 1772 } 1773 #ifndef NFS_NOSERVER 1774 1775 /* 1776 * Scan the write gathering queues for writes that need to be 1777 * completed now. 
1778 */ 1779 cur_usec = nfs_curusec(); 1780 1781 TAILQ_FOREACH(slp, &nfssvc_sockhead, ns_chain) { 1782 /* XXX race against removal */ 1783 if (lwkt_trytoken(&slp->ns_token)) { 1784 if (slp->ns_tq.lh_first && 1785 (slp->ns_tq.lh_first->nd_time <= cur_usec)) { 1786 nfsrv_wakenfsd(slp, 1); 1787 } 1788 lwkt_reltoken(&slp->ns_token); 1789 } 1790 } 1791 #endif /* NFS_NOSERVER */ 1792 1793 callout_reset(&nfs_timer_handle, nfs_ticks, nfs_timer_callout, NULL); 1794 lwkt_reltoken(&nfs_token); 1795 } 1796 1797 static 1798 void 1799 nfs_timer_req(struct nfsreq *req) 1800 { 1801 struct thread *td = &thread0; /* XXX for creds, will break if sleep */ 1802 struct nfsmount *nmp = req->r_nmp; 1803 struct mbuf *m; 1804 struct socket *so; 1805 int timeo; 1806 int error; 1807 1808 /* 1809 * rtt ticks and timeout calculation. Return if the timeout 1810 * has not been reached yet, unless the packet is flagged 1811 * for an immediate send. 1812 * 1813 * The mean rtt doesn't help when we get random I/Os, we have 1814 * to multiply by fairly large numbers. 1815 */ 1816 if (req->r_rtt >= 0) { 1817 /* 1818 * Calculate the timeout to test against. 1819 */ 1820 req->r_rtt++; 1821 if (nmp->nm_flag & NFSMNT_DUMBTIMR) { 1822 timeo = nmp->nm_timeo << NFS_RTT_SCALE_BITS; 1823 } else if (req->r_flags & R_TIMING) { 1824 timeo = NFS_SRTT(req) + NFS_SDRTT(req); 1825 } else { 1826 timeo = nmp->nm_timeo << NFS_RTT_SCALE_BITS; 1827 } 1828 timeo *= multt[req->r_procnum]; 1829 /* timeo is still scaled by SCALE_BITS */ 1830 1831 #define NFSFS (NFS_RTT_SCALE * NFS_HZ) 1832 if (req->r_flags & R_TIMING) { 1833 static long last_time; 1834 if (nfs_showrtt && last_time != time_uptime) { 1835 kprintf("rpccmd %d NFS SRTT %d SDRTT %d " 1836 "timeo %d.%03d\n", 1837 proct[req->r_procnum], 1838 NFS_SRTT(req), NFS_SDRTT(req), 1839 timeo / NFSFS, 1840 timeo % NFSFS * 1000 / NFSFS); 1841 last_time = time_uptime; 1842 } 1843 } 1844 #undef NFSFS 1845 1846 /* 1847 * deal with nfs_timer jitter. 1848 */ 1849 timeo = (timeo >> NFS_RTT_SCALE_BITS) + 1; 1850 if (timeo < 2) 1851 timeo = 2; 1852 1853 if (nmp->nm_timeouts > 0) 1854 timeo *= nfs_backoff[nmp->nm_timeouts - 1]; 1855 if (timeo > NFS_MAXTIMEO) 1856 timeo = NFS_MAXTIMEO; 1857 if (req->r_rtt <= timeo) { 1858 if ((req->r_flags & R_NEEDSXMIT) == 0) 1859 return; 1860 } else if (nmp->nm_timeouts < 8) { 1861 nmp->nm_timeouts++; 1862 } 1863 } 1864 1865 /* 1866 * Check for server not responding 1867 */ 1868 if ((req->r_flags & R_TPRINTFMSG) == 0 && 1869 req->r_rexmit > nmp->nm_deadthresh) { 1870 nfs_msg(req->r_td, nmp->nm_mountp->mnt_stat.f_mntfromname, 1871 "not responding"); 1872 req->r_flags |= R_TPRINTFMSG; 1873 } 1874 if (req->r_rexmit >= req->r_retry) { /* too many */ 1875 nfsstats.rpctimeouts++; 1876 nfs_softterm(req, 1); 1877 return; 1878 } 1879 1880 /* 1881 * Generally disable retransmission on reliable sockets, 1882 * unless the request is flagged for immediate send. 1883 */ 1884 if (nmp->nm_sotype != SOCK_DGRAM) { 1885 if (++req->r_rexmit > NFS_MAXREXMIT) 1886 req->r_rexmit = NFS_MAXREXMIT; 1887 if ((req->r_flags & R_NEEDSXMIT) == 0) 1888 return; 1889 } 1890 1891 /* 1892 * Stop here if we do not have a socket! 1893 */ 1894 if ((so = nmp->nm_so) == NULL) 1895 return; 1896 1897 /* 1898 * If there is enough space and the window allows.. resend it. 1899 * 1900 * r_rtt is left intact in case we get an answer after the 1901 * retry that was a reply to the original packet. 
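 *
 * The copy is made with M_NOWAIT; if no mbufs are available the copy
 * fails, the block below is skipped, and we simply try again on a
 * later timer tick.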
1902 * 1903 * NOTE: so_pru_send() 1904 */ 1905 if (ssb_space(&so->so_snd) >= req->r_mreq->m_pkthdr.len && 1906 (req->r_flags & (R_SENT | R_NEEDSXMIT)) && 1907 (m = m_copym(req->r_mreq, 0, M_COPYALL, M_NOWAIT))){ 1908 if ((nmp->nm_flag & NFSMNT_NOCONN) == 0) 1909 error = so_pru_send(so, 0, m, NULL, NULL, td); 1910 else 1911 error = so_pru_send(so, 0, m, nmp->nm_nam, NULL, td); 1912 if (error) { 1913 if (NFSIGNORE_SOERROR(nmp->nm_soflags, error)) 1914 so->so_error = 0; 1915 req->r_flags |= R_NEEDSXMIT; 1916 } else if (req->r_mrep == NULL) { 1917 /* 1918 * Iff first send, start timing 1919 * else turn timing off, backoff timer 1920 * and divide congestion window by 2. 1921 * 1922 * It is possible for the so_pru_send() to 1923 * block and for us to race a reply so we 1924 * only do this if the reply field has not 1925 * been filled in. R_LOCKED will prevent 1926 * the request from being ripped out from under 1927 * us entirely. 1928 * 1929 * Record the last resent procnum to aid us 1930 * in duplicate detection on receive. 1931 */ 1932 if ((req->r_flags & R_NEEDSXMIT) == 0) { 1933 if (nfs_showrexmit) 1934 kprintf("X"); 1935 if (++req->r_rexmit > NFS_MAXREXMIT) 1936 req->r_rexmit = NFS_MAXREXMIT; 1937 nmp->nm_maxasync_scaled >>= 1; 1938 if (nmp->nm_maxasync_scaled < NFS_MINASYNC_SCALED) 1939 nmp->nm_maxasync_scaled = NFS_MINASYNC_SCALED; 1940 nfsstats.rpcretries++; 1941 nmp->nm_lastreprocnum = req->r_procnum; 1942 } else { 1943 req->r_flags |= R_SENT; 1944 req->r_flags &= ~R_NEEDSXMIT; 1945 } 1946 } 1947 } 1948 } 1949 1950 /* 1951 * Mark all of an nfs mount's outstanding requests with R_SOFTTERM and 1952 * wait for all requests to complete. This is used by forced unmounts 1953 * to terminate any outstanding RPCs. 1954 * 1955 * Locked requests cannot be canceled but will be marked for 1956 * soft-termination. 1957 */ 1958 int 1959 nfs_nmcancelreqs(struct nfsmount *nmp) 1960 { 1961 struct nfsreq *req; 1962 int i; 1963 1964 crit_enter(); 1965 TAILQ_FOREACH(req, &nmp->nm_reqq, r_chain) { 1966 if (req->r_mrep != NULL || (req->r_flags & R_SOFTTERM)) 1967 continue; 1968 nfs_softterm(req, 0); 1969 } 1970 /* XXX the other two queues as well */ 1971 crit_exit(); 1972 1973 for (i = 0; i < 30; i++) { 1974 crit_enter(); 1975 TAILQ_FOREACH(req, &nmp->nm_reqq, r_chain) { 1976 if (nmp == req->r_nmp) 1977 break; 1978 } 1979 crit_exit(); 1980 if (req == NULL) 1981 return (0); 1982 tsleep(&lbolt, 0, "nfscancel", 0); 1983 } 1984 return (EBUSY); 1985 } 1986 1987 /* 1988 * Soft-terminate a request, effectively marking it as failed. 1989 * 1990 * Must be called from within a critical section. 1991 */ 1992 static void 1993 nfs_softterm(struct nfsreq *rep, int islocked) 1994 { 1995 rep->r_flags |= R_SOFTTERM; 1996 if (rep->r_info) 1997 rep->r_info->error = EINTR; 1998 nfs_hardterm(rep, islocked); 1999 } 2000 2001 /* 2002 * Hard-terminate a request, typically after getting a response. 2003 * 2004 * The state machine can still decide to re-issue it later if necessary. 2005 * 2006 * Must be called from within a critical section. 2007 */ 2008 static void 2009 nfs_hardterm(struct nfsreq *rep, int islocked) 2010 { 2011 struct nfsmount *nmp = rep->r_nmp; 2012 2013 /* 2014 * The nm_send count is decremented now to avoid deadlocks 2015 * when the process in soreceive() hasn't yet managed to send 2016 * its own request. 
2017 */ 2018 if (rep->r_flags & R_SENT) { 2019 rep->r_flags &= ~R_SENT; 2020 } 2021 2022 /* 2023 * If we locked the request or nobody else has locked the request, 2024 * and the request is async, we can move it to the reader thread's 2025 * queue now and fix up the state. 2026 * 2027 * If we locked the request or nobody else has locked the request, 2028 * we can wake up anyone blocked waiting for a response on the 2029 * request. 2030 */ 2031 if (islocked || (rep->r_flags & R_LOCKED) == 0) { 2032 if ((rep->r_flags & (R_ONREQQ | R_ASYNC)) == 2033 (R_ONREQQ | R_ASYNC)) { 2034 rep->r_flags &= ~R_ONREQQ; 2035 TAILQ_REMOVE(&nmp->nm_reqq, rep, r_chain); 2036 --nmp->nm_reqqlen; 2037 TAILQ_INSERT_TAIL(&nmp->nm_reqrxq, rep, r_chain); 2038 KKASSERT(rep->r_info->state == NFSM_STATE_TRY || 2039 rep->r_info->state == NFSM_STATE_WAITREPLY); 2040 2041 /* 2042 * When setting the state to PROCESSREPLY we must 2043 * roll-up any error not related to the contents of 2044 * the reply (i.e. if there is no contents). 2045 */ 2046 rep->r_info->state = NFSM_STATE_PROCESSREPLY; 2047 nfssvc_iod_reader_wakeup(nmp); 2048 if (TAILQ_FIRST(&nmp->nm_bioq) && 2049 nmp->nm_reqqlen <= nfs_maxasyncbio * 2 / 3) { 2050 nfssvc_iod_writer_wakeup(nmp); 2051 } 2052 } 2053 mtx_abort_link(&nmp->nm_rxlock, &rep->r_link); 2054 } 2055 } 2056 2057 /* 2058 * Test for a termination condition pending on the process. 2059 * This is used for NFSMNT_INT mounts. 2060 */ 2061 int 2062 nfs_sigintr(struct nfsmount *nmp, struct nfsreq *rep, struct thread *td) 2063 { 2064 sigset_t tmpset; 2065 struct proc *p; 2066 struct lwp *lp; 2067 2068 if (rep && (rep->r_flags & R_SOFTTERM)) 2069 return (EINTR); 2070 /* Terminate all requests while attempting a forced unmount. */ 2071 if (nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF) 2072 return (EINTR); 2073 if (!(nmp->nm_flag & NFSMNT_INT)) 2074 return (0); 2075 /* td might be NULL YYY */ 2076 if (td == NULL || (p = td->td_proc) == NULL) 2077 return (0); 2078 2079 lp = td->td_lwp; 2080 tmpset = lwp_sigpend(lp); 2081 SIGSETNAND(tmpset, lp->lwp_sigmask); 2082 SIGSETNAND(tmpset, p->p_sigignore); 2083 if (SIGNOTEMPTY(tmpset) && NFSINT_SIGMASK(tmpset)) 2084 return (EINTR); 2085 2086 return (0); 2087 } 2088 2089 /* 2090 * Lock a socket against others. 2091 * Necessary for STREAM sockets to ensure you get an entire rpc request/reply 2092 * and also to avoid race conditions between the processes with nfs requests 2093 * in progress when a reconnect is necessary. 2094 */ 2095 int 2096 nfs_sndlock(struct nfsmount *nmp, struct nfsreq *rep) 2097 { 2098 mtx_t *mtx = &nmp->nm_txlock; 2099 struct thread *td; 2100 int slptimeo; 2101 int slpflag; 2102 int error; 2103 2104 slpflag = 0; 2105 slptimeo = 0; 2106 td = rep ? rep->r_td : NULL; 2107 if (nmp->nm_flag & NFSMNT_INT) 2108 slpflag = PCATCH; 2109 2110 while ((error = mtx_lock_ex_try(mtx)) != 0) { 2111 if (nfs_sigintr(nmp, rep, td)) { 2112 error = EINTR; 2113 break; 2114 } 2115 error = mtx_lock_ex(mtx, "nfsndlck", slpflag, slptimeo); 2116 if (error == 0) 2117 break; 2118 if (slpflag == PCATCH) { 2119 slpflag = 0; 2120 slptimeo = 2 * hz; 2121 } 2122 } 2123 /* Always fail if our request has been cancelled. */ 2124 if (rep && (rep->r_flags & R_SOFTTERM)) { 2125 if (error == 0) 2126 mtx_unlock(mtx); 2127 error = EINTR; 2128 } 2129 return (error); 2130 } 2131 2132 /* 2133 * Unlock the stream socket for others. 2134 */ 2135 void 2136 nfs_sndunlock(struct nfsmount *nmp) 2137 { 2138 mtx_unlock(&nmp->nm_txlock); 2139 } 2140 2141 /* 2142 * Lock the receiver side of the socket. 
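 * Returns EALREADY, without holding the lock, when the caller's reply
 * has already arrived; nfs_reply() treats that as success.  Otherwise
 * 0 is returned with the lock held, or an error (e.g. EINTR) without it.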
/*
 * Lock the receiver side of the socket.
 *
 * rep may be NULL.
 */
static int
nfs_rcvlock(struct nfsmount *nmp, struct nfsreq *rep)
{
	mtx_t *mtx = &nmp->nm_rxlock;
	int slpflag;
	int slptimeo;
	int error;

	/*
	 * Unconditionally check for completion in case another nfsiod
	 * got the packet while the caller was blocked, before the caller
	 * called us.  Packet reception is handled by mainline code which
	 * is protected by the BGL at the moment.
	 *
	 * We do not strictly need the second check just before the
	 * tsleep(), but it's good defensive programming.
	 */
	if (rep && rep->r_mrep != NULL)
		return (EALREADY);

	if (nmp->nm_flag & NFSMNT_INT)
		slpflag = PCATCH;
	else
		slpflag = 0;
	slptimeo = 0;

	while ((error = mtx_lock_ex_try(mtx)) != 0) {
		if (nfs_sigintr(nmp, rep, (rep ? rep->r_td : NULL))) {
			error = EINTR;
			break;
		}
		if (rep && rep->r_mrep != NULL) {
			error = EALREADY;
			break;
		}

		/*
		 * NOTE: can return ENOLCK, but in that case rep->r_mrep
		 * will already be set.
		 */
		if (rep) {
			error = mtx_lock_ex_link(mtx, &rep->r_link,
						 "nfsrcvlk",
						 slpflag, slptimeo);
		} else {
			error = mtx_lock_ex(mtx, "nfsrcvlk", slpflag, slptimeo);
		}
		if (error == 0)
			break;

		/*
		 * If our reply was received while we were sleeping,
		 * then just return without taking the lock to avoid a
		 * situation where a single iod could 'capture' the
		 * receive lock.
		 */
		if (rep && rep->r_mrep != NULL) {
			error = EALREADY;
			break;
		}
		if (slpflag == PCATCH) {
			slpflag = 0;
			slptimeo = 2 * hz;
		}
	}
	if (error == 0) {
		if (rep && rep->r_mrep != NULL) {
			error = EALREADY;
			mtx_unlock(mtx);
		}
	}
	return (error);
}

/*
 * Unlock the stream socket for others.
 */
static void
nfs_rcvunlock(struct nfsmount *nmp)
{
	mtx_unlock(&nmp->nm_rxlock);
}

/*
 * nfs_realign:
 *
 * Check for badly aligned mbuf data and realign by copying the unaligned
 * portion of the data into a new mbuf chain and freeing the portions
 * of the old chain that were replaced.
 *
 * We cannot simply realign the data within the existing mbuf chain
 * because the underlying buffers may contain other rpc commands and
 * we cannot afford to overwrite them.
 *
 * We would prefer to avoid this situation entirely.  The situation does
 * not occur with NFS/UDP and is supposed to only occasionally occur
 * with TCP.  Use vfs.nfs.realign_count and realign_test to check this.
 *
 * NOTE!  M_NOWAIT cannot be used here.  The mbufs must be acquired
 * because the rpc request OR reply cannot be thrown away.  TCP NFS
 * mounts do not retry their RPCs unless the TCP connection itself
 * is dropped, so throwing away an RPC would basically cause the NFS
 * operation to lock up indefinitely.
 */
static void
nfs_realign(struct mbuf **pm, int hsiz)
{
	struct mbuf *m;
	struct mbuf *n = NULL;

	/*
	 * Check for misalignment
	 */
	++nfs_realign_test;
	while ((m = *pm) != NULL) {
		if ((m->m_len & 0x3) || (mtod(m, intptr_t) & 0x3))
			break;
		pm = &m->m_next;
	}

	/*
	 * If misalignment is found, make a completely new copy.
	 */
	if (m) {
		++nfs_realign_count;
		n = m_dup_data(m, M_WAITOK);
		m_freem(*pm);
		*pm = n;
	}
}
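
/*
 * Illustrative sketch only, not part of the original file: the per-mbuf
 * alignment test nfs_realign() applies above.  XDR words are fetched
 * directly as u_int32_t, so both the data pointer and the length must be
 * 4-byte aligned for the dissect macros to read them in place.  The helper
 * name is hypothetical and the block is kept inside #if 0 so it is never
 * compiled.
 */
#if 0
static int
example_mbuf_is_xdr_aligned(struct mbuf *m)
{
	return ((m->m_len & 0x3) == 0 && (mtod(m, intptr_t) & 0x3) == 0);
}
#endif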

#ifndef NFS_NOSERVER

/*
 * Parse an RPC request
 * - verify it
 * - fill in the cred struct.
 */
int
nfs_getreq(struct nfsrv_descript *nd, struct nfsd *nfsd, int has_header)
{
	int len, i;
	u_int32_t *tl;
	struct uio uio;
	struct iovec iov;
	caddr_t cp;
	u_int32_t nfsvers, auth_type;
	uid_t nickuid;
	int error = 0, ticklen;
	struct nfsuid *nuidp;
	struct timeval tvin, tvout;
	struct nfsm_info info;
#if 0	/* until encrypted keys are implemented */
	NFSKERBKEYSCHED_T keys;		/* stores key schedule */
#endif

	info.mrep = nd->nd_mrep;
	info.md = nd->nd_md;
	info.dpos = nd->nd_dpos;

	if (has_header) {
		NULLOUT(tl = nfsm_dissect(&info, 10 * NFSX_UNSIGNED));
		nd->nd_retxid = fxdr_unsigned(u_int32_t, *tl++);
		if (*tl++ != rpc_call) {
			m_freem(info.mrep);
			return (EBADRPC);
		}
	} else {
		NULLOUT(tl = nfsm_dissect(&info, 8 * NFSX_UNSIGNED));
	}
	nd->nd_repstat = 0;
	nd->nd_flag = 0;
	if (*tl++ != rpc_vers) {
		nd->nd_repstat = ERPCMISMATCH;
		nd->nd_procnum = NFSPROC_NOOP;
		return (0);
	}
	if (*tl != nfs_prog) {
		nd->nd_repstat = EPROGUNAVAIL;
		nd->nd_procnum = NFSPROC_NOOP;
		return (0);
	}
	tl++;
	nfsvers = fxdr_unsigned(u_int32_t, *tl++);
	if (nfsvers < NFS_VER2 || nfsvers > NFS_VER3) {
		nd->nd_repstat = EPROGMISMATCH;
		nd->nd_procnum = NFSPROC_NOOP;
		return (0);
	}
	if (nfsvers == NFS_VER3)
		nd->nd_flag = ND_NFSV3;
	nd->nd_procnum = fxdr_unsigned(u_int32_t, *tl++);
	if (nd->nd_procnum == NFSPROC_NULL)
		return (0);
	if (nd->nd_procnum >= NFS_NPROCS ||
	    (nd->nd_procnum >= NQNFSPROC_GETLEASE) ||
	    (!nd->nd_flag && nd->nd_procnum > NFSV2PROC_STATFS)) {
		nd->nd_repstat = EPROCUNAVAIL;
		nd->nd_procnum = NFSPROC_NOOP;
		return (0);
	}
	if ((nd->nd_flag & ND_NFSV3) == 0)
		nd->nd_procnum = nfsv3_procid[nd->nd_procnum];
	auth_type = *tl++;
	len = fxdr_unsigned(int, *tl++);
	if (len < 0 || len > RPCAUTH_MAXSIZ) {
		m_freem(info.mrep);
		return (EBADRPC);
	}

	nd->nd_flag &= ~ND_KERBAUTH;
	/*
	 * Handle auth_unix or auth_kerb.
	 */
	if (auth_type == rpc_auth_unix) {
		len = fxdr_unsigned(int, *++tl);
		if (len < 0 || len > NFS_MAXNAMLEN) {
			m_freem(info.mrep);
			return (EBADRPC);
		}
		ERROROUT(nfsm_adv(&info, nfsm_rndup(len)));
		NULLOUT(tl = nfsm_dissect(&info, 3 * NFSX_UNSIGNED));
		bzero((caddr_t)&nd->nd_cr, sizeof (struct ucred));
		nd->nd_cr.cr_ref = 1;
		nd->nd_cr.cr_uid = fxdr_unsigned(uid_t, *tl++);
		nd->nd_cr.cr_ruid = nd->nd_cr.cr_svuid = nd->nd_cr.cr_uid;
		nd->nd_cr.cr_gid = fxdr_unsigned(gid_t, *tl++);
		nd->nd_cr.cr_rgid = nd->nd_cr.cr_svgid = nd->nd_cr.cr_gid;
		len = fxdr_unsigned(int, *tl);
		if (len < 0 || len > RPCAUTH_UNIXGIDS) {
			m_freem(info.mrep);
			return (EBADRPC);
		}
		NULLOUT(tl = nfsm_dissect(&info, (len + 2) * NFSX_UNSIGNED));
		for (i = 1; i <= len; i++)
			if (i < NGROUPS)
				nd->nd_cr.cr_groups[i] = fxdr_unsigned(gid_t, *tl++);
			else
				tl++;
		nd->nd_cr.cr_ngroups = (len >= NGROUPS) ?
		    NGROUPS : (len + 1);
		if (nd->nd_cr.cr_ngroups > 1)
			nfsrvw_sort(nd->nd_cr.cr_groups, nd->nd_cr.cr_ngroups);
		len = fxdr_unsigned(int, *++tl);
		if (len < 0 || len > RPCAUTH_MAXSIZ) {
			m_freem(info.mrep);
			return (EBADRPC);
		}
		if (len > 0) {
			ERROROUT(nfsm_adv(&info, nfsm_rndup(len)));
		}
	} else if (auth_type == rpc_auth_kerb) {
		switch (fxdr_unsigned(int, *tl++)) {
		case RPCAKN_FULLNAME:
			ticklen = fxdr_unsigned(int, *tl);
			*((u_int32_t *)nfsd->nfsd_authstr) = *tl;
			uio.uio_resid = nfsm_rndup(ticklen) + NFSX_UNSIGNED;
			nfsd->nfsd_authlen = uio.uio_resid + NFSX_UNSIGNED;
			if (uio.uio_resid > (len - 2 * NFSX_UNSIGNED)) {
				m_freem(info.mrep);
				return (EBADRPC);
			}
			uio.uio_offset = 0;
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_segflg = UIO_SYSSPACE;
			iov.iov_base = (caddr_t)&nfsd->nfsd_authstr[4];
			iov.iov_len = RPCAUTH_MAXSIZ - 4;
			ERROROUT(nfsm_mtouio(&info, &uio, uio.uio_resid));
			NULLOUT(tl = nfsm_dissect(&info, 2 * NFSX_UNSIGNED));
			if (*tl++ != rpc_auth_kerb ||
			    fxdr_unsigned(int, *tl) != 4 * NFSX_UNSIGNED) {
				kprintf("Bad kerb verifier\n");
				nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADVERF);
				nd->nd_procnum = NFSPROC_NOOP;
				return (0);
			}
			NULLOUT(cp = nfsm_dissect(&info, 4 * NFSX_UNSIGNED));
			tl = (u_int32_t *)cp;
			if (fxdr_unsigned(int, *tl) != RPCAKN_FULLNAME) {
				kprintf("Not fullname kerb verifier\n");
				nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADVERF);
				nd->nd_procnum = NFSPROC_NOOP;
				return (0);
			}
			cp += NFSX_UNSIGNED;
			bcopy(cp, nfsd->nfsd_verfstr, 3 * NFSX_UNSIGNED);
			nfsd->nfsd_verflen = 3 * NFSX_UNSIGNED;
			nd->nd_flag |= ND_KERBFULL;
			nfsd->nfsd_flag |= NFSD_NEEDAUTH;
			break;
		case RPCAKN_NICKNAME:
			if (len != 2 * NFSX_UNSIGNED) {
				kprintf("Kerb nickname short\n");
				nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADCRED);
				nd->nd_procnum = NFSPROC_NOOP;
				return (0);
			}
			nickuid = fxdr_unsigned(uid_t, *tl);
			NULLOUT(tl = nfsm_dissect(&info, 2 * NFSX_UNSIGNED));
			if (*tl++ != rpc_auth_kerb ||
			    fxdr_unsigned(int, *tl) != 3 * NFSX_UNSIGNED) {
				kprintf("Kerb nick verifier bad\n");
				nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADVERF);
				nd->nd_procnum = NFSPROC_NOOP;
				return (0);
			}
			NULLOUT(tl = nfsm_dissect(&info, 3 * NFSX_UNSIGNED));
			tvin.tv_sec = *tl++;
			tvin.tv_usec = *tl;

			for (nuidp = NUIDHASH(nfsd->nfsd_slp,nickuid)->lh_first;
			     nuidp != NULL; nuidp = nuidp->nu_hash.le_next) {
				if (nuidp->nu_cr.cr_uid == nickuid &&
				    (!nd->nd_nam2 ||
				     netaddr_match(NU_NETFAM(nuidp),
						   &nuidp->nu_haddr, nd->nd_nam2)))
					break;
			}
			if (!nuidp) {
				nd->nd_repstat =
				    (NFSERR_AUTHERR|AUTH_REJECTCRED);
				nd->nd_procnum = NFSPROC_NOOP;
				return (0);
			}

			/*
			 * Now, decrypt the timestamp using the session key
			 * and validate it.
			 */
#ifdef NFSKERB
			XXX
#else
			tvout.tv_sec = 0;
			tvout.tv_usec = 0;
#endif

			tvout.tv_sec = fxdr_unsigned(long, tvout.tv_sec);
			tvout.tv_usec = fxdr_unsigned(long, tvout.tv_usec);
			if (nuidp->nu_expire != time_uptime ||
			    nuidp->nu_timestamp.tv_sec > tvout.tv_sec ||
			    (nuidp->nu_timestamp.tv_sec == tvout.tv_sec &&
			     nuidp->nu_timestamp.tv_usec > tvout.tv_usec)) {
				nuidp->nu_expire = 0;
				nd->nd_repstat =
				    (NFSERR_AUTHERR|AUTH_REJECTVERF);
				nd->nd_procnum = NFSPROC_NOOP;
				return (0);
			}
			nfsrv_setcred(&nuidp->nu_cr, &nd->nd_cr);
			nd->nd_flag |= ND_KERBNICK;
			break;
		}
	} else {
		nd->nd_repstat = (NFSERR_AUTHERR | AUTH_REJECTCRED);
		nd->nd_procnum = NFSPROC_NOOP;
		return (0);
	}

	nd->nd_md = info.md;
	nd->nd_dpos = info.dpos;
	return (0);
nfsmout:
	return (error);
}

#endif

/*
 * Send a message to the originating process's terminal.  The thread and/or
 * process may be NULL.  YYY the thread should not be NULL but there may
 * still be some uio_td's that are still being passed as NULL through to
 * nfsm_request().
 */
static int
nfs_msg(struct thread *td, char *server, char *msg)
{
	tpr_t tpr;

	if (td && td->td_proc)
		tpr = tprintf_open(td->td_proc);
	else
		tpr = NULL;
	tprintf(tpr, "nfs server %s: %s\n", server, msg);
	tprintf_close(tpr);
	return (0);
}

#ifndef NFS_NOSERVER

/*
 * Socket upcall routine for nfsd sockets.  This runs in the protocol
 * thread and passes waitflag == M_NOWAIT.
 */
void
nfsrv_rcv_upcall(struct socket *so, void *arg, int waitflag)
{
	struct nfssvc_sock *slp = (struct nfssvc_sock *)arg;

	if (slp->ns_needq_upcall == 0) {
		slp->ns_needq_upcall = 1;	/* ok to race */
		lwkt_gettoken(&nfs_token);
		nfsrv_wakenfsd(slp, 1);
		lwkt_reltoken(&nfs_token);
	}
#if 0
	lwkt_gettoken(&slp->ns_token);
	slp->ns_flag |= SLP_NEEDQ;
	nfsrv_rcv(so, arg, waitflag);
	lwkt_reltoken(&slp->ns_token);
#endif
}

/*
 * Process new data on a receive socket.  Essentially do as much as we can
 * non-blocking, else punt and it will be called with M_WAITOK from an nfsd.
 *
 * slp->ns_token is held on call.
 */
void
nfsrv_rcv(struct socket *so, void *arg, int waitflag)
{
	struct nfssvc_sock *slp = (struct nfssvc_sock *)arg;
	struct mbuf *m;
	struct sockaddr *nam;
	struct sockbuf sio;
	int flags, error;
	int nparallel_wakeup = 0;

	ASSERT_LWKT_TOKEN_HELD(&slp->ns_token);

	if ((slp->ns_flag & SLP_VALID) == 0)
		return;

	/*
	 * Do not allow an infinite number of completed RPC records to build
	 * up before we stop reading data from the socket.  Otherwise we could
	 * end up holding onto an unreasonable number of mbufs for requests
	 * waiting for service.
	 *
	 * This should give pretty good feedback to the TCP layer and
	 * prevent a memory crunch for other protocols.
	 *
	 * Note that the same service socket can be dispatched to several
	 * nfs servers simultaneously.  The tcp protocol callback calls us
	 * with M_NOWAIT.  nfsd calls us with M_WAITOK (typically).
	 */
	if (NFSRV_RECLIMIT(slp))
		return;

	/*
	 * Handle protocol specifics to parse an RPC request.
	 * We always pull from the socket using non-blocking I/O.
	 */
	if (so->so_type == SOCK_STREAM) {
		/*
		 * The data has to be read in an orderly fashion from a TCP
		 * stream, unlike a UDP socket.  It is possible for soreceive
		 * and/or nfsrv_getstream() to block, so make sure only one
		 * entity is messing around with the TCP stream at any given
		 * moment.  The receive sockbuf's lock in soreceive is not
		 * sufficient.
		 */
		if (slp->ns_flag & SLP_GETSTREAM)
			return;
		slp->ns_flag |= SLP_GETSTREAM;

		/*
		 * Do soreceive().  Pull out as much data as possible without
		 * blocking.
		 */
		sbinit(&sio, 1000000000);
		flags = MSG_DONTWAIT;
		error = so_pru_soreceive(so, &nam, NULL, &sio, NULL, &flags);
		if (error || sio.sb_mb == NULL) {
			if (error != EWOULDBLOCK)
				slp->ns_flag |= SLP_DISCONN;
			slp->ns_flag &= ~(SLP_GETSTREAM | SLP_NEEDQ);
			goto done;
		}
		m = sio.sb_mb;
		if (slp->ns_rawend) {
			slp->ns_rawend->m_next = m;
			slp->ns_cc += sio.sb_cc;
		} else {
			slp->ns_raw = m;
			slp->ns_cc = sio.sb_cc;
		}
		while (m->m_next)
			m = m->m_next;
		slp->ns_rawend = m;

		/*
		 * Now try and parse as many record(s) as we can out of the
		 * raw stream data.  This will set SLP_DOREC.
		 */
		error = nfsrv_getstream(slp, waitflag, &nparallel_wakeup);
		if (error && error != EWOULDBLOCK)
			slp->ns_flag |= SLP_DISCONN;
		slp->ns_flag &= ~SLP_GETSTREAM;
	} else {
		/*
		 * For UDP soreceive typically pulls just one packet, so
		 * loop to get the whole batch.
		 */
		do {
			sbinit(&sio, 1000000000);
			flags = MSG_DONTWAIT;
			error = so_pru_soreceive(so, &nam, NULL, &sio,
						 NULL, &flags);
			if (sio.sb_mb) {
				struct nfsrv_rec *rec;
				int mf = (waitflag & M_NOWAIT) ?
					 M_NOWAIT : M_WAITOK;
				rec = kmalloc(sizeof(struct nfsrv_rec),
					      M_NFSRVDESC, mf);
				if (!rec) {
					if (nam)
						kfree(nam, M_SONAME);
					m_freem(sio.sb_mb);
					continue;
				}
				nfs_realign(&sio.sb_mb, 10 * NFSX_UNSIGNED);
				rec->nr_address = nam;
				rec->nr_packet = sio.sb_mb;
				STAILQ_INSERT_TAIL(&slp->ns_rec, rec, nr_link);
				++slp->ns_numrec;
				slp->ns_flag |= SLP_DOREC;
				++nparallel_wakeup;
			} else {
				slp->ns_flag &= ~SLP_NEEDQ;
			}
			if (error) {
				if ((so->so_proto->pr_flags & PR_CONNREQUIRED)
				    && error != EWOULDBLOCK) {
					slp->ns_flag |= SLP_DISCONN;
					break;
				}
			}
			if (NFSRV_RECLIMIT(slp))
				break;
		} while (sio.sb_mb);
	}

	/*
	 * If we were upcalled from the tcp protocol layer and we have
	 * fully parsed records ready to go, or there is new data pending,
	 * or something went wrong, try to wake up an nfsd thread to deal
	 * with it.
	 */
done:
	/* XXX this code is currently not executed (nfsrv_rcv_upcall) */
	if (waitflag == M_NOWAIT && (slp->ns_flag & SLP_ACTION_MASK)) {
		lwkt_gettoken(&nfs_token);
		nfsrv_wakenfsd(slp, nparallel_wakeup);
		lwkt_reltoken(&nfs_token);
	}
}

/*
 * Try and extract an RPC request from the mbuf data list received on a
 * stream socket.  The "waitflag" argument indicates whether or not it
 * can sleep.
 */
static int
nfsrv_getstream(struct nfssvc_sock *slp, int waitflag, int *countp)
{
	struct mbuf *m, **mpp;
	char *cp1, *cp2;
	int len;
	struct mbuf *om, *m2, *recm;
	u_int32_t recmark;

	for (;;) {
		if (slp->ns_reclen == 0) {
			if (slp->ns_cc < NFSX_UNSIGNED)
				return (0);
			m = slp->ns_raw;
			if (m->m_len >= NFSX_UNSIGNED) {
				bcopy(mtod(m, caddr_t), (caddr_t)&recmark, NFSX_UNSIGNED);
				m->m_data += NFSX_UNSIGNED;
				m->m_len -= NFSX_UNSIGNED;
			} else {
				cp1 = (caddr_t)&recmark;
				cp2 = mtod(m, caddr_t);
				while (cp1 < ((caddr_t)&recmark) + NFSX_UNSIGNED) {
					while (m->m_len == 0) {
						m = m->m_next;
						cp2 = mtod(m, caddr_t);
					}
					*cp1++ = *cp2++;
					m->m_data++;
					m->m_len--;
				}
			}
			slp->ns_cc -= NFSX_UNSIGNED;
			recmark = ntohl(recmark);
			slp->ns_reclen = recmark & ~0x80000000;
			if (recmark & 0x80000000)
				slp->ns_flag |= SLP_LASTFRAG;
			else
				slp->ns_flag &= ~SLP_LASTFRAG;
			if (slp->ns_reclen > NFS_MAXPACKET || slp->ns_reclen <= 0) {
				log(LOG_ERR, "%s (%d) from nfs client\n",
				    "impossible packet length",
				    slp->ns_reclen);
				return (EPERM);
			}
		}

		/*
		 * Now get the record part.
		 *
		 * Note that slp->ns_reclen may be 0.  Linux sometimes
		 * generates 0-length RPCs.
		 */
		recm = NULL;
		if (slp->ns_cc == slp->ns_reclen) {
			recm = slp->ns_raw;
			slp->ns_raw = slp->ns_rawend = NULL;
			slp->ns_cc = slp->ns_reclen = 0;
		} else if (slp->ns_cc > slp->ns_reclen) {
			len = 0;
			m = slp->ns_raw;
			om = NULL;

			while (len < slp->ns_reclen) {
				if ((len + m->m_len) > slp->ns_reclen) {
					m2 = m_copym(m, 0, slp->ns_reclen - len,
						     waitflag);
					if (m2) {
						if (om) {
							om->m_next = m2;
							recm = slp->ns_raw;
						} else
							recm = m2;
						m->m_data += slp->ns_reclen - len;
						m->m_len -= slp->ns_reclen - len;
						len = slp->ns_reclen;
					} else {
						return (EWOULDBLOCK);
					}
				} else if ((len + m->m_len) == slp->ns_reclen) {
					om = m;
					len += m->m_len;
					m = m->m_next;
					recm = slp->ns_raw;
					om->m_next = NULL;
				} else {
					om = m;
					len += m->m_len;
					m = m->m_next;
				}
			}
			slp->ns_raw = m;
			slp->ns_cc -= len;
			slp->ns_reclen = 0;
		} else {
			return (0);
		}

		/*
		 * Accumulate the fragments into a record.
		 */
		mpp = &slp->ns_frag;
		while (*mpp)
			mpp = &((*mpp)->m_next);
		*mpp = recm;
		if (slp->ns_flag & SLP_LASTFRAG) {
			struct nfsrv_rec *rec;
			int mf = (waitflag & M_NOWAIT) ? M_NOWAIT : M_WAITOK;
			rec = kmalloc(sizeof(struct nfsrv_rec), M_NFSRVDESC, mf);
			if (!rec) {
				m_freem(slp->ns_frag);
			} else {
				nfs_realign(&slp->ns_frag, 10 * NFSX_UNSIGNED);
				rec->nr_address = NULL;
				rec->nr_packet = slp->ns_frag;
				STAILQ_INSERT_TAIL(&slp->ns_rec, rec, nr_link);
				++slp->ns_numrec;
				slp->ns_flag |= SLP_DOREC;
				++*countp;
			}
			slp->ns_frag = NULL;
		}
	}
}
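
/*
 * Illustrative sketch only, not part of the original file: decoding a
 * TCP-side RPC record mark the way nfsrv_getstream() does above.  The high
 * bit flags the last fragment of a record and the low 31 bits give the
 * fragment length.  The helper name is hypothetical and the block is kept
 * inside #if 0 so it is never compiled.
 */
#if 0
static void
example_decode_recmark(u_int32_t wire_mark, int *lastfrag, u_int32_t *fraglen)
{
	u_int32_t mark = ntohl(wire_mark);

	*lastfrag = (mark & 0x80000000) != 0;
	*fraglen = mark & 0x7fffffff;
}
#endif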

#ifdef INVARIANTS

/*
 * Sanity check our mbuf chain.
 */
static void
nfs_checkpkt(struct mbuf *m, int len)
{
	int xlen = 0;

	while (m) {
		xlen += m->m_len;
		m = m->m_next;
	}
	if (xlen != len) {
		panic("nfs_checkpkt: len mismatch %d/%d mbuf %p",
		      xlen, len, m);
	}
}

#else

static void
nfs_checkpkt(struct mbuf *m __unused, int len __unused)
{
}

#endif

/*
 * Parse an RPC header.
 *
 * If the socket is invalid or no records are pending we return ENOBUFS.
 * The caller must deal with NEEDQ races.
 */
int
nfsrv_dorec(struct nfssvc_sock *slp, struct nfsd *nfsd,
	    struct nfsrv_descript **ndp)
{
	struct nfsrv_rec *rec;
	struct mbuf *m;
	struct sockaddr *nam;
	struct nfsrv_descript *nd;
	int error;

	*ndp = NULL;
	if ((slp->ns_flag & SLP_VALID) == 0 || !STAILQ_FIRST(&slp->ns_rec))
		return (ENOBUFS);
	rec = STAILQ_FIRST(&slp->ns_rec);
	STAILQ_REMOVE_HEAD(&slp->ns_rec, nr_link);
	KKASSERT(slp->ns_numrec > 0);
	if (--slp->ns_numrec == 0)
		slp->ns_flag &= ~SLP_DOREC;
	nam = rec->nr_address;
	m = rec->nr_packet;
	kfree(rec, M_NFSRVDESC);
	nd = kmalloc(sizeof(struct nfsrv_descript), M_NFSRVDESC, M_WAITOK);
	nd->nd_md = nd->nd_mrep = m;
	nd->nd_nam2 = nam;
	nd->nd_dpos = mtod(m, caddr_t);
	error = nfs_getreq(nd, nfsd, TRUE);
	if (error) {
		if (nam) {
			kfree(nam, M_SONAME);
		}
		kfree((caddr_t)nd, M_NFSRVDESC);
		return (error);
	}
	*ndp = nd;
	nfsd->nfsd_nd = nd;
	return (0);
}

/*
 * Try to assign service sockets to nfsd threads based on the number
 * of new rpc requests that have been queued on the service socket.
 *
 * If no nfsd's are available or additional requests are pending, set the
 * NFSD_CHECKSLP flag so that one of the running nfsds will go look for
 * the work in the nfssvc_sock list when it is finished processing its
 * current work.  This flag is only cleared when an nfsd cannot find
 * any new work to perform.
 */
void
nfsrv_wakenfsd(struct nfssvc_sock *slp, int nparallel)
{
	struct nfsd *nd;

	if ((slp->ns_flag & SLP_VALID) == 0)
		return;
	if (nparallel <= 1)
		nparallel = 1;
	TAILQ_FOREACH(nd, &nfsd_head, nfsd_chain) {
		if (nd->nfsd_flag & NFSD_WAITING) {
			nd->nfsd_flag &= ~NFSD_WAITING;
			if (nd->nfsd_slp)
				panic("nfsd wakeup");
			nfsrv_slpref(slp);
			nd->nfsd_slp = slp;
			wakeup((caddr_t)nd);
			if (--nparallel == 0)
				break;
		}
	}

	/*
	 * If we couldn't assign slp then the NFSDs are all busy and
	 * we set a flag indicating that there is pending work.
	 */
	if (nparallel)
		nfsd_head_flag |= NFSD_CHECKSLP;
}
#endif /* NFS_NOSERVER */
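
/*
 * Illustrative sketch only, not part of the original file: the basic shape
 * of a consumer draining parsed records with nfsrv_dorec().  A return value
 * of 0 hands back a descriptor to dispatch; ENOBUFS means the socket is
 * invalid or no complete records are queued.  The helper name is
 * hypothetical and the block is kept inside #if 0 so it is never compiled.
 */
#if 0
static void
example_drain_records(struct nfssvc_sock *slp, struct nfsd *nfsd)
{
	struct nfsrv_descript *nd;

	while (nfsrv_dorec(slp, nfsd, &nd) == 0) {
		/* dispatch via nfsrv3_procs[nd->nd_procnum](...) here */
	}
}
#endif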