/*
 * Copyright (c) 1989, 1991, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)nfs_socket.c	8.5 (Berkeley) 3/30/95
 * $FreeBSD: src/sys/nfs/nfs_socket.c,v 1.60.2.6 2003/03/26 01:44:46 alfred Exp $
 * $DragonFly: src/sys/vfs/nfs/nfs_socket.c,v 1.45 2007/05/18 17:05:13 dillon Exp $
 */

/*
 * Socket operations for use by nfs
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/protosw.h>
#include <sys/resourcevar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/socketops.h>
#include <sys/syslog.h>
#include <sys/thread.h>
#include <sys/tprintf.h>
#include <sys/sysctl.h>
#include <sys/signalvar.h>

#include <sys/signal2.h>
#include <sys/mutex2.h>
#include <sys/socketvar2.h>

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/thread2.h>

#include "rpcv2.h"
#include "nfsproto.h"
#include "nfs.h"
#include "xdr_subs.h"
#include "nfsm_subs.h"
#include "nfsmount.h"
#include "nfsnode.h"
#include "nfsrtt.h"

#define TRUE	1
#define FALSE	0

/*
 * RTT calculations are scaled by 256 (8 bits).  A proper fractional
 * RTT will still be calculated even with a slow NFS timer.
 */
#define NFS_SRTT(r)	(r)->r_nmp->nm_srtt[proct[(r)->r_procnum]]
#define NFS_SDRTT(r)	(r)->r_nmp->nm_sdrtt[proct[(r)->r_procnum]]
#define NFS_RTT_SCALE_BITS	8	/* bits */
#define NFS_RTT_SCALE		256	/* value */
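
/*
 * Worked example of the fixed-point scaling: with NFS_RTT_SCALE_BITS == 8,
 * a smoothed RTT of 2.5 ticks is stored as 2.5 * 256 == 640, so the
 * averaging arithmetic in nfs_reply() keeps a fractional component even
 * when individual samples are only a tick or two.
 */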
/*
 * Defines which timer to use for the procnum.
 * 0 - default
 * 1 - getattr
 * 2 - lookup
 * 3 - read
 * 4 - write
 * 5 - commit
 */
static int proct[NFS_NPROCS] = {
	0, 1, 0, 2, 1, 3, 3, 4, 0, 0,	/* 00-09	*/
	0, 0, 0, 0, 0, 0, 3, 3, 0, 0,	/* 10-19	*/
	0, 5, 0, 0, 0, 0,		/* 20-29	*/
};

static int multt[NFS_NPROCS] = {
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1,	/* 00-09	*/
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1,	/* 10-19	*/
	1, 2, 1, 1, 1, 1,		/* 20-29	*/
};

static int nfs_backoff[8] = { 2, 3, 5, 8, 13, 21, 34, 55 };
static int nfs_realign_test;
static int nfs_realign_count;
static int nfs_showrtt;
static int nfs_showrexmit;
int nfs_maxasyncbio = NFS_MAXASYNCBIO;

SYSCTL_DECL(_vfs_nfs);

SYSCTL_INT(_vfs_nfs, OID_AUTO, realign_test, CTLFLAG_RW, &nfs_realign_test, 0, "");
SYSCTL_INT(_vfs_nfs, OID_AUTO, realign_count, CTLFLAG_RW, &nfs_realign_count, 0, "");
SYSCTL_INT(_vfs_nfs, OID_AUTO, showrtt, CTLFLAG_RW, &nfs_showrtt, 0, "");
SYSCTL_INT(_vfs_nfs, OID_AUTO, showrexmit, CTLFLAG_RW, &nfs_showrexmit, 0, "");
SYSCTL_INT(_vfs_nfs, OID_AUTO, maxasyncbio, CTLFLAG_RW, &nfs_maxasyncbio, 0, "");

static int nfs_request_setup(nfsm_info_t info);
static int nfs_request_auth(struct nfsreq *rep);
static int nfs_request_try(struct nfsreq *rep);
static int nfs_request_waitreply(struct nfsreq *rep);
static int nfs_request_processreply(nfsm_info_t info, int);

int nfsrtton = 0;
struct nfsrtt nfsrtt;
struct callout nfs_timer_handle;

static int  nfs_msg (struct thread *, char *, char *);
static int  nfs_rcvlock (struct nfsmount *nmp, struct nfsreq *myreq);
static void nfs_rcvunlock (struct nfsmount *nmp);
static void nfs_realign (struct mbuf **pm, int hsiz);
static int  nfs_receive (struct nfsmount *nmp, struct nfsreq *rep,
			 struct sockaddr **aname, struct mbuf **mp);
static void nfs_softterm (struct nfsreq *rep, int islocked);
static void nfs_hardterm (struct nfsreq *rep, int islocked);
static int  nfs_reconnect (struct nfsmount *nmp, struct nfsreq *rep);
#ifndef NFS_NOSERVER
static int  nfsrv_getstream (struct nfssvc_sock *, int, int *);
static void nfs_timer_req(struct nfsreq *req);
static void nfs_checkpkt(struct mbuf *m, int len);

int (*nfsrv3_procs[NFS_NPROCS]) (struct nfsrv_descript *nd,
				 struct nfssvc_sock *slp,
				 struct thread *td,
				 struct mbuf **mreqp) = {
	nfsrv_null,
	nfsrv_getattr,
	nfsrv_setattr,
	nfsrv_lookup,
	nfsrv3_access,
	nfsrv_readlink,
	nfsrv_read,
	nfsrv_write,
	nfsrv_create,
	nfsrv_mkdir,
	nfsrv_symlink,
	nfsrv_mknod,
	nfsrv_remove,
	nfsrv_rmdir,
	nfsrv_rename,
	nfsrv_link,
	nfsrv_readdir,
	nfsrv_readdirplus,
	nfsrv_statfs,
	nfsrv_fsinfo,
	nfsrv_pathconf,
	nfsrv_commit,
	nfsrv_noop,
	nfsrv_noop,
	nfsrv_noop,
	nfsrv_noop
};
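
/*
 * The table is indexed by RPC procedure number, so e.g. nfsrv3_procs[6]
 * dispatches to nfsrv_read and nfsrv3_procs[21] to nfsrv_commit (the NFSv3
 * READ and COMMIT procedures); the trailing nfsrv_noop entries pad the
 * table out to NFS_NPROCS.
 */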
#endif /* NFS_NOSERVER */

/*
 * Initialize sockets and congestion for a new NFS connection.
 * We do not free the sockaddr if error.
 */
int
nfs_connect(struct nfsmount *nmp, struct nfsreq *rep)
{
	struct socket *so;
	int error;
	struct sockaddr *saddr;
	struct sockaddr_in *sin;
	struct thread *td = &thread0;	/* only used for socreate and sobind */

	nmp->nm_so = so = NULL;
	if (nmp->nm_flag & NFSMNT_FORCE)
		return (EINVAL);
	saddr = nmp->nm_nam;
	error = socreate(saddr->sa_family, &so, nmp->nm_sotype,
			 nmp->nm_soproto, td);
	if (error)
		goto bad;
	nmp->nm_soflags = so->so_proto->pr_flags;

	/*
	 * Some servers require that the client port be a reserved port
	 * number.
	 */
	if (saddr->sa_family == AF_INET && (nmp->nm_flag & NFSMNT_RESVPORT)) {
		struct sockopt sopt;
		int ip;
		struct sockaddr_in ssin;

		bzero(&sopt, sizeof sopt);
		ip = IP_PORTRANGE_LOW;
		sopt.sopt_level = IPPROTO_IP;
		sopt.sopt_name = IP_PORTRANGE;
		sopt.sopt_val = (void *)&ip;
		sopt.sopt_valsize = sizeof(ip);
		sopt.sopt_td = NULL;
		error = sosetopt(so, &sopt);
		if (error)
			goto bad;
		bzero(&ssin, sizeof ssin);
		sin = &ssin;
		sin->sin_len = sizeof (struct sockaddr_in);
		sin->sin_family = AF_INET;
		sin->sin_addr.s_addr = INADDR_ANY;
		sin->sin_port = htons(0);
		error = sobind(so, (struct sockaddr *)sin, td);
		if (error)
			goto bad;
		bzero(&sopt, sizeof sopt);
		ip = IP_PORTRANGE_DEFAULT;
		sopt.sopt_level = IPPROTO_IP;
		sopt.sopt_name = IP_PORTRANGE;
		sopt.sopt_val = (void *)&ip;
		sopt.sopt_valsize = sizeof(ip);
		sopt.sopt_td = NULL;
		error = sosetopt(so, &sopt);
		if (error)
			goto bad;
	}

	/*
	 * Protocols that do not require connections may be optionally left
	 * unconnected for servers that reply from a port other than
	 * NFS_PORT.
	 */
	if (nmp->nm_flag & NFSMNT_NOCONN) {
		if (nmp->nm_soflags & PR_CONNREQUIRED) {
			error = ENOTCONN;
			goto bad;
		}
	} else {
		error = soconnect(so, nmp->nm_nam, td);
		if (error)
			goto bad;

		/*
		 * Wait for the connection to complete.  Cribbed from the
		 * connect system call but with the wait timing out so
		 * that interruptible mounts don't hang here for a long time.
		 */
		crit_enter();
		while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) {
			(void) tsleep((caddr_t)&so->so_timeo, 0,
				      "nfscon", 2 * hz);
			if ((so->so_state & SS_ISCONNECTING) &&
			    so->so_error == 0 && rep &&
			    (error = nfs_sigintr(nmp, rep, rep->r_td)) != 0) {
				soclrstate(so, SS_ISCONNECTING);
				crit_exit();
				goto bad;
			}
		}
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			crit_exit();
			goto bad;
		}
		crit_exit();
	}
	so->so_rcv.ssb_timeo = (5 * hz);
	so->so_snd.ssb_timeo = (5 * hz);

	/*
	 * Get buffer reservation size from sysctl, but impose reasonable
	 * limits.
	 */
	if (nmp->nm_sotype == SOCK_STREAM) {
		if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
			struct sockopt sopt;
			int val;

			bzero(&sopt, sizeof sopt);
			sopt.sopt_level = SOL_SOCKET;
			sopt.sopt_name = SO_KEEPALIVE;
			sopt.sopt_val = &val;
			sopt.sopt_valsize = sizeof val;
			val = 1;
			sosetopt(so, &sopt);
		}
		if (so->so_proto->pr_protocol == IPPROTO_TCP) {
			struct sockopt sopt;
			int val;

			bzero(&sopt, sizeof sopt);
			sopt.sopt_level = IPPROTO_TCP;
			sopt.sopt_name = TCP_NODELAY;
			sopt.sopt_val = &val;
			sopt.sopt_valsize = sizeof val;
			val = 1;
			sosetopt(so, &sopt);
		}
	}
	error = soreserve(so, nfs_soreserve, nfs_soreserve, NULL);
	if (error)
		goto bad;
	atomic_set_int(&so->so_rcv.ssb_flags, SSB_NOINTR);
	atomic_set_int(&so->so_snd.ssb_flags, SSB_NOINTR);

	/* Initialize other non-zero congestion variables */
	nmp->nm_srtt[0] = nmp->nm_srtt[1] = nmp->nm_srtt[2] =
		nmp->nm_srtt[3] = (NFS_TIMEO << NFS_RTT_SCALE_BITS);
	nmp->nm_sdrtt[0] = nmp->nm_sdrtt[1] = nmp->nm_sdrtt[2] =
		nmp->nm_sdrtt[3] = 0;
	nmp->nm_maxasync_scaled = NFS_MINASYNC_SCALED;
	nmp->nm_timeouts = 0;

	/*
	 * Assign nm_so last.  The moment nm_so is assigned the nfs_timer()
	 * can mess with the socket.
	 */
	nmp->nm_so = so;
	return (0);

bad:
	if (so) {
		soshutdown(so, SHUT_RDWR);
		soclose(so, FNONBLOCK);
	}
	return (error);
}

/*
 * Reconnect routine:
 * Called when a connection is broken on a reliable protocol.
 * - clean up the old socket
 * - nfs_connect() again
 * - set R_NEEDSXMIT for all outstanding requests on mount point
 * If this fails the mount point is DEAD!
 * nb: Must be called with the nfs_sndlock() set on the mount point.
 */
static int
nfs_reconnect(struct nfsmount *nmp, struct nfsreq *rep)
{
	struct nfsreq *req;
	int error;

	nfs_disconnect(nmp);
	if (nmp->nm_rxstate >= NFSSVC_STOPPING)
		return (EINTR);
	while ((error = nfs_connect(nmp, rep)) != 0) {
		if (error == EINTR || error == ERESTART)
			return (EINTR);
		if (error == EINVAL)
			return (error);
		if (nmp->nm_rxstate >= NFSSVC_STOPPING)
			return (EINTR);
		(void) tsleep((caddr_t)&lbolt, 0, "nfscon", 0);
	}

	/*
	 * Loop through outstanding request list and fix up all requests
	 * on old socket.
	 */
	crit_enter();
	TAILQ_FOREACH(req, &nmp->nm_reqq, r_chain) {
		KKASSERT(req->r_nmp == nmp);
		req->r_flags |= R_NEEDSXMIT;
	}
	crit_exit();
	return (0);
}

/*
 * NFS disconnect.  Clean up and unlink.
 */
void
nfs_disconnect(struct nfsmount *nmp)
{
	struct socket *so;

	if (nmp->nm_so) {
		so = nmp->nm_so;
		nmp->nm_so = NULL;
		soshutdown(so, SHUT_RDWR);
		soclose(so, FNONBLOCK);
	}
}

void
nfs_safedisconnect(struct nfsmount *nmp)
{
	nfs_rcvlock(nmp, NULL);
	nfs_disconnect(nmp);
	nfs_rcvunlock(nmp);
}

/*
 * This is the nfs send routine.  For connection based socket types, it
 * must be called with an nfs_sndlock() on the socket.
 * "rep == NULL" indicates that it has been called from a server.
 * For the client side:
 * - return EINTR if the RPC is terminated, 0 otherwise
 * - set R_NEEDSXMIT if the send fails for any reason
 * - do any cleanup required by recoverable socket errors (?)
 * For the server side:
 * - return EINTR or ERESTART if interrupted by a signal
 * - return EPIPE if a connection is lost for connection based sockets (TCP...)
 * - do any cleanup required by recoverable socket errors (?)
 */
int
nfs_send(struct socket *so, struct sockaddr *nam, struct mbuf *top,
	 struct nfsreq *rep)
{
	struct sockaddr *sendnam;
	int error, soflags, flags;

	if (rep) {
		if (rep->r_flags & R_SOFTTERM) {
			m_freem(top);
			return (EINTR);
		}
		if ((so = rep->r_nmp->nm_so) == NULL) {
			rep->r_flags |= R_NEEDSXMIT;
			m_freem(top);
			return (0);
		}
		rep->r_flags &= ~R_NEEDSXMIT;
		soflags = rep->r_nmp->nm_soflags;
	} else {
		soflags = so->so_proto->pr_flags;
	}
	if ((soflags & PR_CONNREQUIRED) || (so->so_state & SS_ISCONNECTED))
		sendnam = NULL;
	else
		sendnam = nam;
	if (so->so_type == SOCK_SEQPACKET)
		flags = MSG_EOR;
	else
		flags = 0;

	/*
	 * calls pru_sosend -> sosend -> so_pru_send -> netrpc
	 */
	error = so_pru_sosend(so, sendnam, NULL, top, NULL, flags,
			      curthread /*XXX*/);

	/*
	 * ENOBUFS for dgram sockets is transient and non fatal.
	 * No need to log, and no need to break a soft mount.
	 */
	if (error == ENOBUFS && so->so_type == SOCK_DGRAM) {
		error = 0;
		/*
		 * do backoff retransmit on client
		 */
		if (rep) {
			if ((rep->r_nmp->nm_state & NFSSTA_SENDSPACE) == 0) {
				rep->r_nmp->nm_state |= NFSSTA_SENDSPACE;
				kprintf("Warning: NFS: Insufficient sendspace "
					"(%lu),\n"
					"\t You must increase vfs.nfs.soreserve "
					"or decrease vfs.nfs.maxasyncbio\n",
					so->so_snd.ssb_hiwat);
			}
			rep->r_flags |= R_NEEDSXMIT;
		}
	}

	if (error) {
		if (rep) {
			log(LOG_INFO, "nfs send error %d for server %s\n",
			    error,
			    rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname);
			/*
			 * Deal with errors for the client side.
			 */
			if (rep->r_flags & R_SOFTTERM)
				error = EINTR;
			else
				rep->r_flags |= R_NEEDSXMIT;
		} else {
			log(LOG_INFO, "nfsd send error %d\n", error);
		}

		/*
		 * Handle any recoverable (soft) socket errors here. (?)
		 */
		if (error != EINTR && error != ERESTART &&
		    error != EWOULDBLOCK && error != EPIPE)
			error = 0;
	}
	return (error);
}

/*
 * Receive a Sun RPC Request/Reply.  For SOCK_DGRAM, the work is all
 * done by soreceive(), but for SOCK_STREAM we must deal with the Record
 * Mark and consolidate the data into a new mbuf list.
 * nb: Sometimes TCP passes the data up to soreceive() in long lists of
 *     small mbufs.
 * For SOCK_STREAM we must be very careful to read an entire record once
 * we have read any of it, even if the system call has been interrupted.
 */
static int
nfs_receive(struct nfsmount *nmp, struct nfsreq *rep,
	    struct sockaddr **aname, struct mbuf **mp)
{
	struct socket *so;
	struct sockbuf sio;
	struct uio auio;
	struct iovec aio;
	struct mbuf *m;
	struct mbuf *control;
	u_int32_t len;
	struct sockaddr **getnam;
	int error, sotype, rcvflg;
	struct thread *td = curthread;	/* XXX */

	/*
	 * Set up arguments for soreceive()
	 */
	*mp = NULL;
	*aname = NULL;
	sotype = nmp->nm_sotype;

	/*
	 * For reliable protocols, lock against other senders/receivers
	 * in case a reconnect is necessary.
	 * For SOCK_STREAM, first get the Record Mark to find out how much
	 * more there is to get.
	 * We must lock the socket against other receivers
	 * until we have an entire rpc request/reply.
	 */
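	/*
	 * A Sun RPC record mark (RFC 1831) is one 32-bit big-endian word:
	 * the high bit is the last-fragment flag and the low 31 bits are
	 * the fragment length, which is why the stream code below masks
	 * the extracted length with ~0x80000000.
	 */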
	if (sotype != SOCK_DGRAM) {
		error = nfs_sndlock(nmp, rep);
		if (error)
			return (error);
tryagain:
		/*
		 * Check for fatal errors and resending request.
		 */
		/*
		 * Ugh: If a reconnect attempt just happened, nm_so
		 * would have changed.  NULL indicates a failed
		 * attempt that has essentially shut down this
		 * mount point.
		 */
		if (rep && (rep->r_mrep || (rep->r_flags & R_SOFTTERM))) {
			nfs_sndunlock(nmp);
			return (EINTR);
		}
		so = nmp->nm_so;
		if (so == NULL) {
			error = nfs_reconnect(nmp, rep);
			if (error) {
				nfs_sndunlock(nmp);
				return (error);
			}
			goto tryagain;
		}
		while (rep && (rep->r_flags & R_NEEDSXMIT)) {
			m = m_copym(rep->r_mreq, 0, M_COPYALL, MB_WAIT);
			nfsstats.rpcretries++;
			error = nfs_send(so, rep->r_nmp->nm_nam, m, rep);
			if (error) {
				if (error == EINTR || error == ERESTART ||
				    (error = nfs_reconnect(nmp, rep)) != 0) {
					nfs_sndunlock(nmp);
					return (error);
				}
				goto tryagain;
			}
		}
		nfs_sndunlock(nmp);
		if (sotype == SOCK_STREAM) {
			/*
			 * Get the length marker from the stream
			 */
			aio.iov_base = (caddr_t)&len;
			aio.iov_len = sizeof(u_int32_t);
			auio.uio_iov = &aio;
			auio.uio_iovcnt = 1;
			auio.uio_segflg = UIO_SYSSPACE;
			auio.uio_rw = UIO_READ;
			auio.uio_offset = 0;
			auio.uio_resid = sizeof(u_int32_t);
			auio.uio_td = td;
			do {
				rcvflg = MSG_WAITALL;
				error = so_pru_soreceive(so, NULL, &auio, NULL,
							 NULL, &rcvflg);
				if (error == EWOULDBLOCK && rep) {
					if (rep->r_flags & R_SOFTTERM)
						return (EINTR);
				}
			} while (error == EWOULDBLOCK);

			if (error == 0 && auio.uio_resid > 0) {
				/*
				 * Only log short packets if not EOF
				 */
				if (auio.uio_resid != sizeof(u_int32_t))
					log(LOG_INFO,
					    "short receive (%d/%d) from nfs server %s\n",
					    (int)(sizeof(u_int32_t) - auio.uio_resid),
					    (int)sizeof(u_int32_t),
					    nmp->nm_mountp->mnt_stat.f_mntfromname);
				error = EPIPE;
			}
			if (error)
				goto errout;
			len = ntohl(len) & ~0x80000000;
			/*
			 * This is SERIOUS!  We are out of sync with the
			 * sender and forcing a disconnect/reconnect is all
			 * I can do.
			 */
			if (len > NFS_MAXPACKET) {
				log(LOG_ERR, "%s (%d) from nfs server %s\n",
				    "impossible packet length",
				    len,
				    nmp->nm_mountp->mnt_stat.f_mntfromname);
				error = EFBIG;
				goto errout;
			}

			/*
			 * Get the rest of the packet as an mbuf chain
			 */
			sbinit(&sio, len);
			do {
				rcvflg = MSG_WAITALL;
				error = so_pru_soreceive(so, NULL, NULL, &sio,
							 NULL, &rcvflg);
			} while (error == EWOULDBLOCK || error == EINTR ||
				 error == ERESTART);
			if (error == 0 && sio.sb_cc != len) {
				if (sio.sb_cc != 0)
					log(LOG_INFO,
					    "short receive (%zu/%d) from nfs server %s\n",
					    (size_t)sio.sb_cc, len,
					    nmp->nm_mountp->mnt_stat.f_mntfromname);
				error = EPIPE;
			}
			*mp = sio.sb_mb;
		} else {
			/*
			 * Non-stream, so get the whole packet by not
			 * specifying MSG_WAITALL and by specifying a large
			 * length.
			 *
			 * We have no use for control msg., but must grab them
			 * and then throw them away so we know what is going
			 * on.
			 */
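			/*
			 * The 100000000 byte limit handed to sbinit() here
			 * (and in the datagram path further below) is
			 * effectively "no limit"; it merely has to exceed
			 * any packet the server could send so soreceive()
			 * returns the whole message in one call.
			 */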
			sbinit(&sio, 100000000);
			do {
				rcvflg = 0;
				error = so_pru_soreceive(so, NULL, NULL, &sio,
							 &control, &rcvflg);
				if (control)
					m_freem(control);
				if (error == EWOULDBLOCK && rep) {
					if (rep->r_flags & R_SOFTTERM) {
						m_freem(sio.sb_mb);
						return (EINTR);
					}
				}
			} while (error == EWOULDBLOCK ||
				 (error == 0 && sio.sb_mb == NULL && control));
			if ((rcvflg & MSG_EOR) == 0)
				kprintf("Egad!!\n");
			if (error == 0 && sio.sb_mb == NULL)
				error = EPIPE;
			len = sio.sb_cc;
			*mp = sio.sb_mb;
		}
errout:
		if (error && error != EINTR && error != ERESTART) {
			m_freem(*mp);
			*mp = NULL;
			if (error != EPIPE) {
				log(LOG_INFO,
				    "receive error %d from nfs server %s\n",
				    error,
				    nmp->nm_mountp->mnt_stat.f_mntfromname);
			}
			error = nfs_sndlock(nmp, rep);
			if (!error) {
				error = nfs_reconnect(nmp, rep);
				if (!error)
					goto tryagain;
				else
					nfs_sndunlock(nmp);
			}
		}
	} else {
		if ((so = nmp->nm_so) == NULL)
			return (EACCES);
		if (so->so_state & SS_ISCONNECTED)
			getnam = NULL;
		else
			getnam = aname;
		sbinit(&sio, 100000000);
		do {
			rcvflg = 0;
			error = so_pru_soreceive(so, getnam, NULL, &sio,
						 NULL, &rcvflg);
			if (error == EWOULDBLOCK && rep &&
			    (rep->r_flags & R_SOFTTERM)) {
				m_freem(sio.sb_mb);
				return (EINTR);
			}
		} while (error == EWOULDBLOCK);

		len = sio.sb_cc;
		*mp = sio.sb_mb;

		/*
		 * A shutdown may result in no error and no mbuf.
		 * Convert to EPIPE.
		 */
		if (*mp == NULL && error == 0)
			error = EPIPE;
	}
	if (error) {
		m_freem(*mp);
		*mp = NULL;
	}

	/*
	 * Search for any mbufs that are not a multiple of 4 bytes long
	 * or with m_data not longword aligned.
	 * These could cause pointer alignment problems, so copy them to
	 * well aligned mbufs.
	 */
	nfs_realign(mp, 5 * NFSX_UNSIGNED);
	return (error);
}

/*
 * Implement receipt of reply on a socket.
 *
 * We must search through the list of received datagrams matching them
 * with outstanding requests using the xid, until ours is found.
 *
 * If myrep is NULL we process packets on the socket until
 * interrupted or until nm_reqrxq is non-empty.
 */
/* ARGSUSED */
int
nfs_reply(struct nfsmount *nmp, struct nfsreq *myrep)
{
	struct nfsreq *rep;
	struct sockaddr *nam;
	u_int32_t rxid;
	u_int32_t *tl;
	int error;
	struct nfsm_info info;

	/*
	 * Loop around until we get our own reply
	 */
	for (;;) {
		/*
		 * Lock against other receivers so that I don't get stuck in
		 * sbwait() after someone else has received my reply for me.
		 * Also necessary for connection based protocols to avoid
		 * race conditions during a reconnect.
		 *
		 * If nfs_rcvlock() returns EALREADY, that means that
		 * the reply has already been received by another
		 * process and we can return immediately.  In this
		 * case, the lock is not taken to avoid races with
		 * other processes.
		 */
		info.mrep = NULL;

		error = nfs_rcvlock(nmp, myrep);
		if (error == EALREADY)
			return (0);
		if (error)
			return (error);

		/*
		 * If myrep is NULL we are the receiver helper thread.
		 * Stop waiting for incoming replies if there are
		 * messages sitting on reqrxq that we need to process,
		 * or if a shutdown request is pending.
		 */
		if (myrep == NULL && (TAILQ_FIRST(&nmp->nm_reqrxq) ||
		    nmp->nm_rxstate > NFSSVC_PENDING)) {
			nfs_rcvunlock(nmp);
			return (EWOULDBLOCK);
		}

		/*
		 * Get the next Rpc reply off the socket
		 *
		 * We cannot release the receive lock until we've
		 * filled in rep->r_mrep, otherwise a waiting
		 * thread may deadlock in soreceive with no incoming
		 * packets expected.
		 */
		error = nfs_receive(nmp, myrep, &nam, &info.mrep);
		if (error) {
			/*
			 * Ignore routing errors on connectionless protocols??
			 */
			nfs_rcvunlock(nmp);
			if (NFSIGNORE_SOERROR(nmp->nm_soflags, error)) {
				if (nmp->nm_so == NULL)
					return (error);
				nmp->nm_so->so_error = 0;
				continue;
			}
			return (error);
		}
		if (nam)
			FREE(nam, M_SONAME);

		/*
		 * Get the xid and check that it is an rpc reply
		 */
		info.md = info.mrep;
		info.dpos = mtod(info.md, caddr_t);
		NULLOUT(tl = nfsm_dissect(&info, 2 * NFSX_UNSIGNED));
		rxid = *tl++;
		if (*tl != rpc_reply) {
			nfsstats.rpcinvalid++;
			m_freem(info.mrep);
			info.mrep = NULL;
nfsmout:
			nfs_rcvunlock(nmp);
			continue;
		}

		/*
		 * Loop through the request list to match up the reply.
		 * Iff no match, just drop the datagram.  On match, set
		 * r_mrep atomically to prevent the timer from messing
		 * around with the request after we have exited the critical
		 * section.
		 */
		crit_enter();
		TAILQ_FOREACH(rep, &nmp->nm_reqq, r_chain) {
			if (rep->r_mrep == NULL && rxid == rep->r_xid)
				break;
		}

		/*
		 * Fill in the rest of the reply if we found a match.
		 *
		 * Deal with duplicate responses if there was no match.
		 */
		if (rep) {
			rep->r_md = info.md;
			rep->r_dpos = info.dpos;
			if (nfsrtton) {
				struct rttl *rt;

				rt = &nfsrtt.rttl[nfsrtt.pos];
				rt->proc = rep->r_procnum;
				rt->rto = 0;
				rt->sent = 0;
				rt->cwnd = nmp->nm_maxasync_scaled;
				rt->srtt = nmp->nm_srtt[proct[rep->r_procnum] - 1];
				rt->sdrtt = nmp->nm_sdrtt[proct[rep->r_procnum] - 1];
				rt->fsid = nmp->nm_mountp->mnt_stat.f_fsid;
				getmicrotime(&rt->tstamp);
				if (rep->r_flags & R_TIMING)
					rt->rtt = rep->r_rtt;
				else
					rt->rtt = 1000000;
				nfsrtt.pos = (nfsrtt.pos + 1) % NFSRTTLOGSIZ;
			}

			/*
			 * New congestion control is based only on async
			 * requests.
			 */
			if (nmp->nm_maxasync_scaled < NFS_MAXASYNC_SCALED)
				++nmp->nm_maxasync_scaled;
			if (rep->r_flags & R_SENT) {
				rep->r_flags &= ~R_SENT;
			}
			/*
			 * Update rtt using a gain of 0.125 on the mean
			 * and a gain of 0.25 on the deviation.
			 *
			 * NOTE SRTT/SDRTT are only good if R_TIMING is set.
			 */
			if ((rep->r_flags & R_TIMING) && rep->r_rexmit == 0) {
				/*
				 * Since the timer resolution of
				 * NFS_HZ is so coarse, it can often
				 * result in r_rtt == 0.  Since
				 * r_rtt == N means that the actual
				 * rtt is between N+dt and N+2-dt ticks,
				 * add 1.
				 */
				int n;
				int d;

#define NFSRSB	NFS_RTT_SCALE_BITS
				n = ((NFS_SRTT(rep) * 7) +
				     (rep->r_rtt << NFSRSB)) >> 3;
				d = n - NFS_SRTT(rep);
				NFS_SRTT(rep) = n;
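
				/*
				 * In effect: srtt += (rtt_scaled - srtt) / 8.
				 * For example, with NFS_SRTT(rep) == 512
				 * (2.0 ticks scaled) and a 4-tick sample
				 * (1024 scaled), n = (512 * 7 + 1024) >> 3
				 * == 576, i.e. the estimate moves 1/8 of the
				 * way toward the new sample.
				 */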
				/*
				 * Don't let the jitter calculation decay
				 * too quickly, but we want a fast rampup.
				 */
				if (d < 0)
					d = -d;
				d <<= NFSRSB;
				if (d < NFS_SDRTT(rep))
					n = ((NFS_SDRTT(rep) * 15) + d) >> 4;
				else
					n = ((NFS_SDRTT(rep) * 3) + d) >> 2;
				NFS_SDRTT(rep) = n;
#undef NFSRSB
			}
			nmp->nm_timeouts = 0;
			rep->r_mrep = info.mrep;
			nfs_hardterm(rep, 0);
		} else {
			/*
			 * Extract vers, prog, nfsver, procnum.  A duplicate
			 * response means we didn't wait long enough so
			 * we increase the SRTT to avoid future spurious
			 * timeouts.
			 */
			u_int procnum = nmp->nm_lastreprocnum;
			int n;

			if (procnum < NFS_NPROCS && proct[procnum]) {
				if (nfs_showrexmit)
					kprintf("D");
				n = nmp->nm_srtt[proct[procnum]];
				n += NFS_ASYSCALE * NFS_HZ;
				if (n < NFS_ASYSCALE * NFS_HZ * 10)
					n = NFS_ASYSCALE * NFS_HZ * 10;
				nmp->nm_srtt[proct[procnum]] = n;
			}
		}
		nfs_rcvunlock(nmp);
		crit_exit();

		/*
		 * If not matched to a request, drop it.
		 * If it's mine, get out.
		 */
		if (rep == NULL) {
			nfsstats.rpcunexpected++;
			m_freem(info.mrep);
			info.mrep = NULL;
		} else if (rep == myrep) {
			if (rep->r_mrep == NULL)
				panic("nfsreply nil");
			return (0);
		}
	}
}

/*
 * Run the request state machine until the target state is reached
 * or a fatal error occurs.  The target state is not run.  Specifying
 * a target of NFSM_STATE_DONE runs the state machine until the rpc
 * is complete.
 *
 * EINPROGRESS is returned for all states other than the DONE state,
 * indicating that the rpc is still in progress.
 */
int
nfs_request(struct nfsm_info *info, nfsm_state_t bstate, nfsm_state_t estate)
{
	struct nfsreq *req;

	while (info->state >= bstate && info->state < estate) {
		switch(info->state) {
		case NFSM_STATE_SETUP:
			/*
			 * Setup the nfsreq.  Any error which occurs during
			 * this state is fatal.
			 */
			info->error = nfs_request_setup(info);
			if (info->error) {
				info->state = NFSM_STATE_DONE;
				return (info->error);
			} else {
				req = info->req;
				req->r_mrp = &info->mrep;
				req->r_mdp = &info->md;
				req->r_dposp = &info->dpos;
				info->state = NFSM_STATE_AUTH;
			}
			break;
		case NFSM_STATE_AUTH:
			/*
			 * Authenticate the nfsreq.  Any error which occurs
			 * during this state is fatal.
			 */
			info->error = nfs_request_auth(info->req);
			if (info->error) {
				info->state = NFSM_STATE_DONE;
				return (info->error);
			} else {
				info->state = NFSM_STATE_TRY;
			}
			break;
		case NFSM_STATE_TRY:
			/*
			 * Transmit or retransmit attempt.  An error in this
			 * state is ignored and we always move on to the
			 * next state.
			 *
			 * This can trivially race the receiver if the
			 * request is asynchronous.  nfs_request_try()
			 * will thus set the state for us and we
			 * must also return immediately if we are
			 * running an async state machine, because
			 * info can become invalid due to races after
			 * try() returns.
			 */
			if (info->req->r_flags & R_ASYNC) {
				nfs_request_try(info->req);
				if (estate == NFSM_STATE_WAITREPLY)
					return (EINPROGRESS);
			} else {
				nfs_request_try(info->req);
				info->state = NFSM_STATE_WAITREPLY;
			}
			break;
		case NFSM_STATE_WAITREPLY:
			/*
			 * Wait for a reply or timeout and move on to the
			 * next state.  The error returned by this state
			 * is passed to the processing code in the next
			 * state.
			 */
			info->error = nfs_request_waitreply(info->req);
			info->state = NFSM_STATE_PROCESSREPLY;
			break;
		case NFSM_STATE_PROCESSREPLY:
			/*
			 * Process the reply or timeout.  Errors which occur
			 * in this state may cause the state machine to
			 * go back to an earlier state, and are fatal
			 * otherwise.
			 */
			info->error = nfs_request_processreply(info,
							       info->error);
			switch(info->error) {
			case ENEEDAUTH:
				info->state = NFSM_STATE_AUTH;
				break;
			case EAGAIN:
				info->state = NFSM_STATE_TRY;
				break;
			default:
				/*
				 * Operation complete, with or without an
				 * error.  We are done.
				 */
				info->req = NULL;
				info->state = NFSM_STATE_DONE;
				return (info->error);
			}
			break;
		case NFSM_STATE_DONE:
			/*
			 * Shouldn't be reached
			 */
			return (info->error);
			/* NOT REACHED */
		}
	}

	/*
	 * If we are done return the error code (if any).
	 * Otherwise return EINPROGRESS.
	 */
	if (info->state == NFSM_STATE_DONE)
		return (info->error);
	return (EINPROGRESS);
}

/*
 * nfs_request - goes something like this
 *	- fill in request struct
 *	- links it into list
 *	- calls nfs_send() for first transmit
 *	- calls nfs_receive() to get reply
 *	- break down rpc header and return with nfs reply pointed to
 *	  by mrep or error
 * nb: always frees up mreq mbuf list
 */
static int
nfs_request_setup(nfsm_info_t info)
{
	struct nfsreq *req;
	struct nfsmount *nmp;
	struct mbuf *m;
	int i;

	/*
	 * Reject requests while attempting a forced unmount.
	 */
	if (info->vp->v_mount->mnt_kern_flag & MNTK_UNMOUNTF) {
		m_freem(info->mreq);
		info->mreq = NULL;
		return (ESTALE);
	}
	nmp = VFSTONFS(info->vp->v_mount);
	req = kmalloc(sizeof(struct nfsreq), M_NFSREQ, M_WAITOK);
	req->r_nmp = nmp;
	req->r_vp = info->vp;
	req->r_td = info->td;
	req->r_procnum = info->procnum;
	req->r_mreq = NULL;
	req->r_cred = info->cred;

	i = 0;
	m = info->mreq;
	while (m) {
		i += m->m_len;
		m = m->m_next;
	}
	req->r_mrest = info->mreq;
	req->r_mrest_len = i;

	/*
	 * The presence of a non-NULL r_info in req indicates
	 * async completion via our helper threads.  See the receiver
	 * code.
	 */
	if (info->bio) {
		req->r_info = info;
		req->r_flags = R_ASYNC;
	} else {
		req->r_info = NULL;
		req->r_flags = 0;
	}
	info->req = req;
	return (0);
}

static int
nfs_request_auth(struct nfsreq *rep)
{
	struct nfsmount *nmp = rep->r_nmp;
	struct mbuf *m;
	char nickv[RPCX_NICKVERF];
	int error = 0, auth_len, auth_type;
	int verf_len;
	u_int32_t xid;
	char *auth_str, *verf_str;
	struct ucred *cred;

	cred = rep->r_cred;
	rep->r_failed_auth = 0;

	/*
	 * Get the RPC header with authorization.
	 */
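	/*
	 * Rough sizing example for the AUTH_UNIX case below: the five fixed
	 * words cover the stamp, a (presumably empty) machine name, uid,
	 * gid and the gid count, so a credential carrying 4 supplementary
	 * groups encodes as (4 << 2) + 5 * NFSX_UNSIGNED == 36 bytes.
	 */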
	verf_str = auth_str = NULL;
	if (nmp->nm_flag & NFSMNT_KERB) {
		verf_str = nickv;
		verf_len = sizeof (nickv);
		auth_type = RPCAUTH_KERB4;
		bzero((caddr_t)rep->r_key, sizeof(rep->r_key));
		if (rep->r_failed_auth ||
		    nfs_getnickauth(nmp, cred, &auth_str, &auth_len,
				    verf_str, verf_len)) {
			error = nfs_getauth(nmp, rep, cred, &auth_str,
					    &auth_len, verf_str, &verf_len,
					    rep->r_key);
			if (error) {
				m_freem(rep->r_mrest);
				rep->r_mrest = NULL;
				kfree((caddr_t)rep, M_NFSREQ);
				return (error);
			}
		}
	} else {
		auth_type = RPCAUTH_UNIX;
		if (cred->cr_ngroups < 1)
			panic("nfsreq nogrps");
		auth_len = ((((cred->cr_ngroups - 1) > nmp->nm_numgrps) ?
			nmp->nm_numgrps : (cred->cr_ngroups - 1)) << 2) +
			5 * NFSX_UNSIGNED;
	}
	if (rep->r_mrest)
		nfs_checkpkt(rep->r_mrest, rep->r_mrest_len);
	m = nfsm_rpchead(cred, nmp->nm_flag, rep->r_procnum, auth_type,
			 auth_len, auth_str, verf_len, verf_str,
			 rep->r_mrest, rep->r_mrest_len, &rep->r_mheadend,
			 &xid);
	rep->r_mrest = NULL;
	if (auth_str)
		kfree(auth_str, M_TEMP);

	/*
	 * For stream protocols, insert a Sun RPC Record Mark.
	 */
	if (nmp->nm_sotype == SOCK_STREAM) {
		M_PREPEND(m, NFSX_UNSIGNED, MB_WAIT);
		if (m == NULL) {
			kfree(rep, M_NFSREQ);
			return (ENOBUFS);
		}
		*mtod(m, u_int32_t *) = htonl(0x80000000 |
			(m->m_pkthdr.len - NFSX_UNSIGNED));
	}

	nfs_checkpkt(m, m->m_pkthdr.len);

	rep->r_mreq = m;
	rep->r_xid = xid;
	return (0);
}

static int
nfs_request_try(struct nfsreq *rep)
{
	struct nfsmount *nmp = rep->r_nmp;
	struct mbuf *m2;
	int error;

	/*
	 * Request is not on any queue, only the owner has access to it
	 * so it should not be locked by anyone atm.
	 *
	 * Interlock to prevent races.  While locked the only remote
	 * action possible is for r_mrep to be set (once we enqueue it).
	 */
	if (rep->r_flags == 0xdeadc0de) {
		print_backtrace(-1);
		panic("flags nbad\n");
	}
	KKASSERT((rep->r_flags & (R_LOCKED | R_ONREQQ)) == 0);
	if (nmp->nm_flag & NFSMNT_SOFT)
		rep->r_retry = nmp->nm_retry;
	else
		rep->r_retry = NFS_MAXREXMIT + 1;	/* past clip limit */
	rep->r_rtt = rep->r_rexmit = 0;
	if (proct[rep->r_procnum] > 0)
		rep->r_flags |= R_TIMING | R_LOCKED;
	else
		rep->r_flags |= R_LOCKED;
	rep->r_mrep = NULL;

	/*
	 * Do the client side RPC.
	 */
	nfsstats.rpcrequests++;

	if (nmp->nm_flag & NFSMNT_FORCE) {
		rep->r_flags |= R_SOFTTERM;
		rep->r_flags &= ~R_LOCKED;
		return (0);
	}

	/*
	 * Chain request into list of outstanding requests.  Be sure
	 * to put it LAST so timer finds oldest requests first.  Note
	 * that our control of R_LOCKED prevents the request from
	 * getting ripped out from under us or transmitted by the
	 * timer code.
	 *
	 * For requests with info structures we must atomically set the
	 * info's state because the structure could become invalid upon
	 * return due to races (i.e., if async)
	 */
	crit_enter();
	mtx_link_init(&rep->r_link);
	KKASSERT((rep->r_flags & R_ONREQQ) == 0);
	TAILQ_INSERT_TAIL(&nmp->nm_reqq, rep, r_chain);
	rep->r_flags |= R_ONREQQ;
	++nmp->nm_reqqlen;
	if (rep->r_flags & R_ASYNC)
		rep->r_info->state = NFSM_STATE_WAITREPLY;
	crit_exit();

	error = 0;

	/*
	 * Send if we can.  Congestion control is not handled here any more
	 * because trying to defer the initial send based on the nfs_timer
	 * requires having a very fast nfs_timer, which is silly.
	 */
	if (nmp->nm_so) {
		if (nmp->nm_soflags & PR_CONNREQUIRED)
			error = nfs_sndlock(nmp, rep);
		if (error == 0) {
			m2 = m_copym(rep->r_mreq, 0, M_COPYALL, MB_WAIT);
			error = nfs_send(nmp->nm_so, nmp->nm_nam, m2, rep);
			if (nmp->nm_soflags & PR_CONNREQUIRED)
				nfs_sndunlock(nmp);
			rep->r_flags &= ~R_NEEDSXMIT;
			if ((rep->r_flags & R_SENT) == 0) {
				rep->r_flags |= R_SENT;
			}
		} else {
			rep->r_flags |= R_NEEDSXMIT;
		}
	} else {
		rep->r_flags |= R_NEEDSXMIT;
		rep->r_rtt = -1;
	}
	if (error == EPIPE)
		error = 0;

	/*
	 * Release the lock.  The only remote action that may have occurred
	 * would have been the setting of rep->r_mrep.  If this occurred
	 * and the request was async we have to move it to the reader
	 * thread's queue for action.
	 *
	 * For async requests also make sure the reader is woken up so
	 * it gets on the socket to read responses.
	 */
	crit_enter();
	if (rep->r_flags & R_ASYNC) {
		if (rep->r_mrep)
			nfs_hardterm(rep, 1);
		rep->r_flags &= ~R_LOCKED;
		nfssvc_iod_reader_wakeup(nmp);
	} else {
		rep->r_flags &= ~R_LOCKED;
	}
	if (rep->r_flags & R_WANTED) {
		rep->r_flags &= ~R_WANTED;
		wakeup(rep);
	}
	crit_exit();
	return (error);
}

/*
 * This code is only called for synchronous requests.  Completed synchronous
 * requests are left on reqq and we remove them before moving on to the
 * processing state.
 */
static int
nfs_request_waitreply(struct nfsreq *rep)
{
	struct nfsmount *nmp = rep->r_nmp;
	int error;

	KKASSERT((rep->r_flags & R_ASYNC) == 0);

	/*
	 * Wait until the request is finished.
	 */
	error = nfs_reply(nmp, rep);

	/*
	 * RPC done, unlink the request, but don't rip it out from under
	 * the callout timer.
	 *
	 * Once unlinked no other receiver or the timer will have
	 * visibility, so we do not have to set R_LOCKED.
	 */
	crit_enter();
	while (rep->r_flags & R_LOCKED) {
		rep->r_flags |= R_WANTED;
		tsleep(rep, 0, "nfstrac", 0);
	}
	KKASSERT(rep->r_flags & R_ONREQQ);
	TAILQ_REMOVE(&nmp->nm_reqq, rep, r_chain);
	rep->r_flags &= ~R_ONREQQ;
	--nmp->nm_reqqlen;
	if (TAILQ_FIRST(&nmp->nm_bioq) &&
	    nmp->nm_reqqlen <= nfs_maxasyncbio * 2 / 3) {
		nfssvc_iod_writer_wakeup(nmp);
	}
	crit_exit();

	/*
	 * Decrement the outstanding request count.
	 */
	if (rep->r_flags & R_SENT) {
		rep->r_flags &= ~R_SENT;
	}
	return (error);
}

/*
 * Process a reply with the error returned from nfs_request_waitreply().
 *
 * Returns EAGAIN if it wants us to loop up to nfs_request_try() again.
 * Returns ENEEDAUTH if it wants us to loop up to nfs_request_auth() again.
 */
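/*
 * The header dissected below follows the standard Sun RPC reply layout
 * (RFC 1831): a reply-status word (MSG_ACCEPTED or MSG_DENIED), then for
 * accepted replies a verifier (flavor word plus opaque length) and an
 * accept-status word that is checked against zero further down.
 */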
static int
nfs_request_processreply(nfsm_info_t info, int error)
{
	struct nfsreq *req = info->req;
	struct nfsmount *nmp = req->r_nmp;
	u_int32_t *tl;
	int verf_type;
	int i;

	/*
	 * If there was a successful reply and a tprintf msg,
	 * tprintf a response.
	 */
	if (error == 0 && (req->r_flags & R_TPRINTFMSG)) {
		nfs_msg(req->r_td, nmp->nm_mountp->mnt_stat.f_mntfromname,
			"is alive again");
	}
	info->mrep = req->r_mrep;
	info->md = req->r_md;
	info->dpos = req->r_dpos;
	if (error) {
		m_freem(req->r_mreq);
		req->r_mreq = NULL;
		kfree(req, M_NFSREQ);
		info->req = NULL;
		return (error);
	}

	/*
	 * break down the rpc header and check if ok
	 */
	NULLOUT(tl = nfsm_dissect(info, 3 * NFSX_UNSIGNED));
	if (*tl++ == rpc_msgdenied) {
		if (*tl == rpc_mismatch) {
			error = EOPNOTSUPP;
		} else if ((nmp->nm_flag & NFSMNT_KERB) &&
			   *tl++ == rpc_autherr) {
			if (req->r_failed_auth == 0) {
				req->r_failed_auth++;
				req->r_mheadend->m_next = NULL;
				m_freem(info->mrep);
				info->mrep = NULL;
				m_freem(req->r_mreq);
				req->r_mreq = NULL;
				return (ENEEDAUTH);
			} else {
				error = EAUTH;
			}
		} else {
			error = EACCES;
		}
		m_freem(info->mrep);
		info->mrep = NULL;
		m_freem(req->r_mreq);
		req->r_mreq = NULL;
		kfree(req, M_NFSREQ);
		info->req = NULL;
		return (error);
	}

	/*
	 * Grab any Kerberos verifier, otherwise just throw it away.
	 */
	verf_type = fxdr_unsigned(int, *tl++);
	i = fxdr_unsigned(int32_t, *tl);
	if ((nmp->nm_flag & NFSMNT_KERB) && verf_type == RPCAUTH_KERB4) {
		error = nfs_savenickauth(nmp, req->r_cred, i, req->r_key,
					 &info->md, &info->dpos, info->mrep);
		if (error)
			goto nfsmout;
	} else if (i > 0) {
		ERROROUT(nfsm_adv(info, nfsm_rndup(i)));
	}
	NULLOUT(tl = nfsm_dissect(info, NFSX_UNSIGNED));
	/* 0 == ok */
	if (*tl == 0) {
		NULLOUT(tl = nfsm_dissect(info, NFSX_UNSIGNED));
		if (*tl != 0) {
			error = fxdr_unsigned(int, *tl);
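			/*
			 * NFSERR_TRYLATER is NFSv3's "jukebox" error: the
			 * server is up but temporarily cannot service the
			 * RPC.  Returning EAGAIN after the one-second lbolt
			 * sleep below sends the state machine back to
			 * NFSM_STATE_TRY.
			 */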
			/*
			 * Does anyone even implement this?  Just impose
			 * a 1-second delay.
			 */
			if ((nmp->nm_flag & NFSMNT_NFSV3) &&
			    error == NFSERR_TRYLATER) {
				m_freem(info->mrep);
				info->mrep = NULL;
				error = 0;

				tsleep((caddr_t)&lbolt, 0, "nqnfstry", 0);
				return (EAGAIN);	/* goto tryagain */
			}

			/*
			 * If the File Handle was stale, invalidate the
			 * lookup cache, just in case.
			 *
			 * To avoid namecache<->vnode deadlocks we must
			 * release the vnode lock if we hold it.
			 */
			if (error == ESTALE) {
				struct vnode *vp = req->r_vp;
				int ltype;

				ltype = lockstatus(&vp->v_lock, curthread);
				if (ltype == LK_EXCLUSIVE ||
				    ltype == LK_SHARED)
					lockmgr(&vp->v_lock, LK_RELEASE);
				cache_inval_vp(vp, CINV_CHILDREN);
				if (ltype == LK_EXCLUSIVE ||
				    ltype == LK_SHARED)
					lockmgr(&vp->v_lock, ltype);
			}
			if (nmp->nm_flag & NFSMNT_NFSV3) {
				KKASSERT(*req->r_mrp == info->mrep);
				KKASSERT(*req->r_mdp == info->md);
				KKASSERT(*req->r_dposp == info->dpos);
				error |= NFSERR_RETERR;
			} else {
				m_freem(info->mrep);
				info->mrep = NULL;
			}
			m_freem(req->r_mreq);
			req->r_mreq = NULL;
			kfree(req, M_NFSREQ);
			info->req = NULL;
			return (error);
		}

		KKASSERT(*req->r_mrp == info->mrep);
		KKASSERT(*req->r_mdp == info->md);
		KKASSERT(*req->r_dposp == info->dpos);
		m_freem(req->r_mreq);
		req->r_mreq = NULL;
		FREE(req, M_NFSREQ);
		return (0);
	}
	m_freem(info->mrep);
	info->mrep = NULL;
	error = EPROTONOSUPPORT;
nfsmout:
	m_freem(req->r_mreq);
	req->r_mreq = NULL;
	kfree(req, M_NFSREQ);
	info->req = NULL;
	return (error);
}

#ifndef NFS_NOSERVER
/*
 * Generate the rpc reply header
 * siz arg. is used to decide if adding a cluster is worthwhile
 */
int
nfs_rephead(int siz, struct nfsrv_descript *nd, struct nfssvc_sock *slp,
	    int err, struct mbuf **mrq, struct mbuf **mbp, caddr_t *bposp)
{
	u_int32_t *tl;
	struct nfsm_info info;

	siz += RPC_REPLYSIZ;
	info.mb = m_getl(max_hdr + siz, MB_WAIT, MT_DATA, M_PKTHDR, NULL);
	info.mreq = info.mb;
	info.mreq->m_pkthdr.len = 0;

	/*
	 * If this is not a cluster, try and leave leading space
	 * for the lower level headers.
	 */
	if ((max_hdr + siz) < MINCLSIZE)
		info.mreq->m_data += max_hdr;
	tl = mtod(info.mreq, u_int32_t *);
	info.mreq->m_len = 6 * NFSX_UNSIGNED;
	info.bpos = ((caddr_t)tl) + info.mreq->m_len;
	*tl++ = txdr_unsigned(nd->nd_retxid);
	*tl++ = rpc_reply;
	if (err == ERPCMISMATCH || (err & NFSERR_AUTHERR)) {
		*tl++ = rpc_msgdenied;
		if (err & NFSERR_AUTHERR) {
			*tl++ = rpc_autherr;
			*tl = txdr_unsigned(err & ~NFSERR_AUTHERR);
			info.mreq->m_len -= NFSX_UNSIGNED;
			info.bpos -= NFSX_UNSIGNED;
		} else {
			*tl++ = rpc_mismatch;
			*tl++ = txdr_unsigned(RPC_VER2);
			*tl = txdr_unsigned(RPC_VER2);
		}
	} else {
		*tl++ = rpc_msgaccepted;

		/*
		 * For Kerberos authentication, we must send the nickname
		 * verifier back, otherwise just RPCAUTH_NULL.
		 */
		if (nd->nd_flag & ND_KERBFULL) {
			struct nfsuid *nuidp;
			struct timeval ktvin, ktvout;

			for (nuidp = NUIDHASH(slp, nd->nd_cr.cr_uid)->lh_first;
			     nuidp != 0; nuidp = nuidp->nu_hash.le_next) {
				if (nuidp->nu_cr.cr_uid == nd->nd_cr.cr_uid &&
				    (!nd->nd_nam2 ||
				     netaddr_match(NU_NETFAM(nuidp),
						   &nuidp->nu_haddr,
						   nd->nd_nam2)))
					break;
			}
			if (nuidp) {
				ktvin.tv_sec =
				    txdr_unsigned(nuidp->nu_timestamp.tv_sec
						  - 1);
				ktvin.tv_usec =
				    txdr_unsigned(nuidp->nu_timestamp.tv_usec);

				/*
				 * Encrypt the timestamp in ecb mode using the
				 * session key.
				 */
#ifdef NFSKERB
				XXX
#else
				ktvout.tv_sec = 0;
				ktvout.tv_usec = 0;
#endif

				*tl++ = rpc_auth_kerb;
				*tl++ = txdr_unsigned(3 * NFSX_UNSIGNED);
				*tl = ktvout.tv_sec;
				tl = nfsm_build(&info, 3 * NFSX_UNSIGNED);
				*tl++ = ktvout.tv_usec;
				*tl++ = txdr_unsigned(nuidp->nu_cr.cr_uid);
			} else {
				*tl++ = 0;
				*tl++ = 0;
			}
		} else {
			*tl++ = 0;
			*tl++ = 0;
		}
		switch (err) {
		case EPROGUNAVAIL:
			*tl = txdr_unsigned(RPC_PROGUNAVAIL);
			break;
		case EPROGMISMATCH:
			*tl = txdr_unsigned(RPC_PROGMISMATCH);
			tl = nfsm_build(&info, 2 * NFSX_UNSIGNED);
			*tl++ = txdr_unsigned(2);
			*tl = txdr_unsigned(3);
			break;
		case EPROCUNAVAIL:
			*tl = txdr_unsigned(RPC_PROCUNAVAIL);
			break;
		case EBADRPC:
			*tl = txdr_unsigned(RPC_GARBAGE);
			break;
		default:
			*tl = 0;
			if (err != NFSERR_RETVOID) {
				tl = nfsm_build(&info, NFSX_UNSIGNED);
				if (err)
					*tl = txdr_unsigned(nfsrv_errmap(nd, err));
				else
					*tl = 0;
			}
			break;
		}
	}

	if (mrq != NULL)
		*mrq = info.mreq;
	*mbp = info.mb;
	*bposp = info.bpos;
	if (err != 0 && err != NFSERR_RETVOID)
		nfsstats.srvrpc_errs++;
	return (0);
}


#endif /* NFS_NOSERVER */

/*
 * Nfs timer routine.
 *
 * Scan the nfsreq list and retransmit any requests that have timed out.
 * To avoid retransmission attempts on STREAM sockets (in the future) make
 * sure to set the r_retry field to 0 (implies nm_retry == 0).
 *
 * Requests with attached responses, terminated requests, and
 * locked requests are ignored.  Locked requests will be picked up
 * in a later timer call.
 */
void
nfs_timer_callout(void *arg /* never used */)
{
	struct nfsmount *nmp;
	struct nfsreq *req;
#ifndef NFS_NOSERVER
	struct nfssvc_sock *slp;
	u_quad_t cur_usec;
#endif /* NFS_NOSERVER */

	lwkt_gettoken(&nfs_token);
	TAILQ_FOREACH(nmp, &nfs_mountq, nm_entry) {
		lwkt_gettoken(&nmp->nm_token);
		TAILQ_FOREACH(req, &nmp->nm_reqq, r_chain) {
			KKASSERT(nmp == req->r_nmp);
			if (req->r_mrep)
				continue;
			if (req->r_flags & (R_SOFTTERM | R_LOCKED))
				continue;
			req->r_flags |= R_LOCKED;
			if (nfs_sigintr(nmp, req, req->r_td)) {
				nfs_softterm(req, 1);
			} else {
				nfs_timer_req(req);
			}
			req->r_flags &= ~R_LOCKED;
			if (req->r_flags & R_WANTED) {
				req->r_flags &= ~R_WANTED;
				wakeup(req);
			}
		}
		lwkt_reltoken(&nmp->nm_token);
	}
#ifndef NFS_NOSERVER

	/*
	 * Scan the write gathering queues for writes that need to be
	 * completed now.
	 */
	cur_usec = nfs_curusec();

	TAILQ_FOREACH(slp, &nfssvc_sockhead, ns_chain) {
		/* XXX race against removal */
		lwkt_gettoken(&slp->ns_token);
		if (slp->ns_tq.lh_first &&
		    slp->ns_tq.lh_first->nd_time <= cur_usec)
			nfsrv_wakenfsd(slp, 1);
		lwkt_reltoken(&slp->ns_token);
	}
#endif /* NFS_NOSERVER */

	callout_reset(&nfs_timer_handle, nfs_ticks, nfs_timer_callout, NULL);
	lwkt_reltoken(&nfs_token);
}

static
void
nfs_timer_req(struct nfsreq *req)
{
	struct thread *td = &thread0; /* XXX for creds, will break if sleep */
	struct nfsmount *nmp = req->r_nmp;
	struct mbuf *m;
	struct socket *so;
	int timeo;
	int error;

	/*
	 * rtt ticks and timeout calculation.  Return if the timeout
	 * has not been reached yet, unless the packet is flagged
	 * for an immediate send.
	 *
	 * The mean rtt doesn't help when we get random I/Os, we have
	 * to multiply by fairly large numbers.
	 */
	if (req->r_rtt >= 0) {
		/*
		 * Calculate the timeout to test against.
		 */
		req->r_rtt++;
		if (nmp->nm_flag & NFSMNT_DUMBTIMR) {
			timeo = nmp->nm_timeo << NFS_RTT_SCALE_BITS;
		} else if (req->r_flags & R_TIMING) {
			timeo = NFS_SRTT(req) + NFS_SDRTT(req);
		} else {
			timeo = nmp->nm_timeo << NFS_RTT_SCALE_BITS;
		}
		timeo *= multt[req->r_procnum];
		/* timeo is still scaled by SCALE_BITS */

#define NFSFS	(NFS_RTT_SCALE * NFS_HZ)
		if (req->r_flags & R_TIMING) {
			static long last_time;
			if (nfs_showrtt && last_time != time_second) {
				kprintf("rpccmd %d NFS SRTT %d SDRTT %d "
					"timeo %d.%03d\n",
					proct[req->r_procnum],
					NFS_SRTT(req), NFS_SDRTT(req),
					timeo / NFSFS,
					timeo % NFSFS * 1000 / NFSFS);
				last_time = time_second;
			}
		}
#undef NFSFS

		/*
		 * deal with nfs_timer jitter.
		 */
		timeo = (timeo >> NFS_RTT_SCALE_BITS) + 1;
		if (timeo < 2)
			timeo = 2;

		if (nmp->nm_timeouts > 0)
			timeo *= nfs_backoff[nmp->nm_timeouts - 1];
		if (timeo > NFS_MAXTIMEO)
			timeo = NFS_MAXTIMEO;
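
		/*
		 * nfs_backoff[] grows roughly as a Fibonacci sequence, so
		 * e.g. a third consecutive timeout (nm_timeouts == 3)
		 * multiplies the computed timeout by 5, bounded above by
		 * NFS_MAXTIMEO.
		 */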
		if (req->r_rtt <= timeo) {
			if ((req->r_flags & R_NEEDSXMIT) == 0)
				return;
		} else if (nmp->nm_timeouts < 8) {
			nmp->nm_timeouts++;
		}
	}

	/*
	 * Check for server not responding
	 */
	if ((req->r_flags & R_TPRINTFMSG) == 0 &&
	    req->r_rexmit > nmp->nm_deadthresh) {
		nfs_msg(req->r_td, nmp->nm_mountp->mnt_stat.f_mntfromname,
			"not responding");
		req->r_flags |= R_TPRINTFMSG;
	}
	if (req->r_rexmit >= req->r_retry) {	/* too many */
		nfsstats.rpctimeouts++;
		nfs_softterm(req, 1);
		return;
	}

	/*
	 * Generally disable retransmission on reliable sockets,
	 * unless the request is flagged for immediate send.
	 */
	if (nmp->nm_sotype != SOCK_DGRAM) {
		if (++req->r_rexmit > NFS_MAXREXMIT)
			req->r_rexmit = NFS_MAXREXMIT;
		if ((req->r_flags & R_NEEDSXMIT) == 0)
			return;
	}

	/*
	 * Stop here if we do not have a socket!
	 */
	if ((so = nmp->nm_so) == NULL)
		return;

	/*
	 * If there is enough space and the window allows.. resend it.
	 *
	 * r_rtt is left intact in case we get an answer after the
	 * retry that was a reply to the original packet.
	 *
	 * NOTE: so_pru_send()
	 */
	if (ssb_space(&so->so_snd) >= req->r_mreq->m_pkthdr.len &&
	    (req->r_flags & (R_SENT | R_NEEDSXMIT)) &&
	    (m = m_copym(req->r_mreq, 0, M_COPYALL, MB_DONTWAIT))) {
		if ((nmp->nm_flag & NFSMNT_NOCONN) == 0)
			error = so_pru_send(so, 0, m, NULL, NULL, td);
		else
			error = so_pru_send(so, 0, m, nmp->nm_nam, NULL, td);
		if (error) {
			if (NFSIGNORE_SOERROR(nmp->nm_soflags, error))
				so->so_error = 0;
			req->r_flags |= R_NEEDSXMIT;
		} else if (req->r_mrep == NULL) {
			/*
			 * Iff first send, start timing
			 * else turn timing off, backoff timer
			 * and divide congestion window by 2.
			 *
			 * It is possible for the so_pru_send() to
			 * block and for us to race a reply so we
			 * only do this if the reply field has not
			 * been filled in.  R_LOCKED will prevent
			 * the request from being ripped out from under
			 * us entirely.
			 *
			 * Record the last resent procnum to aid us
			 * in duplicate detection on receive.
			 */
			if ((req->r_flags & R_NEEDSXMIT) == 0) {
				if (nfs_showrexmit)
					kprintf("X");
				if (++req->r_rexmit > NFS_MAXREXMIT)
					req->r_rexmit = NFS_MAXREXMIT;
				nmp->nm_maxasync_scaled >>= 1;
				if (nmp->nm_maxasync_scaled < NFS_MINASYNC_SCALED)
					nmp->nm_maxasync_scaled = NFS_MINASYNC_SCALED;
				nfsstats.rpcretries++;
				nmp->nm_lastreprocnum = req->r_procnum;
			} else {
				req->r_flags |= R_SENT;
				req->r_flags &= ~R_NEEDSXMIT;
			}
		}
	}
}

/*
 * Mark all of an nfs mount's outstanding requests with R_SOFTTERM and
 * wait for all requests to complete.  This is used by forced unmounts
 * to terminate any outstanding RPCs.
 *
 * Locked requests cannot be canceled but will be marked for
 * soft-termination.
 */
int
nfs_nmcancelreqs(struct nfsmount *nmp)
{
	struct nfsreq *req;
	int i;

	crit_enter();
	TAILQ_FOREACH(req, &nmp->nm_reqq, r_chain) {
		if (req->r_mrep != NULL || (req->r_flags & R_SOFTTERM))
			continue;
		nfs_softterm(req, 0);
	}
	/* XXX the other two queues as well */
	crit_exit();

	for (i = 0; i < 30; i++) {
		crit_enter();
		TAILQ_FOREACH(req, &nmp->nm_reqq, r_chain) {
			if (nmp == req->r_nmp)
				break;
		}
		crit_exit();
		if (req == NULL)
			return (0);
		tsleep(&lbolt, 0, "nfscancel", 0);
	}
	return (EBUSY);
}

/*
 * Soft-terminate a request, effectively marking it as failed.
 *
 * Must be called from within a critical section.
 */
static void
nfs_softterm(struct nfsreq *rep, int islocked)
{
	rep->r_flags |= R_SOFTTERM;
	nfs_hardterm(rep, islocked);
}

/*
 * Hard-terminate a request, typically after getting a response.
 *
 * The state machine can still decide to re-issue it later if necessary.
 *
 * Must be called from within a critical section.
 */
static void
nfs_hardterm(struct nfsreq *rep, int islocked)
{
	struct nfsmount *nmp = rep->r_nmp;

	/*
	 * The nm_send count is decremented now to avoid deadlocks
	 * when the process in soreceive() hasn't yet managed to send
	 * its own request.
	 */
	if (rep->r_flags & R_SENT) {
		rep->r_flags &= ~R_SENT;
	}

	/*
	 * If we locked the request or nobody else has locked the request,
	 * and the request is async, we can move it to the reader thread's
	 * queue now and fix up the state.
	 *
	 * If we locked the request or nobody else has locked the request,
	 * we can wake up anyone blocked waiting for a response on the
	 * request.
	 */
	if (islocked || (rep->r_flags & R_LOCKED) == 0) {
		if ((rep->r_flags & (R_ONREQQ | R_ASYNC)) ==
		    (R_ONREQQ | R_ASYNC)) {
			rep->r_flags &= ~R_ONREQQ;
			TAILQ_REMOVE(&nmp->nm_reqq, rep, r_chain);
			--nmp->nm_reqqlen;
			TAILQ_INSERT_TAIL(&nmp->nm_reqrxq, rep, r_chain);
			KKASSERT(rep->r_info->state == NFSM_STATE_TRY ||
				 rep->r_info->state == NFSM_STATE_WAITREPLY);
			rep->r_info->state = NFSM_STATE_PROCESSREPLY;
			nfssvc_iod_reader_wakeup(nmp);
			if (TAILQ_FIRST(&nmp->nm_bioq) &&
			    nmp->nm_reqqlen <= nfs_maxasyncbio * 2 / 3) {
				nfssvc_iod_writer_wakeup(nmp);
			}
		}
		mtx_abort_ex_link(&nmp->nm_rxlock, &rep->r_link);
	}
}

/*
 * Test for a termination condition pending on the process.
 * This is used for NFSMNT_INT mounts.
 */
int
nfs_sigintr(struct nfsmount *nmp, struct nfsreq *rep, struct thread *td)
{
	sigset_t tmpset;
	struct proc *p;
	struct lwp *lp;

	if (rep && (rep->r_flags & R_SOFTTERM))
		return (EINTR);
	/* Terminate all requests while attempting a forced unmount. */
	if (nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF)
		return (EINTR);
	if (!(nmp->nm_flag & NFSMNT_INT))
		return (0);
	/* td might be NULL YYY */
	if (td == NULL || (p = td->td_proc) == NULL)
		return (0);

	lp = td->td_lwp;
	tmpset = lwp_sigpend(lp);
	SIGSETNAND(tmpset, lp->lwp_sigmask);
	SIGSETNAND(tmpset, p->p_sigignore);
	if (SIGNOTEMPTY(tmpset) && NFSINT_SIGMASK(tmpset))
		return (EINTR);

	return (0);
}

/*
 * Lock a socket against others.
 * Necessary for STREAM sockets to ensure you get an entire rpc request/reply
 * and also to avoid race conditions between the processes with nfs requests
 * in progress when a reconnect is necessary.
 */
int
nfs_sndlock(struct nfsmount *nmp, struct nfsreq *rep)
{
	mtx_t mtx = &nmp->nm_txlock;
	struct thread *td;
	int slptimeo;
	int slpflag;
	int error;

	slpflag = 0;
	slptimeo = 0;
	td = rep ? rep->r_td : NULL;
	if (nmp->nm_flag & NFSMNT_INT)
		slpflag = PCATCH;

	while ((error = mtx_lock_ex_try(mtx)) != 0) {
		if (nfs_sigintr(nmp, rep, td)) {
			error = EINTR;
			break;
		}
		error = mtx_lock_ex(mtx, "nfsndlck", slpflag, slptimeo);
		if (error == 0)
			break;
		if (slpflag == PCATCH) {
			slpflag = 0;
			slptimeo = 2 * hz;
		}
	}
	/* Always fail if our request has been cancelled. */
	if (rep && (rep->r_flags & R_SOFTTERM)) {
		if (error == 0)
			mtx_unlock(mtx);
		error = EINTR;
	}
	return (error);
}

/*
 * Unlock the stream socket for others.
 */
void
nfs_sndunlock(struct nfsmount *nmp)
{
	mtx_unlock(&nmp->nm_txlock);
}

/*
 * Lock the receiver side of the socket.
 *
 * rep may be NULL.
/*
 * Lock the receive side of the socket.
 *
 * rep may be NULL.
 */
static int
nfs_rcvlock(struct nfsmount *nmp, struct nfsreq *rep)
{
	mtx_t mtx = &nmp->nm_rxlock;
	int slpflag;
	int slptimeo;
	int error;

	/*
	 * Unconditionally check for completion in case another nfsiod
	 * got the packet while the caller was blocked, before the caller
	 * called us.  Packet reception is handled by mainline code which
	 * is protected by the BGL at the moment.
	 *
	 * We do not strictly need the second check just before the
	 * tsleep(), but it's good defensive programming.
	 */
	if (rep && rep->r_mrep != NULL)
		return (EALREADY);

	if (nmp->nm_flag & NFSMNT_INT)
		slpflag = PCATCH;
	else
		slpflag = 0;
	slptimeo = 0;

	while ((error = mtx_lock_ex_try(mtx)) != 0) {
		if (nfs_sigintr(nmp, rep, (rep ? rep->r_td : NULL))) {
			error = EINTR;
			break;
		}
		if (rep && rep->r_mrep != NULL) {
			error = EALREADY;
			break;
		}

		/*
		 * NOTE: can return ENOLCK, but in that case rep->r_mrep
		 *	 will already be set.
		 */
		if (rep) {
			error = mtx_lock_ex_link(mtx, &rep->r_link,
						 "nfsrcvlk",
						 slpflag, slptimeo);
		} else {
			error = mtx_lock_ex(mtx, "nfsrcvlk", slpflag, slptimeo);
		}
		if (error == 0)
			break;

		/*
		 * If our reply was received while we were sleeping,
		 * then just return without taking the lock to avoid a
		 * situation where a single iod could 'capture' the
		 * receive lock.
		 */
		if (rep && rep->r_mrep != NULL) {
			error = EALREADY;
			break;
		}
		if (slpflag == PCATCH) {
			slpflag = 0;
			slptimeo = 2 * hz;
		}
	}
	if (error == 0) {
		if (rep && rep->r_mrep != NULL) {
			error = EALREADY;
			mtx_unlock(mtx);
		}
	}
	return (error);
}

/*
 * Unlock the receive side of the socket for others.
 */
static void
nfs_rcvunlock(struct nfsmount *nmp)
{
	mtx_unlock(&nmp->nm_rxlock);
}
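/*
 * Illustrative sketch (not compiled in): how a reply-wait loop consumes
 * the EALREADY convention of nfs_rcvlock() above.  example_wait_reply()
 * is hypothetical and only restates the locking contract: EALREADY means
 * the reply already arrived and the lock was NOT taken.
 */
#if 0
static int
example_wait_reply(struct nfsmount *nmp, struct nfsreq *rep)
{
	int error;

	error = nfs_rcvlock(nmp, rep);
	if (error == EALREADY)
		return (0);	/* rep->r_mrep already valid, no lock held */
	if (error)
		return (error);	/* interrupted or cancelled */
	/* ... receive and match replies here, then ... */
	nfs_rcvunlock(nmp);
	return (0);
}
#endif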
/*
 * nfs_realign:
 *
 * Check for badly aligned mbuf data and realign by copying the unaligned
 * portion of the data into a new mbuf chain and freeing the portions
 * of the old chain that were replaced.
 *
 * We cannot simply realign the data within the existing mbuf chain
 * because the underlying buffers may contain other rpc commands and
 * we cannot afford to overwrite them.
 *
 * We would prefer to avoid this situation entirely.  The situation does
 * not occur with NFS/UDP and is supposed to only occasionally occur
 * with TCP.  Use vfs.nfs.realign_count and realign_test to check this.
 *
 * NOTE!  MB_DONTWAIT cannot be used here.  The mbufs must be acquired
 *	  because the rpc request OR reply cannot be thrown away.  TCP NFS
 *	  mounts do not retry their RPCs unless the TCP connection itself
 *	  is dropped, so throwing away an RPC will basically cause the NFS
 *	  operation to lock up indefinitely.
 */
static void
nfs_realign(struct mbuf **pm, int hsiz)
{
	struct mbuf *m;
	struct mbuf *n = NULL;

	/*
	 * Check for misalignment
	 */
	++nfs_realign_test;
	while ((m = *pm) != NULL) {
		if ((m->m_len & 0x3) || (mtod(m, intptr_t) & 0x3))
			break;
		pm = &m->m_next;
	}

	/*
	 * If misalignment was found, make a completely new copy.
	 */
	if (m) {
		++nfs_realign_count;
		n = m_dup_data(m, MB_WAIT);
		m_freem(*pm);
		*pm = n;
	}
}
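/*
 * Illustrative sketch (not compiled in): the alignment predicate used by
 * nfs_realign() above.  An mbuf triggers realignment when either its
 * length or its data pointer is not 32-bit aligned, since the XDR
 * decoding macros dereference 32-bit words in place.
 * example_misaligned() is a hypothetical restatement of that test.
 */
#if 0
static __inline int
example_misaligned(struct mbuf *m)
{
	/* true if the chain needs the copy-realign treatment */
	return ((m->m_len & 0x3) || (mtod(m, intptr_t) & 0x3));
}
#endif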
#ifndef NFS_NOSERVER

/*
 * Parse an RPC request:
 * - verify it
 * - fill in the cred struct.
 */
int
nfs_getreq(struct nfsrv_descript *nd, struct nfsd *nfsd, int has_header)
{
	int len, i;
	u_int32_t *tl;
	struct uio uio;
	struct iovec iov;
	caddr_t cp;
	u_int32_t nfsvers, auth_type;
	uid_t nickuid;
	int error = 0, ticklen;
	struct nfsuid *nuidp;
	struct timeval tvin, tvout;
	struct nfsm_info info;
#if 0				/* until encrypted keys are implemented */
	NFSKERBKEYSCHED_T keys;	/* stores key schedule */
#endif

	info.mrep = nd->nd_mrep;
	info.md = nd->nd_md;
	info.dpos = nd->nd_dpos;

	if (has_header) {
		NULLOUT(tl = nfsm_dissect(&info, 10 * NFSX_UNSIGNED));
		nd->nd_retxid = fxdr_unsigned(u_int32_t, *tl++);
		if (*tl++ != rpc_call) {
			m_freem(info.mrep);
			return (EBADRPC);
		}
	} else {
		NULLOUT(tl = nfsm_dissect(&info, 8 * NFSX_UNSIGNED));
	}
	nd->nd_repstat = 0;
	nd->nd_flag = 0;
	if (*tl++ != rpc_vers) {
		nd->nd_repstat = ERPCMISMATCH;
		nd->nd_procnum = NFSPROC_NOOP;
		return (0);
	}
	if (*tl != nfs_prog) {
		nd->nd_repstat = EPROGUNAVAIL;
		nd->nd_procnum = NFSPROC_NOOP;
		return (0);
	}
	tl++;
	nfsvers = fxdr_unsigned(u_int32_t, *tl++);
	if (nfsvers < NFS_VER2 || nfsvers > NFS_VER3) {
		nd->nd_repstat = EPROGMISMATCH;
		nd->nd_procnum = NFSPROC_NOOP;
		return (0);
	}
	if (nfsvers == NFS_VER3)
		nd->nd_flag = ND_NFSV3;
	nd->nd_procnum = fxdr_unsigned(u_int32_t, *tl++);
	if (nd->nd_procnum == NFSPROC_NULL)
		return (0);
	if (nd->nd_procnum >= NFS_NPROCS ||
	    (nd->nd_procnum >= NQNFSPROC_GETLEASE) ||
	    (!nd->nd_flag && nd->nd_procnum > NFSV2PROC_STATFS)) {
		nd->nd_repstat = EPROCUNAVAIL;
		nd->nd_procnum = NFSPROC_NOOP;
		return (0);
	}
	if ((nd->nd_flag & ND_NFSV3) == 0)
		nd->nd_procnum = nfsv3_procid[nd->nd_procnum];
	auth_type = *tl++;
	len = fxdr_unsigned(int, *tl++);
	if (len < 0 || len > RPCAUTH_MAXSIZ) {
		m_freem(info.mrep);
		return (EBADRPC);
	}

	nd->nd_flag &= ~ND_KERBAUTH;
	/*
	 * Handle auth_unix or auth_kerb.
	 */
	if (auth_type == rpc_auth_unix) {
		len = fxdr_unsigned(int, *++tl);
		if (len < 0 || len > NFS_MAXNAMLEN) {
			m_freem(info.mrep);
			return (EBADRPC);
		}
		ERROROUT(nfsm_adv(&info, nfsm_rndup(len)));
		NULLOUT(tl = nfsm_dissect(&info, 3 * NFSX_UNSIGNED));
		bzero((caddr_t)&nd->nd_cr, sizeof (struct ucred));
		nd->nd_cr.cr_ref = 1;
		nd->nd_cr.cr_uid = fxdr_unsigned(uid_t, *tl++);
		nd->nd_cr.cr_ruid = nd->nd_cr.cr_svuid = nd->nd_cr.cr_uid;
		nd->nd_cr.cr_gid = fxdr_unsigned(gid_t, *tl++);
		nd->nd_cr.cr_rgid = nd->nd_cr.cr_svgid = nd->nd_cr.cr_gid;
		len = fxdr_unsigned(int, *tl);
		if (len < 0 || len > RPCAUTH_UNIXGIDS) {
			m_freem(info.mrep);
			return (EBADRPC);
		}
		NULLOUT(tl = nfsm_dissect(&info, (len + 2) * NFSX_UNSIGNED));
		for (i = 1; i <= len; i++) {
			if (i < NGROUPS)
				nd->nd_cr.cr_groups[i] =
				    fxdr_unsigned(gid_t, *tl++);
			else
				tl++;
		}
		nd->nd_cr.cr_ngroups = (len >= NGROUPS) ? NGROUPS : (len + 1);
		if (nd->nd_cr.cr_ngroups > 1)
			nfsrvw_sort(nd->nd_cr.cr_groups, nd->nd_cr.cr_ngroups);
		len = fxdr_unsigned(int, *++tl);
		if (len < 0 || len > RPCAUTH_MAXSIZ) {
			m_freem(info.mrep);
			return (EBADRPC);
		}
		if (len > 0) {
			ERROROUT(nfsm_adv(&info, nfsm_rndup(len)));
		}
	} else if (auth_type == rpc_auth_kerb) {
		switch (fxdr_unsigned(int, *tl++)) {
		case RPCAKN_FULLNAME:
			ticklen = fxdr_unsigned(int, *tl);
			*((u_int32_t *)nfsd->nfsd_authstr) = *tl;
			uio.uio_resid = nfsm_rndup(ticklen) + NFSX_UNSIGNED;
			nfsd->nfsd_authlen = uio.uio_resid + NFSX_UNSIGNED;
			if (uio.uio_resid > (len - 2 * NFSX_UNSIGNED)) {
				m_freem(info.mrep);
				return (EBADRPC);
			}
			uio.uio_offset = 0;
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_segflg = UIO_SYSSPACE;
			iov.iov_base = (caddr_t)&nfsd->nfsd_authstr[4];
			iov.iov_len = RPCAUTH_MAXSIZ - 4;
			ERROROUT(nfsm_mtouio(&info, &uio, uio.uio_resid));
			NULLOUT(tl = nfsm_dissect(&info, 2 * NFSX_UNSIGNED));
			if (*tl++ != rpc_auth_kerb ||
			    fxdr_unsigned(int, *tl) != 4 * NFSX_UNSIGNED) {
				kprintf("Bad kerb verifier\n");
				nd->nd_repstat = (NFSERR_AUTHERR | AUTH_BADVERF);
				nd->nd_procnum = NFSPROC_NOOP;
				return (0);
			}
			NULLOUT(cp = nfsm_dissect(&info, 4 * NFSX_UNSIGNED));
			tl = (u_int32_t *)cp;
			if (fxdr_unsigned(int, *tl) != RPCAKN_FULLNAME) {
				kprintf("Not fullname kerb verifier\n");
				nd->nd_repstat = (NFSERR_AUTHERR | AUTH_BADVERF);
				nd->nd_procnum = NFSPROC_NOOP;
				return (0);
			}
			cp += NFSX_UNSIGNED;
			bcopy(cp, nfsd->nfsd_verfstr, 3 * NFSX_UNSIGNED);
			nfsd->nfsd_verflen = 3 * NFSX_UNSIGNED;
			nd->nd_flag |= ND_KERBFULL;
			nfsd->nfsd_flag |= NFSD_NEEDAUTH;
			break;
		case RPCAKN_NICKNAME:
			if (len != 2 * NFSX_UNSIGNED) {
				kprintf("Kerb nickname short\n");
				nd->nd_repstat = (NFSERR_AUTHERR | AUTH_BADCRED);
				nd->nd_procnum = NFSPROC_NOOP;
				return (0);
			}
			nickuid = fxdr_unsigned(uid_t, *tl);
			NULLOUT(tl = nfsm_dissect(&info, 2 * NFSX_UNSIGNED));
			if (*tl++ != rpc_auth_kerb ||
			    fxdr_unsigned(int, *tl) != 3 * NFSX_UNSIGNED) {
				kprintf("Kerb nick verifier bad\n");
				nd->nd_repstat = (NFSERR_AUTHERR | AUTH_BADVERF);
				nd->nd_procnum = NFSPROC_NOOP;
				return (0);
			}
			NULLOUT(tl = nfsm_dissect(&info, 3 * NFSX_UNSIGNED));
			tvin.tv_sec = *tl++;
			tvin.tv_usec = *tl;

			for (nuidp = NUIDHASH(nfsd->nfsd_slp, nickuid)->lh_first;
			     nuidp != NULL; nuidp = nuidp->nu_hash.le_next) {
				if (nuidp->nu_cr.cr_uid == nickuid &&
				    (!nd->nd_nam2 ||
				     netaddr_match(NU_NETFAM(nuidp),
						   &nuidp->nu_haddr,
						   nd->nd_nam2)))
					break;
			}
			if (!nuidp) {
				nd->nd_repstat =
				    (NFSERR_AUTHERR | AUTH_REJECTCRED);
				nd->nd_procnum = NFSPROC_NOOP;
				return (0);
			}

			/*
			 * Now, decrypt the timestamp using the session key
			 * and validate it.
			 */
#ifdef NFSKERB
			XXX
#else
			tvout.tv_sec = 0;
			tvout.tv_usec = 0;
#endif

			tvout.tv_sec = fxdr_unsigned(long, tvout.tv_sec);
			tvout.tv_usec = fxdr_unsigned(long, tvout.tv_usec);
			if (nuidp->nu_expire < time_second ||
			    nuidp->nu_timestamp.tv_sec > tvout.tv_sec ||
			    (nuidp->nu_timestamp.tv_sec == tvout.tv_sec &&
			     nuidp->nu_timestamp.tv_usec > tvout.tv_usec)) {
				nuidp->nu_expire = 0;
				nd->nd_repstat =
				    (NFSERR_AUTHERR | AUTH_REJECTVERF);
				nd->nd_procnum = NFSPROC_NOOP;
				return (0);
			}
			nfsrv_setcred(&nuidp->nu_cr, &nd->nd_cr);
			nd->nd_flag |= ND_KERBNICK;
			break;
		}
	} else {
		nd->nd_repstat = (NFSERR_AUTHERR | AUTH_REJECTCRED);
		nd->nd_procnum = NFSPROC_NOOP;
		return (0);
	}

	nd->nd_md = info.md;
	nd->nd_dpos = info.dpos;
	return (0);
nfsmout:
	return (error);
}

#endif /* NFS_NOSERVER */

/*
 * Send a message to the originating process's terminal.  The thread and/or
 * process may be NULL.  YYY the thread should not be NULL but there may
 * still be some uio_td's that are still being passed as NULL through to
 * nfsm_request().
 */
static int
nfs_msg(struct thread *td, char *server, char *msg)
{
	tpr_t tpr;

	if (td && td->td_proc)
		tpr = tprintf_open(td->td_proc);
	else
		tpr = NULL;
	tprintf(tpr, "nfs server %s: %s\n", server, msg);
	tprintf_close(tpr);
	return (0);
}

#ifndef NFS_NOSERVER

/*
 * Upcall wrapper for the nfsd sockets: acquire the per-socket token
 * before entering nfsrv_rcv().
 */
void
nfsrv_rcv_upcall(struct socket *so, void *arg, int waitflag)
{
	struct nfssvc_sock *slp = (struct nfssvc_sock *)arg;

	lwkt_gettoken(&slp->ns_token);
	nfsrv_rcv(so, arg, waitflag);
	lwkt_reltoken(&slp->ns_token);
}
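/*
 * Illustrative sketch (not compiled in): the record throttle applied at
 * the top of nfsrv_rcv() below.  When upcalled from the protocol thread
 * (MB_DONTWAIT) with roughly half the waiting nfsds' worth of parsed
 * records already queued, reading is deferred via SLP_NEEDQ so TCP
 * back-pressures the client rather than the server buffering unbounded
 * mbufs.  example_should_defer() is a hypothetical restatement.
 */
#if 0
static __inline int
example_should_defer(struct nfssvc_sock *slp, int waitflag)
{
	return (waitflag == MB_DONTWAIT &&
		slp->ns_numrec >= nfsd_waiting / 2 + 1);
}
#endif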
/*
 * Socket upcall routine for the nfsd sockets.
 * The caddr_t arg is a pointer to the "struct nfssvc_sock".
 * Essentially do as much as possible non-blocking, else punt and it will
 * be called with MB_WAIT from an nfsd.
 *
 * slp->ns_token is held on call
 */
void
nfsrv_rcv(struct socket *so, void *arg, int waitflag)
{
	struct nfssvc_sock *slp = (struct nfssvc_sock *)arg;
	struct mbuf *m;
	struct sockaddr *nam;
	struct sockbuf sio;
	int flags, error;
	int nparallel_wakeup = 0;

	ASSERT_LWKT_TOKEN_HELD(&slp->ns_token);

	if ((slp->ns_flag & SLP_VALID) == 0)
		return;

	/*
	 * Do not allow an infinite number of completed RPC records to build
	 * up before we stop reading data from the socket.  Otherwise we could
	 * end up holding onto an unreasonable number of mbufs for requests
	 * waiting for service.
	 *
	 * This should give pretty good feedback to the TCP layer and
	 * prevents a memory crunch for other protocols.
	 *
	 * Note that the same service socket can be dispatched to several
	 * nfs servers simultaneously.
	 *
	 * The tcp protocol callback calls us with MB_DONTWAIT.
	 * nfsd calls us with MB_WAIT (typically).
	 */
	if (waitflag == MB_DONTWAIT && slp->ns_numrec >= nfsd_waiting / 2 + 1) {
		slp->ns_flag |= SLP_NEEDQ;
		goto dorecs;
	}

	/*
	 * Handle protocol specifics to parse an RPC request.  We always
	 * pull from the socket using non-blocking I/O.
	 */
	if (so->so_type == SOCK_STREAM) {
		/*
		 * The data has to be read in an orderly fashion from a TCP
		 * stream, unlike a UDP socket.  It is possible for soreceive
		 * and/or nfsrv_getstream() to block, so make sure only one
		 * entity is messing around with the TCP stream at any given
		 * moment.  The receive sockbuf's lock in soreceive is not
		 * sufficient.
		 *
		 * Note that this procedure can be called from any number of
		 * NFS servers *OR* can be upcalled directly from a TCP
		 * protocol thread without the lock.
		 */
		if (slp->ns_flag & SLP_GETSTREAM) {
			slp->ns_flag |= SLP_NEEDQ;
			goto dorecs;
		}
		slp->ns_flag |= SLP_GETSTREAM;

		/*
		 * Do soreceive().  Pull out as much data as possible without
		 * blocking.
		 */
		sbinit(&sio, 1000000000);
		flags = MSG_DONTWAIT;
		error = so_pru_soreceive(so, &nam, NULL, &sio, NULL, &flags);
		if (error || sio.sb_mb == NULL) {
			if (error == EWOULDBLOCK)
				slp->ns_flag |= SLP_NEEDQ;
			else
				slp->ns_flag |= SLP_DISCONN;
			slp->ns_flag &= ~SLP_GETSTREAM;
			goto dorecs;
		}
		m = sio.sb_mb;
		if (slp->ns_rawend) {
			slp->ns_rawend->m_next = m;
			slp->ns_cc += sio.sb_cc;
		} else {
			slp->ns_raw = m;
			slp->ns_cc = sio.sb_cc;
		}
		while (m->m_next)
			m = m->m_next;
		slp->ns_rawend = m;

		/*
		 * Now try and parse as many record(s) as we can out of the
		 * raw stream data.
		 */
		error = nfsrv_getstream(slp, waitflag, &nparallel_wakeup);
		if (error) {
			if (error == EPERM)
				slp->ns_flag |= SLP_DISCONN;
			else
				slp->ns_flag |= SLP_NEEDQ;
		}
		slp->ns_flag &= ~SLP_GETSTREAM;
	} else {
		/*
		 * For UDP soreceive typically pulls just one packet, so
		 * loop to get the whole batch.
		 */
		do {
			sbinit(&sio, 1000000000);
			flags = MSG_DONTWAIT;
			error = so_pru_soreceive(so, &nam, NULL, &sio,
						 NULL, &flags);
			if (sio.sb_mb) {
				struct nfsrv_rec *rec;
				int mf = (waitflag & MB_DONTWAIT) ?
					 M_NOWAIT : M_WAITOK;
				rec = kmalloc(sizeof(struct nfsrv_rec),
					      M_NFSRVDESC, mf);
				if (!rec) {
					if (nam)
						FREE(nam, M_SONAME);
					m_freem(sio.sb_mb);
					continue;
				}
				nfs_realign(&sio.sb_mb, 10 * NFSX_UNSIGNED);
				rec->nr_address = nam;
				rec->nr_packet = sio.sb_mb;
				STAILQ_INSERT_TAIL(&slp->ns_rec, rec, nr_link);
				++slp->ns_numrec;
				++nparallel_wakeup;
			}
			if (error) {
				if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
				    error != EWOULDBLOCK) {
					slp->ns_flag |= SLP_DISCONN;
					goto dorecs;
				}
			}
		} while (sio.sb_mb);
	}

	/*
	 * If we were upcalled from the tcp protocol layer and we have
	 * fully parsed records ready to go, or there is new data pending,
	 * or something went wrong, try to wake up an nfsd thread to deal
	 * with it.
	 */
dorecs:
	if (waitflag == MB_DONTWAIT && (slp->ns_numrec > 0 ||
	    (slp->ns_flag & (SLP_NEEDQ | SLP_DISCONN)))) {
		nfsrv_wakenfsd(slp, nparallel_wakeup);
	}
}
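/*
 * Illustrative sketch (not compiled in): decoding the RPC-over-TCP
 * record mark that nfsrv_getstream() below strips from the stream
 * (RFC 1831 record marking).  The high bit flags the final fragment
 * of a record; the low 31 bits give the fragment length.
 * example_parse_recmark() is a hypothetical restatement.
 */
#if 0
static __inline void
example_parse_recmark(u_int32_t wire, u_int32_t *reclenp, int *lastfragp)
{
	u_int32_t recmark = ntohl(wire);	/* mark travels big-endian */

	*lastfragp = (recmark & 0x80000000) != 0;
	*reclenp = recmark & ~0x80000000;
}
#endif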
/*
 * Try and extract an RPC request from the mbuf data list received on a
 * stream socket.  The "waitflag" argument indicates whether or not it
 * can sleep.
 */
static int
nfsrv_getstream(struct nfssvc_sock *slp, int waitflag, int *countp)
{
	struct mbuf *m, **mpp;
	char *cp1, *cp2;
	int len;
	struct mbuf *om, *m2, *recm;
	u_int32_t recmark;

	for (;;) {
		if (slp->ns_reclen == 0) {
			if (slp->ns_cc < NFSX_UNSIGNED)
				return (0);
			m = slp->ns_raw;
			if (m->m_len >= NFSX_UNSIGNED) {
				bcopy(mtod(m, caddr_t), (caddr_t)&recmark,
				      NFSX_UNSIGNED);
				m->m_data += NFSX_UNSIGNED;
				m->m_len -= NFSX_UNSIGNED;
			} else {
				cp1 = (caddr_t)&recmark;
				cp2 = mtod(m, caddr_t);
				while (cp1 < ((caddr_t)&recmark) + NFSX_UNSIGNED) {
					while (m->m_len == 0) {
						m = m->m_next;
						cp2 = mtod(m, caddr_t);
					}
					*cp1++ = *cp2++;
					m->m_data++;
					m->m_len--;
				}
			}
			slp->ns_cc -= NFSX_UNSIGNED;
			recmark = ntohl(recmark);
			slp->ns_reclen = recmark & ~0x80000000;
			if (recmark & 0x80000000)
				slp->ns_flag |= SLP_LASTFRAG;
			else
				slp->ns_flag &= ~SLP_LASTFRAG;
			if (slp->ns_reclen > NFS_MAXPACKET ||
			    slp->ns_reclen <= 0) {
				log(LOG_ERR, "%s (%d) from nfs client\n",
				    "impossible packet length",
				    slp->ns_reclen);
				return (EPERM);
			}
		}

		/*
		 * Now get the record part.
		 *
		 * Note that slp->ns_reclen may be 0.  Linux sometimes
		 * generates 0-length RPCs.
		 */
		recm = NULL;
		if (slp->ns_cc == slp->ns_reclen) {
			recm = slp->ns_raw;
			slp->ns_raw = slp->ns_rawend = NULL;
			slp->ns_cc = slp->ns_reclen = 0;
		} else if (slp->ns_cc > slp->ns_reclen) {
			len = 0;
			m = slp->ns_raw;
			om = NULL;

			while (len < slp->ns_reclen) {
				if ((len + m->m_len) > slp->ns_reclen) {
					m2 = m_copym(m, 0, slp->ns_reclen - len,
						     waitflag);
					if (m2) {
						if (om) {
							om->m_next = m2;
							recm = slp->ns_raw;
						} else {
							recm = m2;
						}
						m->m_data += slp->ns_reclen - len;
						m->m_len -= slp->ns_reclen - len;
						len = slp->ns_reclen;
					} else {
						return (EWOULDBLOCK);
					}
				} else if ((len + m->m_len) == slp->ns_reclen) {
					om = m;
					len += m->m_len;
					m = m->m_next;
					recm = slp->ns_raw;
					om->m_next = NULL;
				} else {
					om = m;
					len += m->m_len;
					m = m->m_next;
				}
			}
			slp->ns_raw = m;
			slp->ns_cc -= len;
			slp->ns_reclen = 0;
		} else {
			return (0);
		}

		/*
		 * Accumulate the fragments into a record.
		 */
		mpp = &slp->ns_frag;
		while (*mpp)
			mpp = &((*mpp)->m_next);
		*mpp = recm;
		if (slp->ns_flag & SLP_LASTFRAG) {
			struct nfsrv_rec *rec;
			int mf = (waitflag & MB_DONTWAIT) ?
				 M_NOWAIT : M_WAITOK;
			rec = kmalloc(sizeof(struct nfsrv_rec),
				      M_NFSRVDESC, mf);
			if (!rec) {
				m_freem(slp->ns_frag);
			} else {
				nfs_realign(&slp->ns_frag, 10 * NFSX_UNSIGNED);
				rec->nr_address = NULL;
				rec->nr_packet = slp->ns_frag;
				STAILQ_INSERT_TAIL(&slp->ns_rec, rec, nr_link);
				++slp->ns_numrec;
				++*countp;
			}
			slp->ns_frag = NULL;
		}
	}
}
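/*
 * Illustrative sketch (not compiled in): how nfsrv_rcv() above consumes
 * nfsrv_getstream()'s return codes.  EPERM marks a corrupt stream that
 * forces a disconnect; any other error (e.g. EWOULDBLOCK from a failed
 * m_copym) just defers the work to an nfsd via SLP_NEEDQ.
 * example_consume_getstream() is a hypothetical restatement.
 */
#if 0
static void
example_consume_getstream(struct nfssvc_sock *slp, int waitflag)
{
	int count = 0;
	int error;

	error = nfsrv_getstream(slp, waitflag, &count);
	if (error == EPERM)
		slp->ns_flag |= SLP_DISCONN;	/* drop the connection */
	else if (error)
		slp->ns_flag |= SLP_NEEDQ;	/* retry from an nfsd */
}
#endif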
#ifdef INVARIANTS

/*
 * Sanity check our mbuf chain.
 */
static void
nfs_checkpkt(struct mbuf *m, int len)
{
	struct mbuf *m0 = m;
	int xlen = 0;

	while (m) {
		xlen += m->m_len;
		m = m->m_next;
	}
	if (xlen != len) {
		panic("nfs_checkpkt: len mismatch %d/%d mbuf %p\n",
		      xlen, len, m0);
	}
}

#else

static void
nfs_checkpkt(struct mbuf *m __unused, int len __unused)
{
}

#endif

/*
 * Parse an RPC header.
 */
int
nfsrv_dorec(struct nfssvc_sock *slp, struct nfsd *nfsd,
	    struct nfsrv_descript **ndp)
{
	struct nfsrv_rec *rec;
	struct mbuf *m;
	struct sockaddr *nam;
	struct nfsrv_descript *nd;
	int error;

	*ndp = NULL;
	if ((slp->ns_flag & SLP_VALID) == 0 || !STAILQ_FIRST(&slp->ns_rec))
		return (ENOBUFS);
	rec = STAILQ_FIRST(&slp->ns_rec);
	STAILQ_REMOVE_HEAD(&slp->ns_rec, nr_link);
	KKASSERT(slp->ns_numrec > 0);
	--slp->ns_numrec;
	nam = rec->nr_address;
	m = rec->nr_packet;
	kfree(rec, M_NFSRVDESC);
	MALLOC(nd, struct nfsrv_descript *, sizeof (struct nfsrv_descript),
	       M_NFSRVDESC, M_WAITOK);
	nd->nd_md = nd->nd_mrep = m;
	nd->nd_nam2 = nam;
	nd->nd_dpos = mtod(m, caddr_t);
	error = nfs_getreq(nd, nfsd, TRUE);
	if (error) {
		if (nam) {
			FREE(nam, M_SONAME);
		}
		kfree((caddr_t)nd, M_NFSRVDESC);
		return (error);
	}
	*ndp = nd;
	nfsd->nfsd_nd = nd;
	return (0);
}

/*
 * Try to assign service sockets to nfsd threads based on the number
 * of new rpc requests that have been queued on the service socket.
 *
 * If no nfsds are available or additional requests are pending, set the
 * NFSD_CHECKSLP flag so that one of the running nfsds will go look for
 * the work in the nfssvc_sock list when it is finished processing its
 * current work.  This flag is only cleared when an nfsd cannot find
 * any new work to perform.
 */
void
nfsrv_wakenfsd(struct nfssvc_sock *slp, int nparallel)
{
	struct nfsd *nd;

	if ((slp->ns_flag & SLP_VALID) == 0)
		return;
	if (nparallel <= 1)
		nparallel = 1;
	TAILQ_FOREACH(nd, &nfsd_head, nfsd_chain) {
		if (nd->nfsd_flag & NFSD_WAITING) {
			nd->nfsd_flag &= ~NFSD_WAITING;
			if (nd->nfsd_slp)
				panic("nfsd wakeup");
			slp->ns_sref++;
			nd->nfsd_slp = slp;
			wakeup((caddr_t)nd);
			if (--nparallel == 0)
				break;
		}
	}
	if (nparallel) {
		slp->ns_flag |= SLP_DOREC;
		nfsd_head_flag |= NFSD_CHECKSLP;
	}
}

#endif /* NFS_NOSERVER */
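/*
 * Illustrative sketch (not compiled in): the nfsd-side consumption loop
 * implied by nfsrv_dorec() above.  ENOBUFS simply means no complete
 * record is queued yet.  example_drain_socket() is hypothetical (the
 * real loop lives in the nfsd service code, inside NFS_NOSERVER's
 * protection) and is shown here only to document the contract.
 */
#if 0
static void
example_drain_socket(struct nfssvc_sock *slp, struct nfsd *nfsd)
{
	struct nfsrv_descript *nd;

	while (nfsrv_dorec(slp, nfsd, &nd) == 0) {
		/* dispatch via the nfsrv3_procs[] table, then clean up */
	}
}
#endif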