/*
 * Copyright (c) 1989, 1991, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_socket.c	8.5 (Berkeley) 3/30/95
 * $FreeBSD: src/sys/nfs/nfs_socket.c,v 1.60.2.6 2003/03/26 01:44:46 alfred Exp $
 * $DragonFly: src/sys/vfs/nfs/nfs_socket.c,v 1.45 2007/05/18 17:05:13 dillon Exp $
 */

/*
 * Socket operations for use by nfs
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/protosw.h>
#include <sys/resourcevar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/socketops.h>
#include <sys/syslog.h>
#include <sys/thread.h>
#include <sys/tprintf.h>
#include <sys/sysctl.h>
#include <sys/signalvar.h>
#include <sys/mutex.h>

#include <sys/signal2.h>
#include <sys/mutex2.h>

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/thread2.h>

#include "rpcv2.h"
#include "nfsproto.h"
#include "nfs.h"
#include "xdr_subs.h"
#include "nfsm_subs.h"
#include "nfsmount.h"
#include "nfsnode.h"
#include "nfsrtt.h"

#define	TRUE	1
#define	FALSE	0
/*
 * RTT calculations are scaled by 256 (8 bits).  A proper fractional
 * RTT will still be calculated even with a slow NFS timer.
 */
#define	NFS_SRTT(r)	(r)->r_nmp->nm_srtt[proct[(r)->r_procnum]]
#define	NFS_SDRTT(r)	(r)->r_nmp->nm_sdrtt[proct[(r)->r_procnum]]
#define NFS_RTT_SCALE_BITS	8	/* bits */
#define NFS_RTT_SCALE		256	/* value */

/*
 * Defines which timer to use for the procnum.
 * 0 - default
 * 1 - getattr
 * 2 - lookup
 * 3 - read
 * 4 - write
 * 5 - commit
 */
static int proct[NFS_NPROCS] = {
	0, 1, 0, 2, 1, 3, 3, 4, 0, 0,	/* 00-09	*/
	0, 0, 0, 0, 0, 0, 3, 3, 0, 0,	/* 10-19	*/
	0, 5, 0, 0, 0, 0,		/* 20-29	*/
};

static int multt[NFS_NPROCS] = {
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1,	/* 00-09	*/
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1,	/* 10-19	*/
	1, 2, 1, 1, 1, 1,		/* 20-29	*/
};

static int nfs_backoff[8] = { 2, 3, 5, 8, 13, 21, 34, 55 };
static int nfs_realign_test;
static int nfs_realign_count;
static int nfs_showrtt;
static int nfs_showrexmit;
int nfs_maxasyncbio = NFS_MAXASYNCBIO;

SYSCTL_DECL(_vfs_nfs);

SYSCTL_INT(_vfs_nfs, OID_AUTO, realign_test, CTLFLAG_RW, &nfs_realign_test, 0, "");
SYSCTL_INT(_vfs_nfs, OID_AUTO, realign_count, CTLFLAG_RW, &nfs_realign_count, 0, "");
SYSCTL_INT(_vfs_nfs, OID_AUTO, showrtt, CTLFLAG_RW, &nfs_showrtt, 0, "");
SYSCTL_INT(_vfs_nfs, OID_AUTO, showrexmit, CTLFLAG_RW, &nfs_showrexmit, 0, "");
SYSCTL_INT(_vfs_nfs, OID_AUTO, maxasyncbio, CTLFLAG_RW, &nfs_maxasyncbio, 0, "");

static int nfs_request_setup(nfsm_info_t info);
static int nfs_request_auth(struct nfsreq *rep);
static int nfs_request_try(struct nfsreq *rep);
static int nfs_request_waitreply(struct nfsreq *rep);
static int nfs_request_processreply(nfsm_info_t info, int);

int nfsrtton = 0;
struct nfsrtt nfsrtt;
struct callout	nfs_timer_handle;

static int	nfs_msg (struct thread *, char *, char *);
static int	nfs_rcvlock (struct nfsmount *nmp, struct nfsreq *myreq);
static void	nfs_rcvunlock (struct nfsmount *nmp);
static void	nfs_realign (struct mbuf **pm, int hsiz);
static int	nfs_receive (struct nfsmount *nmp, struct nfsreq *rep,
				struct sockaddr **aname, struct mbuf **mp);
static void	nfs_softterm (struct nfsreq *rep, int islocked);
static void	nfs_hardterm (struct nfsreq *rep, int islocked);
static int	nfs_reconnect (struct nfsmount *nmp, struct nfsreq *rep);
#ifndef NFS_NOSERVER
static int	nfsrv_getstream (struct nfssvc_sock *, int, int *);
static void	nfs_timer_req(struct nfsreq *req);
static void	nfs_checkpkt(struct mbuf *m, int len);

int (*nfsrv3_procs[NFS_NPROCS]) (struct nfsrv_descript *nd,
				    struct nfssvc_sock *slp,
				    struct thread *td,
				    struct mbuf **mreqp) = {
	nfsrv_null,
	nfsrv_getattr,
	nfsrv_setattr,
	nfsrv_lookup,
	nfsrv3_access,
	nfsrv_readlink,
	nfsrv_read,
	nfsrv_write,
	nfsrv_create,
	nfsrv_mkdir,
	nfsrv_symlink,
	nfsrv_mknod,
	nfsrv_remove,
	nfsrv_rmdir,
	nfsrv_rename,
	nfsrv_link,
	nfsrv_readdir,
	nfsrv_readdirplus,
	nfsrv_statfs,
	nfsrv_fsinfo,
	nfsrv_pathconf,
	nfsrv_commit,
	nfsrv_noop,
	nfsrv_noop,
	nfsrv_noop,
	nfsrv_noop
};
#endif /* NFS_NOSERVER */
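/*
 * Illustrative sketch (not compiled): how the 8-bit fixed-point RTT
 * scaling above plays out in practice.  Values such as nm_srtt[] are
 * stored pre-multiplied by NFS_RTT_SCALE (256), so a smoothed RTT of,
 * say, 3.5 timer ticks is stored as 3.5 * 256 = 896.  Descaling with a
 * right shift of NFS_RTT_SCALE_BITS recovers whole ticks while the
 * fraction is preserved across intermediate computations.  The numbers
 * below are made up for illustration.
 */
#if 0
static void
nfs_rtt_scale_example(void)
{
	int srtt_scaled = 896;		/* 3.5 ticks, scaled by 256 */
	int sdrtt_scaled = 256;		/* 1.0 tick, scaled by 256 */
	int timeo;

	/* timeout candidates stay scaled while being combined... */
	timeo = srtt_scaled + sdrtt_scaled;	/* 4.5 ticks, scaled */

	/* ...and are descaled (with +1 slop for timer jitter) at the end */
	timeo = (timeo >> NFS_RTT_SCALE_BITS) + 1;	/* 5 ticks */
}
#endif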
/*
 * Initialize sockets and congestion for a new NFS connection.
 * We do not free the sockaddr if error.
 */
int
nfs_connect(struct nfsmount *nmp, struct nfsreq *rep)
{
	struct socket *so;
	int error;
	struct sockaddr *saddr;
	struct sockaddr_in *sin;
	struct thread *td = &thread0; /* only used for socreate and sobind */

	nmp->nm_so = so = NULL;
	if (nmp->nm_flag & NFSMNT_FORCE)
		return (EINVAL);
	saddr = nmp->nm_nam;
	error = socreate(saddr->sa_family, &so, nmp->nm_sotype,
			 nmp->nm_soproto, td);
	if (error)
		goto bad;
	nmp->nm_soflags = so->so_proto->pr_flags;

	/*
	 * Some servers require that the client port be a reserved port
	 * number.
	 */
	if (saddr->sa_family == AF_INET && (nmp->nm_flag & NFSMNT_RESVPORT)) {
		struct sockopt sopt;
		int ip;
		struct sockaddr_in ssin;

		bzero(&sopt, sizeof sopt);
		ip = IP_PORTRANGE_LOW;
		sopt.sopt_level = IPPROTO_IP;
		sopt.sopt_name = IP_PORTRANGE;
		sopt.sopt_val = (void *)&ip;
		sopt.sopt_valsize = sizeof(ip);
		sopt.sopt_td = NULL;
		error = sosetopt(so, &sopt);
		if (error)
			goto bad;
		bzero(&ssin, sizeof ssin);
		sin = &ssin;
		sin->sin_len = sizeof (struct sockaddr_in);
		sin->sin_family = AF_INET;
		sin->sin_addr.s_addr = INADDR_ANY;
		sin->sin_port = htons(0);
		error = sobind(so, (struct sockaddr *)sin, td);
		if (error)
			goto bad;
		bzero(&sopt, sizeof sopt);
		ip = IP_PORTRANGE_DEFAULT;
		sopt.sopt_level = IPPROTO_IP;
		sopt.sopt_name = IP_PORTRANGE;
		sopt.sopt_val = (void *)&ip;
		sopt.sopt_valsize = sizeof(ip);
		sopt.sopt_td = NULL;
		error = sosetopt(so, &sopt);
		if (error)
			goto bad;
	}

	/*
	 * Protocols that do not require connections may be optionally left
	 * unconnected for servers that reply from a port other than
	 * NFS_PORT.
	 */
	if (nmp->nm_flag & NFSMNT_NOCONN) {
		if (nmp->nm_soflags & PR_CONNREQUIRED) {
			error = ENOTCONN;
			goto bad;
		}
	} else {
		error = soconnect(so, nmp->nm_nam, td);
		if (error)
			goto bad;

		/*
		 * Wait for the connection to complete.  Cribbed from the
		 * connect system call but with the wait timing out so
		 * that interruptible mounts don't hang here for a long time.
		 */
		crit_enter();
		while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) {
			(void) tsleep((caddr_t)&so->so_timeo, 0,
				      "nfscon", 2 * hz);
			if ((so->so_state & SS_ISCONNECTING) &&
			    so->so_error == 0 && rep &&
			    (error = nfs_sigintr(nmp, rep, rep->r_td)) != 0) {
				so->so_state &= ~SS_ISCONNECTING;
				crit_exit();
				goto bad;
			}
		}
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			crit_exit();
			goto bad;
		}
		crit_exit();
	}
	so->so_rcv.ssb_timeo = (5 * hz);
	so->so_snd.ssb_timeo = (5 * hz);
	/*
	 * Get buffer reservation size from sysctl, but impose reasonable
	 * limits.
	 */
	if (nmp->nm_sotype == SOCK_STREAM) {
		if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
			struct sockopt sopt;
			int val;

			bzero(&sopt, sizeof sopt);
			sopt.sopt_level = SOL_SOCKET;
			sopt.sopt_name = SO_KEEPALIVE;
			sopt.sopt_val = &val;
			sopt.sopt_valsize = sizeof val;
			val = 1;
			sosetopt(so, &sopt);
		}
		if (so->so_proto->pr_protocol == IPPROTO_TCP) {
			struct sockopt sopt;
			int val;

			bzero(&sopt, sizeof sopt);
			sopt.sopt_level = IPPROTO_TCP;
			sopt.sopt_name = TCP_NODELAY;
			sopt.sopt_val = &val;
			sopt.sopt_valsize = sizeof val;
			val = 1;
			sosetopt(so, &sopt);
		}
	}
	error = soreserve(so, nfs_soreserve, nfs_soreserve, NULL);
	if (error)
		goto bad;
	atomic_set_int(&so->so_rcv.ssb_flags, SSB_NOINTR);
	atomic_set_int(&so->so_snd.ssb_flags, SSB_NOINTR);

	/* Initialize other non-zero congestion variables */
	nmp->nm_srtt[0] = nmp->nm_srtt[1] = nmp->nm_srtt[2] =
		nmp->nm_srtt[3] = (NFS_TIMEO << NFS_RTT_SCALE_BITS);
	nmp->nm_sdrtt[0] = nmp->nm_sdrtt[1] = nmp->nm_sdrtt[2] =
		nmp->nm_sdrtt[3] = 0;
	nmp->nm_maxasync_scaled = NFS_MINASYNC_SCALED;
	nmp->nm_timeouts = 0;

	/*
	 * Assign nm_so last.  The moment nm_so is assigned the nfs_timer()
	 * can mess with the socket.
	 */
	nmp->nm_so = so;
	return (0);

bad:
	if (so) {
		soshutdown(so, SHUT_RDWR);
		soclose(so, FNONBLOCK);
	}
	return (error);
}

/*
 * Reconnect routine:
 * Called when a connection is broken on a reliable protocol.
 * - clean up the old socket
 * - nfs_connect() again
 * - set R_NEEDSXMIT for all outstanding requests on mount point
 * If this fails the mount point is DEAD!
 * nb: Must be called with the nfs_sndlock() set on the mount point.
 */
static int
nfs_reconnect(struct nfsmount *nmp, struct nfsreq *rep)
{
	struct nfsreq *req;
	int error;

	nfs_disconnect(nmp);
	if (nmp->nm_rxstate >= NFSSVC_STOPPING)
		return (EINTR);
	while ((error = nfs_connect(nmp, rep)) != 0) {
		if (error == EINTR || error == ERESTART)
			return (EINTR);
		if (error == EINVAL)
			return (error);
		if (nmp->nm_rxstate >= NFSSVC_STOPPING)
			return (EINTR);
		(void) tsleep((caddr_t)&lbolt, 0, "nfscon", 0);
	}

	/*
	 * Loop through outstanding request list and fix up all requests
	 * on old socket.
	 */
	crit_enter();
	TAILQ_FOREACH(req, &nmp->nm_reqq, r_chain) {
		KKASSERT(req->r_nmp == nmp);
		req->r_flags |= R_NEEDSXMIT;
	}
	crit_exit();
	return (0);
}

/*
 * NFS disconnect.  Clean up and unlink.
 */
void
nfs_disconnect(struct nfsmount *nmp)
{
	struct socket *so;

	if (nmp->nm_so) {
		so = nmp->nm_so;
		nmp->nm_so = NULL;
		soshutdown(so, SHUT_RDWR);
		soclose(so, FNONBLOCK);
	}
}

void
nfs_safedisconnect(struct nfsmount *nmp)
{
	nfs_rcvlock(nmp, NULL);
	nfs_disconnect(nmp);
	nfs_rcvunlock(nmp);
}
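/*
 * Illustrative sketch (not compiled): the calling convention for the
 * connect/reconnect path.  nfs_reconnect() assumes the caller already
 * holds the transmit lock, so a typical recovery sequence in this file
 * (see nfs_receive()) looks like the following, with error handling
 * elided.
 */
#if 0
static void
nfs_reconnect_usage_example(struct nfsmount *nmp, struct nfsreq *rep)
{
	if (nfs_sndlock(nmp, rep) == 0) {
		if (nfs_reconnect(nmp, rep) == 0) {
			/* retransmit requests flagged R_NEEDSXMIT here */
		}
		nfs_sndunlock(nmp);
	}
}
#endif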
/*
 * This is the nfs send routine.  For connection based socket types, it
 * must be called with an nfs_sndlock() on the socket.
 * "rep == NULL" indicates that it has been called from a server.
 * For the client side:
 * - return EINTR if the RPC is terminated, 0 otherwise
 * - set R_NEEDSXMIT if the send fails for any reason
 * - do any cleanup required by recoverable socket errors (?)
 * For the server side:
 * - return EINTR or ERESTART if interrupted by a signal
 * - return EPIPE if a connection is lost for connection based sockets (TCP...)
 * - do any cleanup required by recoverable socket errors (?)
 */
int
nfs_send(struct socket *so, struct sockaddr *nam, struct mbuf *top,
	 struct nfsreq *rep)
{
	struct sockaddr *sendnam;
	int error, soflags, flags;

	if (rep) {
		if (rep->r_flags & R_SOFTTERM) {
			m_freem(top);
			return (EINTR);
		}
		if ((so = rep->r_nmp->nm_so) == NULL) {
			rep->r_flags |= R_NEEDSXMIT;
			m_freem(top);
			return (0);
		}
		rep->r_flags &= ~R_NEEDSXMIT;
		soflags = rep->r_nmp->nm_soflags;
	} else {
		soflags = so->so_proto->pr_flags;
	}
	if ((soflags & PR_CONNREQUIRED) || (so->so_state & SS_ISCONNECTED))
		sendnam = NULL;
	else
		sendnam = nam;
	if (so->so_type == SOCK_SEQPACKET)
		flags = MSG_EOR;
	else
		flags = 0;

	/*
	 * calls pru_sosend -> sosend -> so_pru_send -> netrpc
	 */
	error = so_pru_sosend(so, sendnam, NULL, top, NULL, flags,
			      curthread /*XXX*/);

	/*
	 * ENOBUFS for dgram sockets is transient and non-fatal.
	 * No need to log, and no need to break a soft mount.
	 */
	if (error == ENOBUFS && so->so_type == SOCK_DGRAM) {
		error = 0;
		/*
		 * do backoff retransmit on client
		 */
		if (rep) {
			if ((rep->r_nmp->nm_state & NFSSTA_SENDSPACE) == 0) {
				rep->r_nmp->nm_state |= NFSSTA_SENDSPACE;
				kprintf("Warning: NFS: Insufficient sendspace "
					"(%lu),\n"
					"\t You must increase vfs.nfs.soreserve "
					"or decrease vfs.nfs.maxasyncbio\n",
					so->so_snd.ssb_hiwat);
			}
			rep->r_flags |= R_NEEDSXMIT;
		}
	}

	if (error) {
		if (rep) {
			log(LOG_INFO, "nfs send error %d for server %s\n",
			    error,
			    rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname);
			/*
			 * Deal with errors for the client side.
			 */
			if (rep->r_flags & R_SOFTTERM)
				error = EINTR;
			else
				rep->r_flags |= R_NEEDSXMIT;
		} else {
			log(LOG_INFO, "nfsd send error %d\n", error);
		}

		/*
		 * Handle any recoverable (soft) socket errors here. (?)
		 */
		if (error != EINTR && error != ERESTART &&
		    error != EWOULDBLOCK && error != EPIPE)
			error = 0;
	}
	return (error);
}
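/*
 * Illustrative sketch (not compiled): the Sun RPC record mark used on
 * stream sockets.  Each RPC is prefixed with a 32-bit big-endian word
 * whose high bit marks the final fragment and whose low 31 bits carry
 * the fragment length.  nfs_request_auth() prepends the mark and
 * nfs_receive() strips it, as sketched below with a made-up length.
 */
#if 0
static void
nfs_record_mark_example(void)
{
	u_int32_t mark;
	u_int32_t len;

	/* sender: final fragment of a 512 byte record */
	mark = htonl(0x80000000 | 512);

	/* receiver: recover the length, ignoring the last-fragment bit */
	len = ntohl(mark) & ~0x80000000;
}
#endif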
/*
 * Receive a Sun RPC Request/Reply.  For SOCK_DGRAM, the work is all
 * done by soreceive(), but for SOCK_STREAM we must deal with the Record
 * Mark and consolidate the data into a new mbuf list.
 * nb: Sometimes TCP passes the data up to soreceive() in long lists of
 *     small mbufs.
 * For SOCK_STREAM we must be very careful to read an entire record once
 * we have read any of it, even if the system call has been interrupted.
 */
static int
nfs_receive(struct nfsmount *nmp, struct nfsreq *rep,
	    struct sockaddr **aname, struct mbuf **mp)
{
	struct socket *so;
	struct sockbuf sio;
	struct uio auio;
	struct iovec aio;
	struct mbuf *m;
	struct mbuf *control;
	u_int32_t len;
	struct sockaddr **getnam;
	int error, sotype, rcvflg;
	struct thread *td = curthread;	/* XXX */

	/*
	 * Set up arguments for soreceive()
	 */
	*mp = NULL;
	*aname = NULL;
	sotype = nmp->nm_sotype;

	/*
	 * For reliable protocols, lock against other senders/receivers
	 * in case a reconnect is necessary.
	 * For SOCK_STREAM, first get the Record Mark to find out how much
	 * more there is to get.
	 * We must lock the socket against other receivers
	 * until we have an entire rpc request/reply.
	 */
	if (sotype != SOCK_DGRAM) {
		error = nfs_sndlock(nmp, rep);
		if (error)
			return (error);
tryagain:
		/*
		 * Check for fatal errors and resending request.
		 */
		/*
		 * Ugh: If a reconnect attempt just happened, nm_so
		 * would have changed.  NULL indicates a failed
		 * attempt that has essentially shut down this
		 * mount point.
		 */
		if (rep && (rep->r_mrep || (rep->r_flags & R_SOFTTERM))) {
			nfs_sndunlock(nmp);
			return (EINTR);
		}
		so = nmp->nm_so;
		if (so == NULL) {
			error = nfs_reconnect(nmp, rep);
			if (error) {
				nfs_sndunlock(nmp);
				return (error);
			}
			goto tryagain;
		}
		while (rep && (rep->r_flags & R_NEEDSXMIT)) {
			m = m_copym(rep->r_mreq, 0, M_COPYALL, MB_WAIT);
			nfsstats.rpcretries++;
			error = nfs_send(so, rep->r_nmp->nm_nam, m, rep);
			if (error) {
				if (error == EINTR || error == ERESTART ||
				    (error = nfs_reconnect(nmp, rep)) != 0) {
					nfs_sndunlock(nmp);
					return (error);
				}
				goto tryagain;
			}
		}
		nfs_sndunlock(nmp);
		if (sotype == SOCK_STREAM) {
			/*
			 * Get the length marker from the stream
			 */
			aio.iov_base = (caddr_t)&len;
			aio.iov_len = sizeof(u_int32_t);
			auio.uio_iov = &aio;
			auio.uio_iovcnt = 1;
			auio.uio_segflg = UIO_SYSSPACE;
			auio.uio_rw = UIO_READ;
			auio.uio_offset = 0;
			auio.uio_resid = sizeof(u_int32_t);
			auio.uio_td = td;
			do {
				rcvflg = MSG_WAITALL;
				error = so_pru_soreceive(so, NULL, &auio, NULL,
							 NULL, &rcvflg);
				if (error == EWOULDBLOCK && rep) {
					if (rep->r_flags & R_SOFTTERM)
						return (EINTR);
				}
			} while (error == EWOULDBLOCK);

			if (error == 0 && auio.uio_resid > 0) {
				/*
				 * Only log short packets if not EOF
				 */
				if (auio.uio_resid != sizeof(u_int32_t))
					log(LOG_INFO,
					    "short receive (%d/%d) from nfs server %s\n",
					    (int)(sizeof(u_int32_t) - auio.uio_resid),
					    (int)sizeof(u_int32_t),
					    nmp->nm_mountp->mnt_stat.f_mntfromname);
				error = EPIPE;
			}
			if (error)
				goto errout;
			len = ntohl(len) & ~0x80000000;
			/*
			 * This is SERIOUS!  We are out of sync with the
			 * sender and forcing a disconnect/reconnect is all
			 * I can do.
			 */
			if (len > NFS_MAXPACKET) {
				log(LOG_ERR, "%s (%d) from nfs server %s\n",
				    "impossible packet length",
				    len,
				    nmp->nm_mountp->mnt_stat.f_mntfromname);
				error = EFBIG;
				goto errout;
			}

			/*
			 * Get the rest of the packet as an mbuf chain
			 */
			sbinit(&sio, len);
			do {
				rcvflg = MSG_WAITALL;
				error = so_pru_soreceive(so, NULL, NULL, &sio,
							 NULL, &rcvflg);
			} while (error == EWOULDBLOCK || error == EINTR ||
				 error == ERESTART);
			if (error == 0 && sio.sb_cc != len) {
				if (sio.sb_cc != 0)
					log(LOG_INFO,
					    "short receive (%zu/%d) from nfs server %s\n",
					    (size_t)sio.sb_cc, len,
					    nmp->nm_mountp->mnt_stat.f_mntfromname);
				error = EPIPE;
			}
			*mp = sio.sb_mb;
		} else {
			/*
			 * Non-stream, so get the whole packet by not
			 * specifying MSG_WAITALL and by specifying a large
			 * length.
			 *
			 * We have no use for control msg., but must grab them
			 * and then throw them away so we know what is going
			 * on.
			 */
			sbinit(&sio, 100000000);
			do {
				rcvflg = 0;
				error = so_pru_soreceive(so, NULL, NULL, &sio,
							 &control, &rcvflg);
				if (control)
					m_freem(control);
				if (error == EWOULDBLOCK && rep) {
					if (rep->r_flags & R_SOFTTERM) {
						m_freem(sio.sb_mb);
						return (EINTR);
					}
				}
			} while (error == EWOULDBLOCK ||
				 (error == 0 && sio.sb_mb == NULL && control));
			if ((rcvflg & MSG_EOR) == 0)
				kprintf("Egad!!\n");
			if (error == 0 && sio.sb_mb == NULL)
				error = EPIPE;
			len = sio.sb_cc;
			*mp = sio.sb_mb;
		}
errout:
		if (error && error != EINTR && error != ERESTART) {
			m_freem(*mp);
			*mp = NULL;
			if (error != EPIPE) {
				log(LOG_INFO,
				    "receive error %d from nfs server %s\n",
				    error,
				    nmp->nm_mountp->mnt_stat.f_mntfromname);
			}
			error = nfs_sndlock(nmp, rep);
			if (!error) {
				error = nfs_reconnect(nmp, rep);
				if (!error)
					goto tryagain;
				else
					nfs_sndunlock(nmp);
			}
		}
	} else {
		if ((so = nmp->nm_so) == NULL)
			return (EACCES);
		if (so->so_state & SS_ISCONNECTED)
			getnam = NULL;
		else
			getnam = aname;
		sbinit(&sio, 100000000);
		do {
			rcvflg = 0;
			error = so_pru_soreceive(so, getnam, NULL, &sio,
						 NULL, &rcvflg);
			if (error == EWOULDBLOCK && rep &&
			    (rep->r_flags & R_SOFTTERM)) {
				m_freem(sio.sb_mb);
				return (EINTR);
			}
		} while (error == EWOULDBLOCK);

		len = sio.sb_cc;
		*mp = sio.sb_mb;

		/*
		 * A shutdown may result in no error and no mbuf.
		 * Convert to EPIPE.
		 */
		if (*mp == NULL && error == 0)
			error = EPIPE;
	}
	if (error) {
		m_freem(*mp);
		*mp = NULL;
	}

	/*
	 * Search for any mbufs that are not a multiple of 4 bytes long
	 * or with m_data not longword aligned.
	 * These could cause pointer alignment problems, so copy them to
	 * well aligned mbufs.
	 */
	nfs_realign(mp, 5 * NFSX_UNSIGNED);
	return (error);
}
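/*
 * Note (added commentary): nfs_realign() above guarantees that every
 * mbuf handed back is 4-byte aligned and a multiple of 4 bytes long,
 * which is what lets the nfsm_dissect() calls in nfs_reply() and the
 * XDR macros cast m_data to u_int32_t * and walk the buffer a word at
 * a time without risking unaligned accesses on strict-alignment
 * platforms.
 */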
/*
 * Implement receipt of reply on a socket.
 *
 * We must search through the list of received datagrams matching them
 * with outstanding requests using the xid, until ours is found.
 *
 * If myrep is NULL we process packets on the socket until
 * interrupted or until nm_reqrxq is non-empty.
 */
/* ARGSUSED */
int
nfs_reply(struct nfsmount *nmp, struct nfsreq *myrep)
{
	struct nfsreq *rep;
	struct sockaddr *nam;
	u_int32_t rxid;
	u_int32_t *tl;
	int error;
	struct nfsm_info info;

	/*
	 * Loop around until we get our own reply
	 */
	for (;;) {
		/*
		 * Lock against other receivers so that I don't get stuck in
		 * sbwait() after someone else has received my reply for me.
		 * Also necessary for connection based protocols to avoid
		 * race conditions during a reconnect.
		 *
		 * If nfs_rcvlock() returns EALREADY, that means that
		 * the reply has already been received by another
		 * process and we can return immediately.  In this
		 * case, the lock is not taken to avoid races with
		 * other processes.
		 */
		info.mrep = NULL;

		error = nfs_rcvlock(nmp, myrep);
		if (error == EALREADY)
			return (0);
		if (error)
			return (error);

		/*
		 * If myrep is NULL we are the receiver helper thread.
		 * Stop waiting for incoming replies if there are
		 * messages sitting on reqrxq that we need to process,
		 * or if a shutdown request is pending.
		 */
		if (myrep == NULL && (TAILQ_FIRST(&nmp->nm_reqrxq) ||
		    nmp->nm_rxstate > NFSSVC_PENDING)) {
			nfs_rcvunlock(nmp);
			return (EWOULDBLOCK);
		}

		/*
		 * Get the next Rpc reply off the socket
		 *
		 * We cannot release the receive lock until we've
		 * filled in rep->r_mrep, otherwise a waiting
		 * thread may deadlock in soreceive with no incoming
		 * packets expected.
		 */
		error = nfs_receive(nmp, myrep, &nam, &info.mrep);
		if (error) {
			/*
			 * Ignore routing errors on connectionless protocols??
			 */
			nfs_rcvunlock(nmp);
			if (NFSIGNORE_SOERROR(nmp->nm_soflags, error)) {
				if (nmp->nm_so == NULL)
					return (error);
				nmp->nm_so->so_error = 0;
				continue;
			}
			return (error);
		}
		if (nam)
			FREE(nam, M_SONAME);

		/*
		 * Get the xid and check that it is an rpc reply
		 */
		info.md = info.mrep;
		info.dpos = mtod(info.md, caddr_t);
		NULLOUT(tl = nfsm_dissect(&info, 2 * NFSX_UNSIGNED));
		rxid = *tl++;
		if (*tl != rpc_reply) {
			nfsstats.rpcinvalid++;
			m_freem(info.mrep);
			info.mrep = NULL;
nfsmout:
			nfs_rcvunlock(nmp);
			continue;
		}

		/*
		 * Loop through the request list to match up the reply
		 * Iff no match, just drop the datagram.  On match, set
		 * r_mrep atomically to prevent the timer from messing
		 * around with the request after we have exited the critical
		 * section.
		 */
		crit_enter();
		TAILQ_FOREACH(rep, &nmp->nm_reqq, r_chain) {
			if (rep->r_mrep == NULL && rxid == rep->r_xid)
				break;
		}

		/*
		 * Fill in the rest of the reply if we found a match.
		 *
		 * Deal with duplicate responses if there was no match.
		 */
		if (rep) {
			rep->r_md = info.md;
			rep->r_dpos = info.dpos;
			if (nfsrtton) {
				struct rttl *rt;

				rt = &nfsrtt.rttl[nfsrtt.pos];
				rt->proc = rep->r_procnum;
				rt->rto = 0;
				rt->sent = 0;
				rt->cwnd = nmp->nm_maxasync_scaled;
				rt->srtt = nmp->nm_srtt[proct[rep->r_procnum] - 1];
				rt->sdrtt = nmp->nm_sdrtt[proct[rep->r_procnum] - 1];
				rt->fsid = nmp->nm_mountp->mnt_stat.f_fsid;
				getmicrotime(&rt->tstamp);
				if (rep->r_flags & R_TIMING)
					rt->rtt = rep->r_rtt;
				else
					rt->rtt = 1000000;
				nfsrtt.pos = (nfsrtt.pos + 1) % NFSRTTLOGSIZ;
			}

			/*
			 * New congestion control is based only on async
			 * requests.
			 */
			if (nmp->nm_maxasync_scaled < NFS_MAXASYNC_SCALED)
				++nmp->nm_maxasync_scaled;
			if (rep->r_flags & R_SENT) {
				rep->r_flags &= ~R_SENT;
			}
			/*
			 * Update rtt using a gain of 0.125 on the mean
			 * and a gain of 0.25 on the deviation.
			 *
			 * NOTE SRTT/SDRTT are only good if R_TIMING is set.
			 */
			if ((rep->r_flags & R_TIMING) && rep->r_rexmit == 0) {
				/*
				 * Since the timer resolution of
				 * NFS_HZ is so coarse, it can often
				 * result in r_rtt == 0.  Since
				 * r_rtt == N means that the actual
				 * rtt is between N+dt and N+2-dt ticks,
				 * add 1.
				 */
				int n;
				int d;

#define NFSRSB	NFS_RTT_SCALE_BITS
				n = ((NFS_SRTT(rep) * 7) +
				     (rep->r_rtt << NFSRSB)) >> 3;
				d = n - NFS_SRTT(rep);
				NFS_SRTT(rep) = n;

				/*
				 * Don't let the jitter calculation decay
				 * too quickly, but we want a fast rampup.
				 */
				if (d < 0)
					d = -d;
				d <<= NFSRSB;
				if (d < NFS_SDRTT(rep))
					n = ((NFS_SDRTT(rep) * 15) + d) >> 4;
				else
					n = ((NFS_SDRTT(rep) * 3) + d) >> 2;
				NFS_SDRTT(rep) = n;
#undef NFSRSB
			}
			nmp->nm_timeouts = 0;
			rep->r_mrep = info.mrep;
			nfs_hardterm(rep, 0);
		} else {
			/*
			 * Extract vers, prog, nfsver, procnum.  A duplicate
			 * response means we didn't wait long enough so
			 * we increase the SRTT to avoid future spurious
			 * timeouts.
			 */
			u_int procnum = nmp->nm_lastreprocnum;
			int n;

			if (procnum < NFS_NPROCS && proct[procnum]) {
				if (nfs_showrexmit)
					kprintf("D");
				n = nmp->nm_srtt[proct[procnum]];
				n += NFS_ASYSCALE * NFS_HZ;
				if (n < NFS_ASYSCALE * NFS_HZ * 10)
					n = NFS_ASYSCALE * NFS_HZ * 10;
				nmp->nm_srtt[proct[procnum]] = n;
			}
		}
		nfs_rcvunlock(nmp);
		crit_exit();

		/*
		 * If not matched to a request, drop it.
		 * If it's mine, get out.
		 */
		if (rep == NULL) {
			nfsstats.rpcunexpected++;
			m_freem(info.mrep);
			info.mrep = NULL;
		} else if (rep == myrep) {
			if (rep->r_mrep == NULL)
				panic("nfsreply nil");
			return (0);
		}
	}
}
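/*
 * Worked example (added commentary, made-up numbers) for the fixed
 * point smoothing above.  Everything is scaled by NFS_RTT_SCALE (256).
 * Suppose NFS_SRTT(rep) == 1024 (4.0 ticks) and a new sample arrives
 * with r_rtt == 2 ticks:
 *
 *	n = (1024 * 7 + (2 << 8)) >> 3 = 960	new srtt, 3.75 ticks
 *	d = |960 - 1024| = 64			error sample
 *
 * The error sample is then shifted up by another NFSRSB bits before
 * being folded into NFS_SDRTT() with a gain of 1/4 (or 1/16 when the
 * deviation is decaying), which keeps the deviation term from decaying
 * too quickly while still ramping up fast on jittery links.
 */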
/*
 * Run the request state machine until the target state is reached
 * or a fatal error occurs.  The target state is not run.  Specifying
 * a target of NFSM_STATE_DONE runs the state machine until the rpc
 * is complete.
 *
 * EINPROGRESS is returned for all states other than the DONE state,
 * indicating that the rpc is still in progress.
 */
int
nfs_request(struct nfsm_info *info, nfsm_state_t bstate, nfsm_state_t estate)
{
	struct nfsreq *req;

	while (info->state >= bstate && info->state < estate) {
		switch(info->state) {
		case NFSM_STATE_SETUP:
			/*
			 * Setup the nfsreq.  Any error which occurs during
			 * this state is fatal.
			 */
			info->error = nfs_request_setup(info);
			if (info->error) {
				info->state = NFSM_STATE_DONE;
				return (info->error);
			} else {
				req = info->req;
				req->r_mrp = &info->mrep;
				req->r_mdp = &info->md;
				req->r_dposp = &info->dpos;
				info->state = NFSM_STATE_AUTH;
			}
			break;
		case NFSM_STATE_AUTH:
			/*
			 * Authenticate the nfsreq.  Any error which occurs
			 * during this state is fatal.
			 */
			info->error = nfs_request_auth(info->req);
			if (info->error) {
				info->state = NFSM_STATE_DONE;
				return (info->error);
			} else {
				info->state = NFSM_STATE_TRY;
			}
			break;
		case NFSM_STATE_TRY:
			/*
			 * Transmit or retransmit attempt.  An error in this
			 * state is ignored and we always move on to the
			 * next state.
			 *
			 * This can trivially race the receiver if the
			 * request is asynchronous.  nfs_request_try()
			 * will thus set the state for us and we
			 * must also return immediately if we are
			 * running an async state machine, because
			 * info can become invalid due to races after
			 * try() returns.
			 */
			if (info->req->r_flags & R_ASYNC) {
				nfs_request_try(info->req);
				if (estate == NFSM_STATE_WAITREPLY)
					return (EINPROGRESS);
			} else {
				nfs_request_try(info->req);
				info->state = NFSM_STATE_WAITREPLY;
			}
			break;
		case NFSM_STATE_WAITREPLY:
			/*
			 * Wait for a reply or timeout and move on to the
			 * next state.  The error returned by this state
			 * is passed to the processing code in the next
			 * state.
			 */
			info->error = nfs_request_waitreply(info->req);
			info->state = NFSM_STATE_PROCESSREPLY;
			break;
		case NFSM_STATE_PROCESSREPLY:
			/*
			 * Process the reply or timeout.  Errors which occur
			 * in this state may cause the state machine to
			 * go back to an earlier state, and are fatal
			 * otherwise.
			 */
			info->error = nfs_request_processreply(info,
							       info->error);
			switch(info->error) {
			case ENEEDAUTH:
				info->state = NFSM_STATE_AUTH;
				break;
			case EAGAIN:
				info->state = NFSM_STATE_TRY;
				break;
			default:
				/*
				 * Operation complete, with or without an
				 * error.  We are done.
				 */
				info->req = NULL;
				info->state = NFSM_STATE_DONE;
				return (info->error);
			}
			break;
		case NFSM_STATE_DONE:
			/*
			 * Shouldn't be reached
			 */
			return (info->error);
			/* NOT REACHED */
		}
	}

	/*
	 * If we are done return the error code (if any).
	 * Otherwise return EINPROGRESS.
	 */
	if (info->state == NFSM_STATE_DONE)
		return (info->error);
	return (EINPROGRESS);
}
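/*
 * Illustrative sketch (not compiled): how callers typically drive the
 * state machine above.  A synchronous caller runs it all the way to
 * NFSM_STATE_DONE; the async BIO path instead stops at
 * NFSM_STATE_WAITREPLY and lets the reader thread finish the rpc.
 * The field names follow struct nfsm_info as used in this file.
 */
#if 0
static int
nfs_request_usage_example(struct nfsm_info *info)
{
	int error;

	info->state = NFSM_STATE_SETUP;
	error = nfs_request(info, NFSM_STATE_SETUP, NFSM_STATE_DONE);
	/* error is EINPROGRESS only if we stopped short of DONE */
	return (error);
}
#endif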
/*
 * nfs_request - goes something like this
 *	- fill in request struct
 *	- links it into list
 *	- calls nfs_send() for first transmit
 *	- calls nfs_receive() to get reply
 *	- break down rpc header and return with nfs reply pointed to
 *	  by mrep or error
 * nb: always frees up mreq mbuf list
 */
static int
nfs_request_setup(nfsm_info_t info)
{
	struct nfsreq *req;
	struct nfsmount *nmp;
	struct mbuf *m;
	int i;

	/*
	 * Reject requests while attempting a forced unmount.
	 */
	if (info->vp->v_mount->mnt_kern_flag & MNTK_UNMOUNTF) {
		m_freem(info->mreq);
		info->mreq = NULL;
		return (ESTALE);
	}
	nmp = VFSTONFS(info->vp->v_mount);
	req = kmalloc(sizeof(struct nfsreq), M_NFSREQ, M_WAITOK);
	req->r_nmp = nmp;
	req->r_vp = info->vp;
	req->r_td = info->td;
	req->r_procnum = info->procnum;
	req->r_mreq = NULL;
	req->r_cred = info->cred;

	i = 0;
	m = info->mreq;
	while (m) {
		i += m->m_len;
		m = m->m_next;
	}
	req->r_mrest = info->mreq;
	req->r_mrest_len = i;

	/*
	 * The presence of a non-NULL r_info in req indicates
	 * async completion via our helper threads.  See the receiver
	 * code.
	 */
	if (info->bio) {
		req->r_info = info;
		req->r_flags = R_ASYNC;
	} else {
		req->r_info = NULL;
		req->r_flags = 0;
	}
	info->req = req;
	return (0);
}

static int
nfs_request_auth(struct nfsreq *rep)
{
	struct nfsmount *nmp = rep->r_nmp;
	struct mbuf *m;
	char nickv[RPCX_NICKVERF];
	int error = 0, auth_len, auth_type;
	int verf_len;
	u_int32_t xid;
	char *auth_str, *verf_str;
	struct ucred *cred;

	cred = rep->r_cred;
	rep->r_failed_auth = 0;

	/*
	 * Get the RPC header with authorization.
	 */
	verf_str = auth_str = NULL;
	if (nmp->nm_flag & NFSMNT_KERB) {
		verf_str = nickv;
		verf_len = sizeof (nickv);
		auth_type = RPCAUTH_KERB4;
		bzero((caddr_t)rep->r_key, sizeof(rep->r_key));
		if (rep->r_failed_auth ||
		    nfs_getnickauth(nmp, cred, &auth_str, &auth_len,
				    verf_str, verf_len)) {
			error = nfs_getauth(nmp, rep, cred, &auth_str,
				&auth_len, verf_str, &verf_len, rep->r_key);
			if (error) {
				m_freem(rep->r_mrest);
				rep->r_mrest = NULL;
				kfree((caddr_t)rep, M_NFSREQ);
				return (error);
			}
		}
	} else {
		auth_type = RPCAUTH_UNIX;
		if (cred->cr_ngroups < 1)
			panic("nfsreq nogrps");
		auth_len = ((((cred->cr_ngroups - 1) > nmp->nm_numgrps) ?
			nmp->nm_numgrps : (cred->cr_ngroups - 1)) << 2) +
			5 * NFSX_UNSIGNED;
	}
	if (rep->r_mrest)
		nfs_checkpkt(rep->r_mrest, rep->r_mrest_len);
	m = nfsm_rpchead(cred, nmp->nm_flag, rep->r_procnum, auth_type,
			 auth_len, auth_str, verf_len, verf_str,
			 rep->r_mrest, rep->r_mrest_len, &rep->r_mheadend,
			 &xid);
	rep->r_mrest = NULL;
	if (auth_str)
		kfree(auth_str, M_TEMP);

	/*
	 * For stream protocols, insert a Sun RPC Record Mark.
	 */
	if (nmp->nm_sotype == SOCK_STREAM) {
		M_PREPEND(m, NFSX_UNSIGNED, MB_WAIT);
		if (m == NULL) {
			kfree(rep, M_NFSREQ);
			return (ENOBUFS);
		}
		*mtod(m, u_int32_t *) = htonl(0x80000000 |
			 (m->m_pkthdr.len - NFSX_UNSIGNED));
	}

	nfs_checkpkt(m, m->m_pkthdr.len);

	rep->r_mreq = m;
	rep->r_xid = xid;
	return (0);
}

static int
nfs_request_try(struct nfsreq *rep)
{
	struct nfsmount *nmp = rep->r_nmp;
	struct mbuf *m2;
	int error;

	/*
	 * Request is not on any queue, only the owner has access to it
	 * so it should not be locked by anyone atm.
	 *
	 * Interlock to prevent races.  While locked the only remote
	 * action possible is for r_mrep to be set (once we enqueue it).
	 */
	if (rep->r_flags == 0xdeadc0de) {
		print_backtrace(-1);
		panic("flags nbad\n");
	}
	KKASSERT((rep->r_flags & (R_LOCKED | R_ONREQQ)) == 0);
	if (nmp->nm_flag & NFSMNT_SOFT)
		rep->r_retry = nmp->nm_retry;
	else
		rep->r_retry = NFS_MAXREXMIT + 1;	/* past clip limit */
	rep->r_rtt = rep->r_rexmit = 0;
	if (proct[rep->r_procnum] > 0)
		rep->r_flags |= R_TIMING | R_LOCKED;
	else
		rep->r_flags |= R_LOCKED;
	rep->r_mrep = NULL;

	/*
	 * Do the client side RPC.
	 */
	nfsstats.rpcrequests++;

	if (nmp->nm_flag & NFSMNT_FORCE) {
		rep->r_flags |= R_SOFTTERM;
		rep->r_flags &= ~R_LOCKED;
		return (0);
	}

	/*
	 * Chain request into list of outstanding requests.  Be sure
	 * to put it LAST so timer finds oldest requests first.  Note
	 * that our control of R_LOCKED prevents the request from
	 * getting ripped out from under us or transmitted by the
	 * timer code.
	 *
	 * For requests with info structures we must atomically set the
	 * info's state because the structure could become invalid upon
	 * return due to races (i.e., if async)
	 */
	crit_enter();
	mtx_link_init(&rep->r_link);
	TAILQ_INSERT_TAIL(&nmp->nm_reqq, rep, r_chain);
	rep->r_flags |= R_ONREQQ;
	++nmp->nm_reqqlen;
	if (rep->r_flags & R_ASYNC)
		rep->r_info->state = NFSM_STATE_WAITREPLY;
	crit_exit();

	error = 0;
	/*
	 * Send if we can.  Congestion control is not handled here any more
	 * because trying to defer the initial send based on the nfs_timer
	 * requires having a very fast nfs_timer, which is silly.
	 */
	if (nmp->nm_so) {
		if (nmp->nm_soflags & PR_CONNREQUIRED)
			error = nfs_sndlock(nmp, rep);
		if (error == 0) {
			m2 = m_copym(rep->r_mreq, 0, M_COPYALL, MB_WAIT);
			error = nfs_send(nmp->nm_so, nmp->nm_nam, m2, rep);
			if (nmp->nm_soflags & PR_CONNREQUIRED)
				nfs_sndunlock(nmp);
			rep->r_flags &= ~R_NEEDSXMIT;
			if ((rep->r_flags & R_SENT) == 0) {
				rep->r_flags |= R_SENT;
			}
		} else {
			rep->r_flags |= R_NEEDSXMIT;
		}
	} else {
		rep->r_flags |= R_NEEDSXMIT;
		rep->r_rtt = -1;
	}
	if (error == EPIPE)
		error = 0;

	/*
	 * Release the lock.  The only remote action that may have occurred
	 * would have been the setting of rep->r_mrep.  If this occurred
	 * and the request was async we have to move it to the reader
	 * thread's queue for action.
	 *
	 * For async requests also make sure the reader is woken up so
	 * it gets on the socket to read responses.
	 */
	crit_enter();
	if (rep->r_flags & R_ASYNC) {
		if (rep->r_mrep)
			nfs_hardterm(rep, 1);
		rep->r_flags &= ~R_LOCKED;
		nfssvc_iod_reader_wakeup(nmp);
	} else {
		rep->r_flags &= ~R_LOCKED;
	}
	if (rep->r_flags & R_WANTED) {
		rep->r_flags &= ~R_WANTED;
		wakeup(rep);
	}
	crit_exit();
	return (error);
}

/*
 * This code is only called for synchronous requests.  Completed synchronous
 * requests are left on reqq and we remove them before moving on to the
 * processing state.
 */
static int
nfs_request_waitreply(struct nfsreq *rep)
{
	struct nfsmount *nmp = rep->r_nmp;
	int error;

	KKASSERT((rep->r_flags & R_ASYNC) == 0);

	/*
	 * Wait until the request is finished.
	 */
	error = nfs_reply(nmp, rep);

	/*
	 * RPC done, unlink the request, but don't rip it out from under
	 * the callout timer.
	 *
	 * Once unlinked no other receiver or the timer will have
	 * visibility, so we do not have to set R_LOCKED.
	 */
	crit_enter();
	while (rep->r_flags & R_LOCKED) {
		rep->r_flags |= R_WANTED;
		tsleep(rep, 0, "nfstrac", 0);
	}
	KKASSERT(rep->r_flags & R_ONREQQ);
	TAILQ_REMOVE(&nmp->nm_reqq, rep, r_chain);
	rep->r_flags &= ~R_ONREQQ;
	--nmp->nm_reqqlen;
	if (TAILQ_FIRST(&nmp->nm_bioq) &&
	    nmp->nm_reqqlen <= nfs_maxasyncbio * 2 / 3) {
		nfssvc_iod_writer_wakeup(nmp);
	}
	crit_exit();

	/*
	 * Decrement the outstanding request count.
	 */
	if (rep->r_flags & R_SENT) {
		rep->r_flags &= ~R_SENT;
	}
	return (error);
}
/*
 * Process reply with error returned from nfs_request_waitreply().
 *
 * Returns EAGAIN if it wants us to loop up to nfs_request_try() again.
 * Returns ENEEDAUTH if it wants us to loop up to nfs_request_auth() again.
 */
static int
nfs_request_processreply(nfsm_info_t info, int error)
{
	struct nfsreq *req = info->req;
	struct nfsmount *nmp = req->r_nmp;
	u_int32_t *tl;
	int verf_type;
	int i;

	/*
	 * If there was a successful reply and a tprintf msg had been
	 * printed, tprintf a response.
	 */
	if (error == 0 && (req->r_flags & R_TPRINTFMSG)) {
		nfs_msg(req->r_td, nmp->nm_mountp->mnt_stat.f_mntfromname,
			"is alive again");
	}
	info->mrep = req->r_mrep;
	info->md = req->r_md;
	info->dpos = req->r_dpos;
	if (error) {
		m_freem(req->r_mreq);
		req->r_mreq = NULL;
		kfree(req, M_NFSREQ);
		info->req = NULL;
		return (error);
	}

	/*
	 * break down the rpc header and check if ok
	 */
	NULLOUT(tl = nfsm_dissect(info, 3 * NFSX_UNSIGNED));
	if (*tl++ == rpc_msgdenied) {
		if (*tl == rpc_mismatch) {
			error = EOPNOTSUPP;
		} else if ((nmp->nm_flag & NFSMNT_KERB) &&
			   *tl++ == rpc_autherr) {
			if (req->r_failed_auth == 0) {
				req->r_failed_auth++;
				req->r_mheadend->m_next = NULL;
				m_freem(info->mrep);
				info->mrep = NULL;
				m_freem(req->r_mreq);
				req->r_mreq = NULL;
				return (ENEEDAUTH);
			} else {
				error = EAUTH;
			}
		} else {
			error = EACCES;
		}
		m_freem(info->mrep);
		info->mrep = NULL;
		m_freem(req->r_mreq);
		req->r_mreq = NULL;
		kfree(req, M_NFSREQ);
		info->req = NULL;
		return (error);
	}

	/*
	 * Grab any Kerberos verifier, otherwise just throw it away.
	 */
	verf_type = fxdr_unsigned(int, *tl++);
	i = fxdr_unsigned(int32_t, *tl);
	if ((nmp->nm_flag & NFSMNT_KERB) && verf_type == RPCAUTH_KERB4) {
		error = nfs_savenickauth(nmp, req->r_cred, i, req->r_key,
					 &info->md, &info->dpos, info->mrep);
		if (error)
			goto nfsmout;
	} else if (i > 0) {
		ERROROUT(nfsm_adv(info, nfsm_rndup(i)));
	}
	NULLOUT(tl = nfsm_dissect(info, NFSX_UNSIGNED));
	/* 0 == ok */
	if (*tl == 0) {
		NULLOUT(tl = nfsm_dissect(info, NFSX_UNSIGNED));
		if (*tl != 0) {
			error = fxdr_unsigned(int, *tl);

			/*
			 * Does anyone even implement this?  Just impose
			 * a 1-second delay.
			 */
			if ((nmp->nm_flag & NFSMNT_NFSV3) &&
			    error == NFSERR_TRYLATER) {
				m_freem(info->mrep);
				info->mrep = NULL;
				error = 0;

				tsleep((caddr_t)&lbolt, 0, "nqnfstry", 0);
				return (EAGAIN);	/* goto tryagain */
			}

			/*
			 * If the File Handle was stale, invalidate the
			 * lookup cache, just in case.
			 *
			 * To avoid namecache<->vnode deadlocks we must
			 * release the vnode lock if we hold it.
			 */
			if (error == ESTALE) {
				struct vnode *vp = req->r_vp;
				int ltype;

				ltype = lockstatus(&vp->v_lock, curthread);
				if (ltype == LK_EXCLUSIVE || ltype == LK_SHARED)
					lockmgr(&vp->v_lock, LK_RELEASE);
				cache_inval_vp(vp, CINV_CHILDREN);
				if (ltype == LK_EXCLUSIVE || ltype == LK_SHARED)
					lockmgr(&vp->v_lock, ltype);
			}
			if (nmp->nm_flag & NFSMNT_NFSV3) {
				KKASSERT(*req->r_mrp == info->mrep);
				KKASSERT(*req->r_mdp == info->md);
				KKASSERT(*req->r_dposp == info->dpos);
				error |= NFSERR_RETERR;
			} else {
				m_freem(info->mrep);
				info->mrep = NULL;
			}
			m_freem(req->r_mreq);
			req->r_mreq = NULL;
			kfree(req, M_NFSREQ);
			info->req = NULL;
			return (error);
		}

		KKASSERT(*req->r_mrp == info->mrep);
		KKASSERT(*req->r_mdp == info->md);
		KKASSERT(*req->r_dposp == info->dpos);
		m_freem(req->r_mreq);
		req->r_mreq = NULL;
		FREE(req, M_NFSREQ);
		return (0);
	}
	m_freem(info->mrep);
	info->mrep = NULL;
	error = EPROTONOSUPPORT;
nfsmout:
	m_freem(req->r_mreq);
	req->r_mreq = NULL;
	kfree(req, M_NFSREQ);
	info->req = NULL;
	return (error);
}
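/*
 * Added commentary on the ONC RPC reply layout parsed above and built
 * by nfs_rephead() below (per RFC 5531): a reply starts with the xid
 * and a msg_type of REPLY, followed by a reply_stat word that is
 * either MSG_ACCEPTED or MSG_DENIED.  Accepted replies carry a
 * verifier (flavor + opaque length) and an accept_stat word; denied
 * replies carry either an rpc version mismatch range or an auth_stat
 * code.  That is why the client dissects 3 words up front and why the
 * server reserves 6 words for the common case.
 */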
#ifndef NFS_NOSERVER
/*
 * Generate the rpc reply header
 * siz arg. is used to decide if adding a cluster is worthwhile
 */
int
nfs_rephead(int siz, struct nfsrv_descript *nd, struct nfssvc_sock *slp,
	    int err, struct mbuf **mrq, struct mbuf **mbp, caddr_t *bposp)
{
	u_int32_t *tl;
	struct nfsm_info info;

	siz += RPC_REPLYSIZ;
	info.mb = m_getl(max_hdr + siz, MB_WAIT, MT_DATA, M_PKTHDR, NULL);
	info.mreq = info.mb;
	info.mreq->m_pkthdr.len = 0;
	/*
	 * If this is not a cluster, try and leave leading space
	 * for the lower level headers.
	 */
	if ((max_hdr + siz) < MINCLSIZE)
		info.mreq->m_data += max_hdr;
	tl = mtod(info.mreq, u_int32_t *);
	info.mreq->m_len = 6 * NFSX_UNSIGNED;
	info.bpos = ((caddr_t)tl) + info.mreq->m_len;
	*tl++ = txdr_unsigned(nd->nd_retxid);
	*tl++ = rpc_reply;
	if (err == ERPCMISMATCH || (err & NFSERR_AUTHERR)) {
		*tl++ = rpc_msgdenied;
		if (err & NFSERR_AUTHERR) {
			*tl++ = rpc_autherr;
			*tl = txdr_unsigned(err & ~NFSERR_AUTHERR);
			info.mreq->m_len -= NFSX_UNSIGNED;
			info.bpos -= NFSX_UNSIGNED;
		} else {
			*tl++ = rpc_mismatch;
			*tl++ = txdr_unsigned(RPC_VER2);
			*tl = txdr_unsigned(RPC_VER2);
		}
	} else {
		*tl++ = rpc_msgaccepted;

		/*
		 * For Kerberos authentication, we must send the nickname
		 * verifier back, otherwise just RPCAUTH_NULL.
		 */
		if (nd->nd_flag & ND_KERBFULL) {
			struct nfsuid *nuidp;
			struct timeval ktvin, ktvout;

			for (nuidp = NUIDHASH(slp, nd->nd_cr.cr_uid)->lh_first;
			     nuidp != NULL; nuidp = nuidp->nu_hash.le_next) {
				if (nuidp->nu_cr.cr_uid == nd->nd_cr.cr_uid &&
				    (!nd->nd_nam2 || netaddr_match(NU_NETFAM(nuidp),
				     &nuidp->nu_haddr, nd->nd_nam2)))
					break;
			}
			if (nuidp) {
				ktvin.tv_sec =
				    txdr_unsigned(nuidp->nu_timestamp.tv_sec - 1);
				ktvin.tv_usec =
				    txdr_unsigned(nuidp->nu_timestamp.tv_usec);

				/*
				 * Encrypt the timestamp in ecb mode using the
				 * session key.
				 */
#ifdef NFSKERB
				XXX
#else
				ktvout.tv_sec = 0;
				ktvout.tv_usec = 0;
#endif

				*tl++ = rpc_auth_kerb;
				*tl++ = txdr_unsigned(3 * NFSX_UNSIGNED);
				*tl = ktvout.tv_sec;
				tl = nfsm_build(&info, 3 * NFSX_UNSIGNED);
				*tl++ = ktvout.tv_usec;
				*tl++ = txdr_unsigned(nuidp->nu_cr.cr_uid);
			} else {
				*tl++ = 0;
				*tl++ = 0;
			}
		} else {
			*tl++ = 0;
			*tl++ = 0;
		}
		switch (err) {
		case EPROGUNAVAIL:
			*tl = txdr_unsigned(RPC_PROGUNAVAIL);
			break;
		case EPROGMISMATCH:
			*tl = txdr_unsigned(RPC_PROGMISMATCH);
			tl = nfsm_build(&info, 2 * NFSX_UNSIGNED);
			*tl++ = txdr_unsigned(2);
			*tl = txdr_unsigned(3);
			break;
		case EPROCUNAVAIL:
			*tl = txdr_unsigned(RPC_PROCUNAVAIL);
			break;
		case EBADRPC:
			*tl = txdr_unsigned(RPC_GARBAGE);
			break;
		default:
			*tl = 0;
			if (err != NFSERR_RETVOID) {
				tl = nfsm_build(&info, NFSX_UNSIGNED);
				if (err)
					*tl = txdr_unsigned(nfsrv_errmap(nd, err));
				else
					*tl = 0;
			}
			break;
		}
	}

	if (mrq != NULL)
		*mrq = info.mreq;
	*mbp = info.mb;
	*bposp = info.bpos;
	if (err != 0 && err != NFSERR_RETVOID)
		nfsstats.srvrpc_errs++;
	return (0);
}

#endif /* NFS_NOSERVER */

/*
 * Nfs timer routine.
 *
 * Scan the nfsreq list and retransmit any requests that have timed out.
 * To avoid retransmission attempts on STREAM sockets (in the future) make
 * sure to set the r_retry field to 0 (implies nm_retry == 0).
 *
 * Requests with attached responses, terminated requests, and
 * locked requests are ignored.  Locked requests will be picked up
 * in a later timer call.
 */
void
nfs_timer(void *arg /* never used */)
{
	struct nfsmount *nmp;
	struct nfsreq *req;
#ifndef NFS_NOSERVER
	struct nfssvc_sock *slp;
	u_quad_t cur_usec;
#endif /* NFS_NOSERVER */

	crit_enter();
	TAILQ_FOREACH(nmp, &nfs_mountq, nm_entry) {
		TAILQ_FOREACH(req, &nmp->nm_reqq, r_chain) {
			KKASSERT(nmp == req->r_nmp);
			if (req->r_mrep)
				continue;
			if (req->r_flags & (R_SOFTTERM | R_LOCKED))
				continue;
			req->r_flags |= R_LOCKED;
			if (nfs_sigintr(nmp, req, req->r_td)) {
				nfs_softterm(req, 1);
			} else {
				nfs_timer_req(req);
			}
			req->r_flags &= ~R_LOCKED;
			if (req->r_flags & R_WANTED) {
				req->r_flags &= ~R_WANTED;
				wakeup(req);
			}
		}
	}
#ifndef NFS_NOSERVER

	/*
	 * Scan the write gathering queues for writes that need to be
	 * completed now.
	 */
	cur_usec = nfs_curusec();
	TAILQ_FOREACH(slp, &nfssvc_sockhead, ns_chain) {
		if (slp->ns_tq.lh_first &&
		    slp->ns_tq.lh_first->nd_time <= cur_usec) {
			nfsrv_wakenfsd(slp, 1);
		}
	}
#endif /* NFS_NOSERVER */
	crit_exit();
	callout_reset(&nfs_timer_handle, nfs_ticks, nfs_timer, NULL);
}
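/*
 * Worked example (added commentary, made-up numbers) for the backoff
 * applied in nfs_timer_req() below.  After each timeout nm_timeouts is
 * bumped (capped at 8) and the base timeout is multiplied by the
 * corresponding nfs_backoff[] entry, the Fibonacci-like sequence
 * { 2, 3, 5, 8, 13, 21, 34, 55 }.  With a base timeout of 4 ticks the
 * successive retransmit intervals would be 8, 12, 20, 32, 52, ...
 * ticks, clipped at NFS_MAXTIMEO.
 */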
static
void
nfs_timer_req(struct nfsreq *req)
{
	struct thread *td = &thread0; /* XXX for creds, will break if sleep */
	struct nfsmount *nmp = req->r_nmp;
	struct mbuf *m;
	struct socket *so;
	int timeo;
	int error;

	/*
	 * rtt ticks and timeout calculation.  Return if the timeout
	 * has not been reached yet, unless the packet is flagged
	 * for an immediate send.
	 *
	 * The mean rtt doesn't help when we get random I/Os; we have
	 * to multiply by fairly large numbers.
	 */
	if (req->r_rtt >= 0) {
		/*
		 * Calculate the timeout to test against.
		 */
		req->r_rtt++;
		if (nmp->nm_flag & NFSMNT_DUMBTIMR) {
			timeo = nmp->nm_timeo << NFS_RTT_SCALE_BITS;
		} else if (req->r_flags & R_TIMING) {
			timeo = NFS_SRTT(req) + NFS_SDRTT(req);
		} else {
			timeo = nmp->nm_timeo << NFS_RTT_SCALE_BITS;
		}
		timeo *= multt[req->r_procnum];
		/* timeo is still scaled by SCALE_BITS */

#define NFSFS	(NFS_RTT_SCALE * NFS_HZ)
		if (req->r_flags & R_TIMING) {
			static long last_time;
			if (nfs_showrtt && last_time != time_second) {
				kprintf("rpccmd %d NFS SRTT %d SDRTT %d "
					"timeo %d.%03d\n",
					proct[req->r_procnum],
					NFS_SRTT(req), NFS_SDRTT(req),
					timeo / NFSFS,
					timeo % NFSFS * 1000 / NFSFS);
				last_time = time_second;
			}
		}
#undef NFSFS

		/*
		 * deal with nfs_timer jitter.
		 */
		timeo = (timeo >> NFS_RTT_SCALE_BITS) + 1;
		if (timeo < 2)
			timeo = 2;

		if (nmp->nm_timeouts > 0)
			timeo *= nfs_backoff[nmp->nm_timeouts - 1];
		if (timeo > NFS_MAXTIMEO)
			timeo = NFS_MAXTIMEO;
		if (req->r_rtt <= timeo) {
			if ((req->r_flags & R_NEEDSXMIT) == 0)
				return;
		} else if (nmp->nm_timeouts < 8) {
			nmp->nm_timeouts++;
		}
	}

	/*
	 * Check for server not responding
	 */
	if ((req->r_flags & R_TPRINTFMSG) == 0 &&
	    req->r_rexmit > nmp->nm_deadthresh) {
		nfs_msg(req->r_td, nmp->nm_mountp->mnt_stat.f_mntfromname,
			"not responding");
		req->r_flags |= R_TPRINTFMSG;
	}
	if (req->r_rexmit >= req->r_retry) {	/* too many */
		nfsstats.rpctimeouts++;
		nfs_softterm(req, 1);
		return;
	}

	/*
	 * Generally disable retransmission on reliable sockets,
	 * unless the request is flagged for immediate send.
	 */
	if (nmp->nm_sotype != SOCK_DGRAM) {
		if (++req->r_rexmit > NFS_MAXREXMIT)
			req->r_rexmit = NFS_MAXREXMIT;
		if ((req->r_flags & R_NEEDSXMIT) == 0)
			return;
	}

	/*
	 * Stop here if we do not have a socket!
	 */
	if ((so = nmp->nm_so) == NULL)
		return;

	/*
	 * If there is enough space and the window allows.. resend it.
	 *
	 * r_rtt is left intact in case we get an answer after the
	 * retry that was a reply to the original packet.
	 *
	 * NOTE: so_pru_send()
	 */
	if (ssb_space(&so->so_snd) >= req->r_mreq->m_pkthdr.len &&
	    (req->r_flags & (R_SENT | R_NEEDSXMIT)) &&
	    (m = m_copym(req->r_mreq, 0, M_COPYALL, MB_DONTWAIT))) {
		if ((nmp->nm_flag & NFSMNT_NOCONN) == 0)
			error = so_pru_send(so, 0, m, NULL, NULL, td);
		else
			error = so_pru_send(so, 0, m, nmp->nm_nam, NULL, td);
		if (error) {
			if (NFSIGNORE_SOERROR(nmp->nm_soflags, error))
				so->so_error = 0;
			req->r_flags |= R_NEEDSXMIT;
		} else if (req->r_mrep == NULL) {
			/*
			 * Iff first send, start timing
			 * else turn timing off, backoff timer
			 * and divide congestion window by 2.
			 *
			 * It is possible for the so_pru_send() to
			 * block and for us to race a reply so we
			 * only do this if the reply field has not
			 * been filled in.  R_LOCKED will prevent
			 * the request from being ripped out from under
			 * us entirely.
			 *
			 * Record the last resent procnum to aid us
			 * in duplicate detection on receive.
			 */
			if ((req->r_flags & R_NEEDSXMIT) == 0) {
				if (nfs_showrexmit)
					kprintf("X");
				if (++req->r_rexmit > NFS_MAXREXMIT)
					req->r_rexmit = NFS_MAXREXMIT;
				nmp->nm_maxasync_scaled >>= 1;
				if (nmp->nm_maxasync_scaled < NFS_MINASYNC_SCALED)
					nmp->nm_maxasync_scaled = NFS_MINASYNC_SCALED;
				nfsstats.rpcretries++;
				nmp->nm_lastreprocnum = req->r_procnum;
			} else {
				req->r_flags |= R_SENT;
				req->r_flags &= ~R_NEEDSXMIT;
			}
		}
	}
}

/*
 * Mark all of an nfs mount's outstanding requests with R_SOFTTERM and
 * wait for all requests to complete.  This is used by forced unmounts
 * to terminate any outstanding RPCs.
 *
 * Locked requests cannot be canceled but will be marked for
 * soft-termination.
 */
int
nfs_nmcancelreqs(struct nfsmount *nmp)
{
	struct nfsreq *req;
	int i;

	crit_enter();
	TAILQ_FOREACH(req, &nmp->nm_reqq, r_chain) {
		if (req->r_mrep != NULL || (req->r_flags & R_SOFTTERM))
			continue;
		nfs_softterm(req, 0);
	}
	/* XXX the other two queues as well */
	crit_exit();

	for (i = 0; i < 30; i++) {
		crit_enter();
		TAILQ_FOREACH(req, &nmp->nm_reqq, r_chain) {
			if (nmp == req->r_nmp)
				break;
		}
		crit_exit();
		if (req == NULL)
			return (0);
		tsleep(&lbolt, 0, "nfscancel", 0);
	}
	return (EBUSY);
}

/*
 * Soft-terminate a request, effectively marking it as failed.
 *
 * Must be called from within a critical section.
 */
static void
nfs_softterm(struct nfsreq *rep, int islocked)
{
	rep->r_flags |= R_SOFTTERM;
	nfs_hardterm(rep, islocked);
}

/*
 * Hard-terminate a request, typically after getting a response.
 *
 * The state machine can still decide to re-issue it later if necessary.
 *
 * Must be called from within a critical section.
 */
static void
nfs_hardterm(struct nfsreq *rep, int islocked)
{
	struct nfsmount *nmp = rep->r_nmp;

	/*
	 * The nm_send count is decremented now to avoid deadlocks
	 * when the process in soreceive() hasn't yet managed to send
	 * its own request.
	 */
	if (rep->r_flags & R_SENT) {
		rep->r_flags &= ~R_SENT;
	}

	/*
	 * If we locked the request or nobody else has locked the request,
	 * and the request is async, we can move it to the reader thread's
	 * queue now and fix up the state.
	 *
	 * If we locked the request or nobody else has locked the request,
	 * we can wake up anyone blocked waiting for a response on the
	 * request.
	 */
	if (islocked || (rep->r_flags & R_LOCKED) == 0) {
		if ((rep->r_flags & (R_ONREQQ | R_ASYNC)) ==
		    (R_ONREQQ | R_ASYNC)) {
			rep->r_flags &= ~R_ONREQQ;
			TAILQ_REMOVE(&nmp->nm_reqq, rep, r_chain);
			--nmp->nm_reqqlen;
			TAILQ_INSERT_TAIL(&nmp->nm_reqrxq, rep, r_chain);
			KKASSERT(rep->r_info->state == NFSM_STATE_TRY ||
				 rep->r_info->state == NFSM_STATE_WAITREPLY);
			rep->r_info->state = NFSM_STATE_PROCESSREPLY;
			nfssvc_iod_reader_wakeup(nmp);
			if (TAILQ_FIRST(&nmp->nm_bioq) &&
			    nmp->nm_reqqlen <= nfs_maxasyncbio * 2 / 3) {
				nfssvc_iod_writer_wakeup(nmp);
			}
		}
		mtx_abort_ex_link(&nmp->nm_rxlock, &rep->r_link);
	}
}
/*
 * Test for a termination condition pending on the process.
 * This is used for NFSMNT_INT mounts.
 */
int
nfs_sigintr(struct nfsmount *nmp, struct nfsreq *rep, struct thread *td)
{
	sigset_t tmpset;
	struct proc *p;
	struct lwp *lp;

	if (rep && (rep->r_flags & R_SOFTTERM))
		return (EINTR);
	/* Terminate all requests while attempting a forced unmount. */
	if (nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF)
		return (EINTR);
	if (!(nmp->nm_flag & NFSMNT_INT))
		return (0);
	/* td might be NULL YYY */
	if (td == NULL || (p = td->td_proc) == NULL)
		return (0);

	lp = td->td_lwp;
	tmpset = lwp_sigpend(lp);
	SIGSETNAND(tmpset, lp->lwp_sigmask);
	SIGSETNAND(tmpset, p->p_sigignore);
	if (SIGNOTEMPTY(tmpset) && NFSINT_SIGMASK(tmpset))
		return (EINTR);

	return (0);
}

/*
 * Lock a socket against others.
 * Necessary for STREAM sockets to ensure you get an entire rpc request/reply
 * and also to avoid race conditions between the processes with nfs requests
 * in progress when a reconnect is necessary.
 */
int
nfs_sndlock(struct nfsmount *nmp, struct nfsreq *rep)
{
	mtx_t mtx = &nmp->nm_txlock;
	struct thread *td;
	int slptimeo;
	int slpflag;
	int error;

	slpflag = 0;
	slptimeo = 0;
	td = rep ? rep->r_td : NULL;
	if (nmp->nm_flag & NFSMNT_INT)
		slpflag = PCATCH;

	while ((error = mtx_lock_ex_try(mtx)) != 0) {
		if (nfs_sigintr(nmp, rep, td)) {
			error = EINTR;
			break;
		}
		error = mtx_lock_ex(mtx, "nfsndlck", slpflag, slptimeo);
		if (error == 0)
			break;
		if (slpflag == PCATCH) {
			slpflag = 0;
			slptimeo = 2 * hz;
		}
	}
	/* Always fail if our request has been cancelled. */
	if (rep && (rep->r_flags & R_SOFTTERM)) {
		if (error == 0)
			mtx_unlock(mtx);
		error = EINTR;
	}
	return (error);
}

/*
 * Unlock the stream socket for others.
 */
void
nfs_sndunlock(struct nfsmount *nmp)
{
	mtx_unlock(&nmp->nm_txlock);
}
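/*
 * Added commentary on the sleep-flag dance in nfs_sndlock() above (and
 * nfs_rcvlock() below): for interruptible (NFSMNT_INT) mounts the
 * first lock attempt sleeps with PCATCH so a signal can break it.  If
 * that sleep fails, the code downgrades to an uninterruptible sleep
 * with a 2 second timeout and relies on the nfs_sigintr() check at the
 * top of the loop to notice pending signals, which avoids missing a
 * wakeup while still honoring interruptible-mount semantics.
 */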
2122 */
2123 if (rep) {
2124 error = mtx_lock_ex_link(mtx, &rep->r_link,
2125 "nfsrcvlk",
2126 slpflag, slptimeo);
2127 } else {
2128 error = mtx_lock_ex(mtx, "nfsrcvlk", slpflag, slptimeo);
2129 }
2130 if (error == 0)
2131 break;
2132
2133 /*
2134 * If our reply was received while we were sleeping,
2135 * then just return without taking the lock to avoid a
2136 * situation where a single iod could 'capture' the
2137 * receive lock.
2138 */
2139 if (rep && rep->r_mrep != NULL) {
2140 error = EALREADY;
2141 break;
2142 }
2143 if (slpflag == PCATCH) {
2144 slpflag = 0;
2145 slptimeo = 2 * hz;
2146 }
2147 }
2148 if (error == 0) {
2149 if (rep && rep->r_mrep != NULL) {
2150 error = EALREADY;
2151 mtx_unlock(mtx);
2152 }
2153 }
2154 return (error);
2155 }
2156
2157 /*
2158 * Unlock the receiver side of the socket for others.
2159 */
2160 static void
2161 nfs_rcvunlock(struct nfsmount *nmp)
2162 {
2163 mtx_unlock(&nmp->nm_rxlock);
2164 }
2165
2166 /*
2167 * nfs_realign:
2168 *
2169 * Check for badly aligned mbuf data and realign by copying the unaligned
2170 * portion of the data into a new mbuf chain and freeing the portions
2171 * of the old chain that were replaced.
2172 *
2173 * We cannot simply realign the data within the existing mbuf chain
2174 * because the underlying buffers may contain other rpc commands and
2175 * we cannot afford to overwrite them.
2176 *
2177 * We would prefer to avoid this situation entirely. The situation does
2178 * not occur with NFS/UDP and is supposed to only occasionally occur
2179 * with TCP. Use vfs.nfs.realign_count and realign_test to check this.
2180 *
2181 * NOTE! MB_DONTWAIT cannot be used here. The mbufs must be acquired
2182 * because the rpc request OR reply cannot be thrown away. TCP NFS
2183 * mounts do not retry their RPCs unless the TCP connection itself
2184 * is dropped so throwing away an RPC will basically cause the NFS
2185 * operation to lock up indefinitely.
2186 */
2187 static void
2188 nfs_realign(struct mbuf **pm, int hsiz)
2189 {
2190 struct mbuf *m;
2191 struct mbuf *n = NULL;
2192
2193 /*
2194 * Check for misalignment
2195 */
2196 ++nfs_realign_test;
2197 while ((m = *pm) != NULL) {
2198 if ((m->m_len & 0x3) || (mtod(m, intptr_t) & 0x3))
2199 break;
2200 pm = &m->m_next;
2201 }
2202
2203 /*
2204 * If misalignment was found, make a completely new copy.
2205 */
2206 if (m) {
2207 ++nfs_realign_count;
2208 n = m_dup_data(m, MB_WAIT);
2209 m_freem(*pm);
2210 *pm = n;
2211 }
2212 }
2213
2214 #ifndef NFS_NOSERVER
2215
2216 /*
2217 * Parse an RPC request
2218 * - verify it
2219 * - fill in the cred struct.
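 *
 * For reference, the ONC RPC call header dissected below has the
 * following on-the-wire XDR layout (each field one 32-bit big-endian
 * word).  The struct is purely illustrative and is not used by this
 * code:
 *
 *	struct rpc_call_hdr {
 *		u_int32_t xid;		transaction id, echoed in the reply
 *		u_int32_t mtype;	0 == CALL
 *		u_int32_t rpcvers;	RPC protocol version, always 2
 *		u_int32_t prog;		program number (NFS here)
 *		u_int32_t vers;		program version (2 or 3)
 *		u_int32_t proc;		procedure number
 *	};
 *	...followed by the credential and verifier opaque_auth bodies.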
2220 */ 2221 int 2222 nfs_getreq(struct nfsrv_descript *nd, struct nfsd *nfsd, int has_header) 2223 { 2224 int len, i; 2225 u_int32_t *tl; 2226 struct uio uio; 2227 struct iovec iov; 2228 caddr_t cp; 2229 u_int32_t nfsvers, auth_type; 2230 uid_t nickuid; 2231 int error = 0, ticklen; 2232 struct nfsuid *nuidp; 2233 struct timeval tvin, tvout; 2234 struct nfsm_info info; 2235 #if 0 /* until encrypted keys are implemented */ 2236 NFSKERBKEYSCHED_T keys; /* stores key schedule */ 2237 #endif 2238 2239 info.mrep = nd->nd_mrep; 2240 info.md = nd->nd_md; 2241 info.dpos = nd->nd_dpos; 2242 2243 if (has_header) { 2244 NULLOUT(tl = nfsm_dissect(&info, 10 * NFSX_UNSIGNED)); 2245 nd->nd_retxid = fxdr_unsigned(u_int32_t, *tl++); 2246 if (*tl++ != rpc_call) { 2247 m_freem(info.mrep); 2248 return (EBADRPC); 2249 } 2250 } else { 2251 NULLOUT(tl = nfsm_dissect(&info, 8 * NFSX_UNSIGNED)); 2252 } 2253 nd->nd_repstat = 0; 2254 nd->nd_flag = 0; 2255 if (*tl++ != rpc_vers) { 2256 nd->nd_repstat = ERPCMISMATCH; 2257 nd->nd_procnum = NFSPROC_NOOP; 2258 return (0); 2259 } 2260 if (*tl != nfs_prog) { 2261 nd->nd_repstat = EPROGUNAVAIL; 2262 nd->nd_procnum = NFSPROC_NOOP; 2263 return (0); 2264 } 2265 tl++; 2266 nfsvers = fxdr_unsigned(u_int32_t, *tl++); 2267 if (nfsvers < NFS_VER2 || nfsvers > NFS_VER3) { 2268 nd->nd_repstat = EPROGMISMATCH; 2269 nd->nd_procnum = NFSPROC_NOOP; 2270 return (0); 2271 } 2272 if (nfsvers == NFS_VER3) 2273 nd->nd_flag = ND_NFSV3; 2274 nd->nd_procnum = fxdr_unsigned(u_int32_t, *tl++); 2275 if (nd->nd_procnum == NFSPROC_NULL) 2276 return (0); 2277 if (nd->nd_procnum >= NFS_NPROCS || 2278 (nd->nd_procnum >= NQNFSPROC_GETLEASE) || 2279 (!nd->nd_flag && nd->nd_procnum > NFSV2PROC_STATFS)) { 2280 nd->nd_repstat = EPROCUNAVAIL; 2281 nd->nd_procnum = NFSPROC_NOOP; 2282 return (0); 2283 } 2284 if ((nd->nd_flag & ND_NFSV3) == 0) 2285 nd->nd_procnum = nfsv3_procid[nd->nd_procnum]; 2286 auth_type = *tl++; 2287 len = fxdr_unsigned(int, *tl++); 2288 if (len < 0 || len > RPCAUTH_MAXSIZ) { 2289 m_freem(info.mrep); 2290 return (EBADRPC); 2291 } 2292 2293 nd->nd_flag &= ~ND_KERBAUTH; 2294 /* 2295 * Handle auth_unix or auth_kerb. 2296 */ 2297 if (auth_type == rpc_auth_unix) { 2298 len = fxdr_unsigned(int, *++tl); 2299 if (len < 0 || len > NFS_MAXNAMLEN) { 2300 m_freem(info.mrep); 2301 return (EBADRPC); 2302 } 2303 ERROROUT(nfsm_adv(&info, nfsm_rndup(len))); 2304 NULLOUT(tl = nfsm_dissect(&info, 3 * NFSX_UNSIGNED)); 2305 bzero((caddr_t)&nd->nd_cr, sizeof (struct ucred)); 2306 nd->nd_cr.cr_ref = 1; 2307 nd->nd_cr.cr_uid = fxdr_unsigned(uid_t, *tl++); 2308 nd->nd_cr.cr_ruid = nd->nd_cr.cr_svuid = nd->nd_cr.cr_uid; 2309 nd->nd_cr.cr_gid = fxdr_unsigned(gid_t, *tl++); 2310 nd->nd_cr.cr_rgid = nd->nd_cr.cr_svgid = nd->nd_cr.cr_gid; 2311 len = fxdr_unsigned(int, *tl); 2312 if (len < 0 || len > RPCAUTH_UNIXGIDS) { 2313 m_freem(info.mrep); 2314 return (EBADRPC); 2315 } 2316 NULLOUT(tl = nfsm_dissect(&info, (len + 2) * NFSX_UNSIGNED)); 2317 for (i = 1; i <= len; i++) 2318 if (i < NGROUPS) 2319 nd->nd_cr.cr_groups[i] = fxdr_unsigned(gid_t, *tl++); 2320 else 2321 tl++; 2322 nd->nd_cr.cr_ngroups = (len >= NGROUPS) ? 
NGROUPS : (len + 1); 2323 if (nd->nd_cr.cr_ngroups > 1) 2324 nfsrvw_sort(nd->nd_cr.cr_groups, nd->nd_cr.cr_ngroups); 2325 len = fxdr_unsigned(int, *++tl); 2326 if (len < 0 || len > RPCAUTH_MAXSIZ) { 2327 m_freem(info.mrep); 2328 return (EBADRPC); 2329 } 2330 if (len > 0) { 2331 ERROROUT(nfsm_adv(&info, nfsm_rndup(len))); 2332 } 2333 } else if (auth_type == rpc_auth_kerb) { 2334 switch (fxdr_unsigned(int, *tl++)) { 2335 case RPCAKN_FULLNAME: 2336 ticklen = fxdr_unsigned(int, *tl); 2337 *((u_int32_t *)nfsd->nfsd_authstr) = *tl; 2338 uio.uio_resid = nfsm_rndup(ticklen) + NFSX_UNSIGNED; 2339 nfsd->nfsd_authlen = uio.uio_resid + NFSX_UNSIGNED; 2340 if (uio.uio_resid > (len - 2 * NFSX_UNSIGNED)) { 2341 m_freem(info.mrep); 2342 return (EBADRPC); 2343 } 2344 uio.uio_offset = 0; 2345 uio.uio_iov = &iov; 2346 uio.uio_iovcnt = 1; 2347 uio.uio_segflg = UIO_SYSSPACE; 2348 iov.iov_base = (caddr_t)&nfsd->nfsd_authstr[4]; 2349 iov.iov_len = RPCAUTH_MAXSIZ - 4; 2350 ERROROUT(nfsm_mtouio(&info, &uio, uio.uio_resid)); 2351 NULLOUT(tl = nfsm_dissect(&info, 2 * NFSX_UNSIGNED)); 2352 if (*tl++ != rpc_auth_kerb || 2353 fxdr_unsigned(int, *tl) != 4 * NFSX_UNSIGNED) { 2354 kprintf("Bad kerb verifier\n"); 2355 nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADVERF); 2356 nd->nd_procnum = NFSPROC_NOOP; 2357 return (0); 2358 } 2359 NULLOUT(cp = nfsm_dissect(&info, 4 * NFSX_UNSIGNED)); 2360 tl = (u_int32_t *)cp; 2361 if (fxdr_unsigned(int, *tl) != RPCAKN_FULLNAME) { 2362 kprintf("Not fullname kerb verifier\n"); 2363 nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADVERF); 2364 nd->nd_procnum = NFSPROC_NOOP; 2365 return (0); 2366 } 2367 cp += NFSX_UNSIGNED; 2368 bcopy(cp, nfsd->nfsd_verfstr, 3 * NFSX_UNSIGNED); 2369 nfsd->nfsd_verflen = 3 * NFSX_UNSIGNED; 2370 nd->nd_flag |= ND_KERBFULL; 2371 nfsd->nfsd_flag |= NFSD_NEEDAUTH; 2372 break; 2373 case RPCAKN_NICKNAME: 2374 if (len != 2 * NFSX_UNSIGNED) { 2375 kprintf("Kerb nickname short\n"); 2376 nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADCRED); 2377 nd->nd_procnum = NFSPROC_NOOP; 2378 return (0); 2379 } 2380 nickuid = fxdr_unsigned(uid_t, *tl); 2381 NULLOUT(tl = nfsm_dissect(&info, 2 * NFSX_UNSIGNED)); 2382 if (*tl++ != rpc_auth_kerb || 2383 fxdr_unsigned(int, *tl) != 3 * NFSX_UNSIGNED) { 2384 kprintf("Kerb nick verifier bad\n"); 2385 nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADVERF); 2386 nd->nd_procnum = NFSPROC_NOOP; 2387 return (0); 2388 } 2389 NULLOUT(tl = nfsm_dissect(&info, 3 * NFSX_UNSIGNED)); 2390 tvin.tv_sec = *tl++; 2391 tvin.tv_usec = *tl; 2392 2393 for (nuidp = NUIDHASH(nfsd->nfsd_slp,nickuid)->lh_first; 2394 nuidp != 0; nuidp = nuidp->nu_hash.le_next) { 2395 if (nuidp->nu_cr.cr_uid == nickuid && 2396 (!nd->nd_nam2 || 2397 netaddr_match(NU_NETFAM(nuidp), 2398 &nuidp->nu_haddr, nd->nd_nam2))) 2399 break; 2400 } 2401 if (!nuidp) { 2402 nd->nd_repstat = 2403 (NFSERR_AUTHERR|AUTH_REJECTCRED); 2404 nd->nd_procnum = NFSPROC_NOOP; 2405 return (0); 2406 } 2407 2408 /* 2409 * Now, decrypt the timestamp using the session key 2410 * and validate it. 
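 *
 * Validation (below): the verifier is rejected if this nfsuid entry
 * has expired, or if the decrypted timestamp is older than the most
 * recently accepted one, which acts as a replay guard.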
2411 */
2412 #ifdef NFSKERB
2413 XXX
2414 #else
2415 tvout.tv_sec = 0;
2416 tvout.tv_usec = 0;
2417 #endif
2418
2419 tvout.tv_sec = fxdr_unsigned(long, tvout.tv_sec);
2420 tvout.tv_usec = fxdr_unsigned(long, tvout.tv_usec);
2421 if (nuidp->nu_expire < time_second ||
2422 nuidp->nu_timestamp.tv_sec > tvout.tv_sec ||
2423 (nuidp->nu_timestamp.tv_sec == tvout.tv_sec &&
2424 nuidp->nu_timestamp.tv_usec > tvout.tv_usec)) {
2425 nuidp->nu_expire = 0;
2426 nd->nd_repstat =
2427 (NFSERR_AUTHERR|AUTH_REJECTVERF);
2428 nd->nd_procnum = NFSPROC_NOOP;
2429 return (0);
2430 }
2431 nfsrv_setcred(&nuidp->nu_cr, &nd->nd_cr);
2432 nd->nd_flag |= ND_KERBNICK;
2433 }
2434 } else {
2435 nd->nd_repstat = (NFSERR_AUTHERR | AUTH_REJECTCRED);
2436 nd->nd_procnum = NFSPROC_NOOP;
2437 return (0);
2438 }
2439
2440 nd->nd_md = info.md;
2441 nd->nd_dpos = info.dpos;
2442 return (0);
2443 nfsmout:
2444 return (error);
2445 }
2446
2447 #endif
2448
2449 /*
2450 * Send a message to the originating process's terminal. The thread and/or
2451 * process may be NULL. YYY the thread should not be NULL but there may
2452 * still be some uio_td's being passed as NULL through to
2453 * nfsm_request().
2454 */
2455 static int
2456 nfs_msg(struct thread *td, char *server, char *msg)
2457 {
2458 tpr_t tpr;
2459
2460 if (td && td->td_proc)
2461 tpr = tprintf_open(td->td_proc);
2462 else
2463 tpr = NULL;
2464 tprintf(tpr, "nfs server %s: %s\n", server, msg);
2465 tprintf_close(tpr);
2466 return (0);
2467 }
2468
2469 #ifndef NFS_NOSERVER
2470 /*
2471 * Socket upcall routine for the nfsd sockets.
2472 * The caddr_t arg is a pointer to the "struct nfssvc_sock".
2473 * Essentially do as much as possible without blocking, else punt;
2474 * it will be called again with MB_WAIT from an nfsd.
2475 */
2476 void
2477 nfsrv_rcv(struct socket *so, void *arg, int waitflag)
2478 {
2479 struct nfssvc_sock *slp = (struct nfssvc_sock *)arg;
2480 struct mbuf *m;
2481 struct sockaddr *nam;
2482 struct sockbuf sio;
2483 int flags, error;
2484 int nparallel_wakeup = 0;
2485
2486 if ((slp->ns_flag & SLP_VALID) == 0)
2487 return;
2488
2489 /*
2490 * Do not allow an unbounded number of completed RPC records to build
2491 * up before we stop reading data from the socket. Otherwise we could
2492 * end up holding onto an unreasonable number of mbufs for requests
2493 * waiting for service.
2494 *
2495 * This should give pretty good feedback to the TCP
2496 * layer and prevents a memory crunch for other protocols.
2497 *
2498 * Note that the same service socket can be dispatched to several
2499 * nfs servers simultaneously.
2500 *
2501 * The tcp protocol callback calls us with MB_DONTWAIT, while
2502 * nfsd calls us with MB_WAIT (typically).
2503 */
2504 if (waitflag == MB_DONTWAIT && slp->ns_numrec >= nfsd_waiting / 2 + 1) {
2505 slp->ns_flag |= SLP_NEEDQ;
2506 goto dorecs;
2507 }
2508
2509 /*
2510 * Handle protocol specifics to parse an RPC request. We always
2511 * pull from the socket using non-blocking I/O.
2512 */
2513 if (so->so_type == SOCK_STREAM) {
2514 /*
2515 * The data has to be read in an orderly fashion from a TCP
2516 * stream, unlike a UDP socket. It is possible for soreceive
2517 * and/or nfsrv_getstream() to block, so make sure only one
2518 * entity is messing around with the TCP stream at any given
2519 * moment. The receive sockbuf's lock in soreceive is not
2520 * sufficient.
2521 *
2522 * Note that this procedure can be called from any number of
2523 * NFS servers *OR* can be upcalled directly from a TCP
2524 * protocol thread.
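 *
 * The SLP_GETSTREAM flag below provides that interlock; a caller
 * that finds it already set merely flags SLP_NEEDQ and returns,
 * leaving the work to the current owner or to a later nfsd pass.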
2525 */
2526 if (slp->ns_flag & SLP_GETSTREAM) {
2527 slp->ns_flag |= SLP_NEEDQ;
2528 goto dorecs;
2529 }
2530 slp->ns_flag |= SLP_GETSTREAM;
2531
2532 /*
2533 * Do soreceive(). Pull out as much data as possible without
2534 * blocking.
2535 */
2536 sbinit(&sio, 1000000000);
2537 flags = MSG_DONTWAIT;
2538 error = so_pru_soreceive(so, &nam, NULL, &sio, NULL, &flags);
2539 if (error || sio.sb_mb == NULL) {
2540 if (error == EWOULDBLOCK)
2541 slp->ns_flag |= SLP_NEEDQ;
2542 else
2543 slp->ns_flag |= SLP_DISCONN;
2544 slp->ns_flag &= ~SLP_GETSTREAM;
2545 goto dorecs;
2546 }
2547 m = sio.sb_mb;
2548 if (slp->ns_rawend) {
2549 slp->ns_rawend->m_next = m;
2550 slp->ns_cc += sio.sb_cc;
2551 } else {
2552 slp->ns_raw = m;
2553 slp->ns_cc = sio.sb_cc;
2554 }
2555 while (m->m_next)
2556 m = m->m_next;
2557 slp->ns_rawend = m;
2558
2559 /*
2560 * Now try to parse as many records as we can out of the
2561 * raw stream data.
2562 */
2563 error = nfsrv_getstream(slp, waitflag, &nparallel_wakeup);
2564 if (error) {
2565 if (error == EPERM)
2566 slp->ns_flag |= SLP_DISCONN;
2567 else
2568 slp->ns_flag |= SLP_NEEDQ;
2569 }
2570 slp->ns_flag &= ~SLP_GETSTREAM;
2571 } else {
2572 /*
2573 * For UDP, soreceive typically pulls just one packet;
2574 * loop to get the whole batch.
2575 */
2576 do {
2577 sbinit(&sio, 1000000000);
2578 flags = MSG_DONTWAIT;
2579 error = so_pru_soreceive(so, &nam, NULL, &sio,
2580 NULL, &flags);
2581 if (sio.sb_mb) {
2582 struct nfsrv_rec *rec;
2583 int mf = (waitflag & MB_DONTWAIT) ?
2584 M_NOWAIT : M_WAITOK;
2585 rec = kmalloc(sizeof(struct nfsrv_rec),
2586 M_NFSRVDESC, mf);
2587 if (!rec) {
2588 if (nam)
2589 FREE(nam, M_SONAME);
2590 m_freem(sio.sb_mb);
2591 continue;
2592 }
2593 nfs_realign(&sio.sb_mb, 10 * NFSX_UNSIGNED);
2594 rec->nr_address = nam;
2595 rec->nr_packet = sio.sb_mb;
2596 STAILQ_INSERT_TAIL(&slp->ns_rec, rec, nr_link);
2597 ++slp->ns_numrec;
2598 ++nparallel_wakeup;
2599 }
2600 if (error) {
2601 if ((so->so_proto->pr_flags & PR_CONNREQUIRED)
2602 && error != EWOULDBLOCK) {
2603 slp->ns_flag |= SLP_DISCONN;
2604 goto dorecs;
2605 }
2606 }
2607 } while (sio.sb_mb);
2608 }
2609
2610 /*
2611 * If we were upcalled from the tcp protocol layer and we have
2612 * fully parsed records ready to go, or there is new data pending,
2613 * or something went wrong, try to wake up an nfsd thread to deal
2614 * with it.
2615 */
2616 dorecs:
2617 if (waitflag == MB_DONTWAIT && (slp->ns_numrec > 0
2618 || (slp->ns_flag & (SLP_NEEDQ | SLP_DISCONN)))) {
2619 nfsrv_wakenfsd(slp, nparallel_wakeup);
2620 }
2621 }
2622
2623 /*
2624 * Try to extract an RPC request from the mbuf data list received on a
2625 * stream socket. The "waitflag" argument indicates whether or not it
2626 * can sleep.
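 *
 * Records are delimited by the standard RPC record mark: one 32-bit
 * big-endian word whose high bit flags the final fragment and whose
 * low 31 bits give the fragment length.  Illustrative encoding, as a
 * sender would generate it (not code used here):
 *
 *	u_int32_t mark = htonl(0x80000000 | (len & 0x7fffffff));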
2627 */ 2628 static int 2629 nfsrv_getstream(struct nfssvc_sock *slp, int waitflag, int *countp) 2630 { 2631 struct mbuf *m, **mpp; 2632 char *cp1, *cp2; 2633 int len; 2634 struct mbuf *om, *m2, *recm; 2635 u_int32_t recmark; 2636 2637 for (;;) { 2638 if (slp->ns_reclen == 0) { 2639 if (slp->ns_cc < NFSX_UNSIGNED) 2640 return (0); 2641 m = slp->ns_raw; 2642 if (m->m_len >= NFSX_UNSIGNED) { 2643 bcopy(mtod(m, caddr_t), (caddr_t)&recmark, NFSX_UNSIGNED); 2644 m->m_data += NFSX_UNSIGNED; 2645 m->m_len -= NFSX_UNSIGNED; 2646 } else { 2647 cp1 = (caddr_t)&recmark; 2648 cp2 = mtod(m, caddr_t); 2649 while (cp1 < ((caddr_t)&recmark) + NFSX_UNSIGNED) { 2650 while (m->m_len == 0) { 2651 m = m->m_next; 2652 cp2 = mtod(m, caddr_t); 2653 } 2654 *cp1++ = *cp2++; 2655 m->m_data++; 2656 m->m_len--; 2657 } 2658 } 2659 slp->ns_cc -= NFSX_UNSIGNED; 2660 recmark = ntohl(recmark); 2661 slp->ns_reclen = recmark & ~0x80000000; 2662 if (recmark & 0x80000000) 2663 slp->ns_flag |= SLP_LASTFRAG; 2664 else 2665 slp->ns_flag &= ~SLP_LASTFRAG; 2666 if (slp->ns_reclen > NFS_MAXPACKET || slp->ns_reclen <= 0) { 2667 log(LOG_ERR, "%s (%d) from nfs client\n", 2668 "impossible packet length", 2669 slp->ns_reclen); 2670 return (EPERM); 2671 } 2672 } 2673 2674 /* 2675 * Now get the record part. 2676 * 2677 * Note that slp->ns_reclen may be 0. Linux sometimes 2678 * generates 0-length RPCs 2679 */ 2680 recm = NULL; 2681 if (slp->ns_cc == slp->ns_reclen) { 2682 recm = slp->ns_raw; 2683 slp->ns_raw = slp->ns_rawend = NULL; 2684 slp->ns_cc = slp->ns_reclen = 0; 2685 } else if (slp->ns_cc > slp->ns_reclen) { 2686 len = 0; 2687 m = slp->ns_raw; 2688 om = NULL; 2689 2690 while (len < slp->ns_reclen) { 2691 if ((len + m->m_len) > slp->ns_reclen) { 2692 m2 = m_copym(m, 0, slp->ns_reclen - len, 2693 waitflag); 2694 if (m2) { 2695 if (om) { 2696 om->m_next = m2; 2697 recm = slp->ns_raw; 2698 } else 2699 recm = m2; 2700 m->m_data += slp->ns_reclen - len; 2701 m->m_len -= slp->ns_reclen - len; 2702 len = slp->ns_reclen; 2703 } else { 2704 return (EWOULDBLOCK); 2705 } 2706 } else if ((len + m->m_len) == slp->ns_reclen) { 2707 om = m; 2708 len += m->m_len; 2709 m = m->m_next; 2710 recm = slp->ns_raw; 2711 om->m_next = NULL; 2712 } else { 2713 om = m; 2714 len += m->m_len; 2715 m = m->m_next; 2716 } 2717 } 2718 slp->ns_raw = m; 2719 slp->ns_cc -= len; 2720 slp->ns_reclen = 0; 2721 } else { 2722 return (0); 2723 } 2724 2725 /* 2726 * Accumulate the fragments into a record. 2727 */ 2728 mpp = &slp->ns_frag; 2729 while (*mpp) 2730 mpp = &((*mpp)->m_next); 2731 *mpp = recm; 2732 if (slp->ns_flag & SLP_LASTFRAG) { 2733 struct nfsrv_rec *rec; 2734 int mf = (waitflag & MB_DONTWAIT) ? M_NOWAIT : M_WAITOK; 2735 rec = kmalloc(sizeof(struct nfsrv_rec), M_NFSRVDESC, mf); 2736 if (!rec) { 2737 m_freem(slp->ns_frag); 2738 } else { 2739 nfs_realign(&slp->ns_frag, 10 * NFSX_UNSIGNED); 2740 rec->nr_address = NULL; 2741 rec->nr_packet = slp->ns_frag; 2742 STAILQ_INSERT_TAIL(&slp->ns_rec, rec, nr_link); 2743 ++slp->ns_numrec; 2744 ++*countp; 2745 } 2746 slp->ns_frag = NULL; 2747 } 2748 } 2749 } 2750 2751 #ifdef INVARIANTS 2752 2753 /* 2754 * Sanity check our mbuf chain. 
2755 */
2756 static void
2757 nfs_checkpkt(struct mbuf *m, int len)
2758 {
2759 int xlen = 0; struct mbuf *m0 = m; /* chain head, for the panic message */
2760 while (m) {
2761 xlen += m->m_len;
2762 m = m->m_next;
2763 }
2764 if (xlen != len) {
2765 panic("nfs_checkpkt: len mismatch %d/%d mbuf %p\n",
2766 xlen, len, m0);
2767 }
2768 }
2769
2770 #else
2771
2772 static void
2773 nfs_checkpkt(struct mbuf *m __unused, int len __unused)
2774 {
2775 }
2776
2777 #endif
2778
2779 /*
2780 * Parse an RPC header.
2781 */
2782 int
2783 nfsrv_dorec(struct nfssvc_sock *slp, struct nfsd *nfsd,
2784 struct nfsrv_descript **ndp)
2785 {
2786 struct nfsrv_rec *rec;
2787 struct mbuf *m;
2788 struct sockaddr *nam;
2789 struct nfsrv_descript *nd;
2790 int error;
2791
2792 *ndp = NULL;
2793 if ((slp->ns_flag & SLP_VALID) == 0 || !STAILQ_FIRST(&slp->ns_rec))
2794 return (ENOBUFS);
2795 rec = STAILQ_FIRST(&slp->ns_rec);
2796 STAILQ_REMOVE_HEAD(&slp->ns_rec, nr_link);
2797 KKASSERT(slp->ns_numrec > 0);
2798 --slp->ns_numrec;
2799 nam = rec->nr_address;
2800 m = rec->nr_packet;
2801 kfree(rec, M_NFSRVDESC);
2802 MALLOC(nd, struct nfsrv_descript *, sizeof (struct nfsrv_descript),
2803 M_NFSRVDESC, M_WAITOK);
2804 nd->nd_md = nd->nd_mrep = m;
2805 nd->nd_nam2 = nam;
2806 nd->nd_dpos = mtod(m, caddr_t);
2807 error = nfs_getreq(nd, nfsd, TRUE);
2808 if (error) {
2809 if (nam) {
2810 FREE(nam, M_SONAME);
2811 }
2812 kfree((caddr_t)nd, M_NFSRVDESC);
2813 return (error);
2814 }
2815 *ndp = nd;
2816 nfsd->nfsd_nd = nd;
2817 return (0);
2818 }
2819
2820 /*
2821 * Try to assign service sockets to nfsd threads based on the number
2822 * of new rpc requests that have been queued on the service socket.
2823 *
2824 * If no nfsd's are available or additional requests are pending, set the
2825 * NFSD_CHECKSLP flag so that one of the running nfsds will go look for
2826 * the work in the nfssvc_sock list when it is finished processing its
2827 * current work. This flag is only cleared when an nfsd cannot find
2828 * any new work to perform.
2829 */
2830 void
2831 nfsrv_wakenfsd(struct nfssvc_sock *slp, int nparallel)
2832 {
2833 struct nfsd *nd;
2834
2835 if ((slp->ns_flag & SLP_VALID) == 0)
2836 return;
2837 if (nparallel <= 1)
2838 nparallel = 1;
2839 TAILQ_FOREACH(nd, &nfsd_head, nfsd_chain) {
2840 if (nd->nfsd_flag & NFSD_WAITING) {
2841 nd->nfsd_flag &= ~NFSD_WAITING;
2842 if (nd->nfsd_slp)
2843 panic("nfsd wakeup");
2844 slp->ns_sref++;
2845 nd->nfsd_slp = slp;
2846 wakeup((caddr_t)nd);
2847 if (--nparallel == 0)
2848 break;
2849 }
2850 }
2851 if (nparallel) {
2852 slp->ns_flag |= SLP_DOREC;
2853 nfsd_head_flag |= NFSD_CHECKSLP;
2854 }
2855 }
2856 #endif /* NFS_NOSERVER */
2857