/*
 * Copyright (c) 1989, 1991, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_socket.c	8.5 (Berkeley) 3/30/95
 * $FreeBSD: src/sys/nfs/nfs_socket.c,v 1.60.2.6 2003/03/26 01:44:46 alfred Exp $
 * $DragonFly: src/sys/vfs/nfs/nfs_socket.c,v 1.45 2007/05/18 17:05:13 dillon Exp $
 */

/*
 * Socket operations for use by nfs
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/protosw.h>
#include <sys/resourcevar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/socketops.h>
#include <sys/syslog.h>
#include <sys/thread.h>
#include <sys/tprintf.h>
#include <sys/sysctl.h>
#include <sys/signalvar.h>
#include <sys/mutex.h>

#include <sys/signal2.h>
#include <sys/mutex2.h>

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/thread2.h>

#include "rpcv2.h"
#include "nfsproto.h"
#include "nfs.h"
#include "xdr_subs.h"
#include "nfsm_subs.h"
#include "nfsmount.h"
#include "nfsnode.h"
#include "nfsrtt.h"

#define	TRUE	1
#define	FALSE	0

/*
 * RTT calculations are scaled by 256 (8 bits).  A proper fractional
 * RTT will still be calculated even with a slow NFS timer.
 */
#define	NFS_SRTT(r)	(r)->r_nmp->nm_srtt[proct[(r)->r_procnum]]
#define	NFS_SDRTT(r)	(r)->r_nmp->nm_sdrtt[proct[(r)->r_procnum]]
#define	NFS_RTT_SCALE_BITS	8	/* bits */
#define	NFS_RTT_SCALE		256	/* value */
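/*
 * Illustrative note (not part of the original source): with 8 bits of
 * fraction, a smoothed rtt of 2.5 ticks is stored as 2.5 * 256 = 640,
 * so sub-tick precision survives the integer math even though the NFS
 * timer itself only advances in whole ticks.
 */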
/*
 * Defines which timer to use for the procnum.
 * 0 - default
 * 1 - getattr
 * 2 - lookup
 * 3 - read
 * 4 - write
 */
static int proct[NFS_NPROCS] = {
	0, 1, 0, 2, 1, 3, 3, 4, 0, 0,	/* 00-09	*/
	0, 0, 0, 0, 0, 0, 3, 3, 0, 0,	/* 10-19	*/
	0, 5, 0, 0, 0, 0,		/* 20-29	*/
};

static int multt[NFS_NPROCS] = {
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1,	/* 00-09	*/
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1,	/* 10-19	*/
	1, 2, 1, 1, 1, 1,		/* 20-29	*/
};

static int nfs_backoff[8] = { 2, 3, 5, 8, 13, 21, 34, 55 };
static int nfs_realign_test;
static int nfs_realign_count;
static int nfs_showrtt;
static int nfs_showrexmit;
int nfs_maxasyncbio = NFS_MAXASYNCBIO;

SYSCTL_DECL(_vfs_nfs);

SYSCTL_INT(_vfs_nfs, OID_AUTO, realign_test, CTLFLAG_RW, &nfs_realign_test, 0, "");
SYSCTL_INT(_vfs_nfs, OID_AUTO, realign_count, CTLFLAG_RW, &nfs_realign_count, 0, "");
SYSCTL_INT(_vfs_nfs, OID_AUTO, showrtt, CTLFLAG_RW, &nfs_showrtt, 0, "");
SYSCTL_INT(_vfs_nfs, OID_AUTO, showrexmit, CTLFLAG_RW, &nfs_showrexmit, 0, "");
SYSCTL_INT(_vfs_nfs, OID_AUTO, maxasyncbio, CTLFLAG_RW, &nfs_maxasyncbio, 0, "");

static int nfs_request_setup(nfsm_info_t info);
static int nfs_request_auth(struct nfsreq *rep);
static int nfs_request_try(struct nfsreq *rep);
static int nfs_request_waitreply(struct nfsreq *rep);
static int nfs_request_processreply(nfsm_info_t info, int);

int nfsrtton = 0;
struct nfsrtt nfsrtt;
struct callout	nfs_timer_handle;

static int	nfs_msg (struct thread *, char *, char *);
static int	nfs_rcvlock (struct nfsmount *nmp, struct nfsreq *myreq);
static void	nfs_rcvunlock (struct nfsmount *nmp);
static void	nfs_realign (struct mbuf **pm, int hsiz);
static int	nfs_receive (struct nfsmount *nmp, struct nfsreq *rep,
				struct sockaddr **aname, struct mbuf **mp);
static void	nfs_softterm (struct nfsreq *rep, int islocked);
static void	nfs_hardterm (struct nfsreq *rep, int islocked);
static int	nfs_reconnect (struct nfsmount *nmp, struct nfsreq *rep);
#ifndef NFS_NOSERVER
static int	nfsrv_getstream (struct nfssvc_sock *, int, int *);
static void	nfs_timer_req(struct nfsreq *req);

int (*nfsrv3_procs[NFS_NPROCS]) (struct nfsrv_descript *nd,
				    struct nfssvc_sock *slp,
				    struct thread *td,
				    struct mbuf **mreqp) = {
	nfsrv_null,
	nfsrv_getattr,
	nfsrv_setattr,
	nfsrv_lookup,
	nfsrv3_access,
	nfsrv_readlink,
	nfsrv_read,
	nfsrv_write,
	nfsrv_create,
	nfsrv_mkdir,
	nfsrv_symlink,
	nfsrv_mknod,
	nfsrv_remove,
	nfsrv_rmdir,
	nfsrv_rename,
	nfsrv_link,
	nfsrv_readdir,
	nfsrv_readdirplus,
	nfsrv_statfs,
	nfsrv_fsinfo,
	nfsrv_pathconf,
	nfsrv_commit,
	nfsrv_noop,
	nfsrv_noop,
	nfsrv_noop,
	nfsrv_noop
};
#endif /* NFS_NOSERVER */

/*
 * Initialize sockets and congestion for a new NFS connection.
 * We do not free the sockaddr if an error occurs.
 */
int
nfs_connect(struct nfsmount *nmp, struct nfsreq *rep)
{
	struct socket *so;
	int error;
	struct sockaddr *saddr;
	struct sockaddr_in *sin;
	struct thread *td = &thread0; /* only used for socreate and sobind */

	nmp->nm_so = so = NULL;
	if (nmp->nm_flag & NFSMNT_FORCE)
		return (EINVAL);
	saddr = nmp->nm_nam;
	error = socreate(saddr->sa_family, &so, nmp->nm_sotype,
			 nmp->nm_soproto, td);
	if (error)
		goto bad;
	nmp->nm_soflags = so->so_proto->pr_flags;

	/*
	 * Some servers require that the client port be a reserved port
	 * number.
	 */
	if (saddr->sa_family == AF_INET && (nmp->nm_flag & NFSMNT_RESVPORT)) {
		struct sockopt sopt;
		int ip;
		struct sockaddr_in ssin;

		bzero(&sopt, sizeof sopt);
		ip = IP_PORTRANGE_LOW;
		sopt.sopt_level = IPPROTO_IP;
		sopt.sopt_name = IP_PORTRANGE;
		sopt.sopt_val = (void *)&ip;
		sopt.sopt_valsize = sizeof(ip);
		sopt.sopt_td = NULL;
		error = sosetopt(so, &sopt);
		if (error)
			goto bad;
		bzero(&ssin, sizeof ssin);
		sin = &ssin;
		sin->sin_len = sizeof (struct sockaddr_in);
		sin->sin_family = AF_INET;
		sin->sin_addr.s_addr = INADDR_ANY;
		sin->sin_port = htons(0);
		error = sobind(so, (struct sockaddr *)sin, td);
		if (error)
			goto bad;
		bzero(&sopt, sizeof sopt);
		ip = IP_PORTRANGE_DEFAULT;
		sopt.sopt_level = IPPROTO_IP;
		sopt.sopt_name = IP_PORTRANGE;
		sopt.sopt_val = (void *)&ip;
		sopt.sopt_valsize = sizeof(ip);
		sopt.sopt_td = NULL;
		error = sosetopt(so, &sopt);
		if (error)
			goto bad;
	}
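	/*
	 * Illustrative note (not part of the original source): binding to
	 * port 0 while IP_PORTRANGE_LOW is in effect asks the kernel to
	 * pick an ephemeral port from the privileged range (below 1024),
	 * which is what rpc servers that insist on "reserved" client
	 * ports expect.  The option is then restored to
	 * IP_PORTRANGE_DEFAULT above.
	 */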
	/*
	 * Protocols that do not require connections may be optionally left
	 * unconnected for servers that reply from a port other than
	 * NFS_PORT.
	 */
	if (nmp->nm_flag & NFSMNT_NOCONN) {
		if (nmp->nm_soflags & PR_CONNREQUIRED) {
			error = ENOTCONN;
			goto bad;
		}
	} else {
		error = soconnect(so, nmp->nm_nam, td);
		if (error)
			goto bad;

		/*
		 * Wait for the connection to complete. Cribbed from the
		 * connect system call but with the wait timing out so
		 * that interruptible mounts don't hang here for a long time.
		 */
		crit_enter();
		while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) {
			(void) tsleep((caddr_t)&so->so_timeo, 0,
				      "nfscon", 2 * hz);
			if ((so->so_state & SS_ISCONNECTING) &&
			    so->so_error == 0 && rep &&
			    (error = nfs_sigintr(nmp, rep, rep->r_td)) != 0) {
				so->so_state &= ~SS_ISCONNECTING;
				crit_exit();
				goto bad;
			}
		}
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			crit_exit();
			goto bad;
		}
		crit_exit();
	}
	so->so_rcv.ssb_timeo = (5 * hz);
	so->so_snd.ssb_timeo = (5 * hz);

	/*
	 * Get buffer reservation size from sysctl, but impose reasonable
	 * limits.
	 */
	if (nmp->nm_sotype == SOCK_STREAM) {
		if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
			struct sockopt sopt;
			int val;

			bzero(&sopt, sizeof sopt);
			sopt.sopt_level = SOL_SOCKET;
			sopt.sopt_name = SO_KEEPALIVE;
			sopt.sopt_val = &val;
			sopt.sopt_valsize = sizeof val;
			val = 1;
			sosetopt(so, &sopt);
		}
		if (so->so_proto->pr_protocol == IPPROTO_TCP) {
			struct sockopt sopt;
			int val;

			bzero(&sopt, sizeof sopt);
			sopt.sopt_level = IPPROTO_TCP;
			sopt.sopt_name = TCP_NODELAY;
			sopt.sopt_val = &val;
			sopt.sopt_valsize = sizeof val;
			val = 1;
			sosetopt(so, &sopt);
		}
	}
	error = soreserve(so, nfs_soreserve, nfs_soreserve, NULL);
	if (error)
		goto bad;
	so->so_rcv.ssb_flags |= SSB_NOINTR;
	so->so_snd.ssb_flags |= SSB_NOINTR;

	/* Initialize other non-zero congestion variables */
	nmp->nm_srtt[0] = nmp->nm_srtt[1] = nmp->nm_srtt[2] =
		nmp->nm_srtt[3] = (NFS_TIMEO << NFS_RTT_SCALE_BITS);
	nmp->nm_sdrtt[0] = nmp->nm_sdrtt[1] = nmp->nm_sdrtt[2] =
		nmp->nm_sdrtt[3] = 0;
	nmp->nm_maxasync_scaled = NFS_MINASYNC_SCALED;
	nmp->nm_timeouts = 0;

	/*
	 * Assign nm_so last.  The moment nm_so is assigned the nfs_timer()
	 * can mess with the socket.
	 */
	nmp->nm_so = so;
	return (0);

bad:
	if (so) {
		soshutdown(so, SHUT_RDWR);
		soclose(so, FNONBLOCK);
	}
	return (error);
}
/*
 * Reconnect routine:
 * Called when a connection is broken on a reliable protocol.
 * - clean up the old socket
 * - nfs_connect() again
 * - set R_NEEDSXMIT for all outstanding requests on mount point
 * If this fails the mount point is DEAD!
 * nb: Must be called with the nfs_sndlock() set on the mount point.
 */
static int
nfs_reconnect(struct nfsmount *nmp, struct nfsreq *rep)
{
	struct nfsreq *req;
	int error;

	nfs_disconnect(nmp);
	while ((error = nfs_connect(nmp, rep)) != 0) {
		if (error == EINTR || error == ERESTART)
			return (EINTR);
		if (error == EINVAL)
			return (error);
		(void) tsleep((caddr_t)&lbolt, 0, "nfscon", 0);
	}

	/*
	 * Loop through outstanding request list and fix up all requests
	 * on old socket.
	 */
	crit_enter();
	TAILQ_FOREACH(req, &nmp->nm_reqq, r_chain) {
		KKASSERT(req->r_nmp == nmp);
		req->r_flags |= R_NEEDSXMIT;
	}
	crit_exit();
	return (0);
}

/*
 * NFS disconnect.  Clean up and unlink.
 */
void
nfs_disconnect(struct nfsmount *nmp)
{
	struct socket *so;

	if (nmp->nm_so) {
		so = nmp->nm_so;
		nmp->nm_so = NULL;
		soshutdown(so, SHUT_RDWR);
		soclose(so, FNONBLOCK);
	}
}

void
nfs_safedisconnect(struct nfsmount *nmp)
{
	nfs_rcvlock(nmp, NULL);
	nfs_disconnect(nmp);
	nfs_rcvunlock(nmp);
}

/*
 * This is the nfs send routine. For connection based socket types, it
 * must be called with an nfs_sndlock() on the socket.
 * "rep == NULL" indicates that it has been called from a server.
 * For the client side:
 * - return EINTR if the RPC is terminated, 0 otherwise
 * - set R_NEEDSXMIT if the send fails for any reason
 * - do any cleanup required by recoverable socket errors (?)
 * For the server side:
 * - return EINTR or ERESTART if interrupted by a signal
 * - return EPIPE if a connection is lost for connection based sockets (TCP...)
 * - do any cleanup required by recoverable socket errors (?)
 */
int
nfs_send(struct socket *so, struct sockaddr *nam, struct mbuf *top,
	 struct nfsreq *rep)
{
	struct sockaddr *sendnam;
	int error, soflags, flags;

	if (rep) {
		if (rep->r_flags & R_SOFTTERM) {
			m_freem(top);
			return (EINTR);
		}
		if ((so = rep->r_nmp->nm_so) == NULL) {
			rep->r_flags |= R_NEEDSXMIT;
			m_freem(top);
			return (0);
		}
		rep->r_flags &= ~R_NEEDSXMIT;
		soflags = rep->r_nmp->nm_soflags;
	} else {
		soflags = so->so_proto->pr_flags;
	}
	if ((soflags & PR_CONNREQUIRED) || (so->so_state & SS_ISCONNECTED))
		sendnam = NULL;
	else
		sendnam = nam;
	if (so->so_type == SOCK_SEQPACKET)
		flags = MSG_EOR;
	else
		flags = 0;

	error = so_pru_sosend(so, sendnam, NULL, top, NULL, flags,
			      curthread /*XXX*/);
	/*
	 * ENOBUFS for dgram sockets is transient and non fatal.
	 * No need to log, and no need to break a soft mount.
	 */
	if (error == ENOBUFS && so->so_type == SOCK_DGRAM) {
		error = 0;
		/*
		 * do backoff retransmit on client
		 */
		if (rep) {
			if ((rep->r_nmp->nm_state & NFSSTA_SENDSPACE) == 0) {
				rep->r_nmp->nm_state |= NFSSTA_SENDSPACE;
				kprintf("Warning: NFS: Insufficient sendspace "
					"(%lu),\n"
					"\t You must increase vfs.nfs.soreserve "
					"or decrease vfs.nfs.maxasyncbio\n",
					so->so_snd.ssb_hiwat);
			}
			rep->r_flags |= R_NEEDSXMIT;
		}
	}

	if (error) {
		if (rep) {
			log(LOG_INFO, "nfs send error %d for server %s\n", error,
			    rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname);
			/*
			 * Deal with errors for the client side.
			 */
			if (rep->r_flags & R_SOFTTERM)
				error = EINTR;
			else
				rep->r_flags |= R_NEEDSXMIT;
		} else {
			log(LOG_INFO, "nfsd send error %d\n", error);
		}

		/*
		 * Handle any recoverable (soft) socket errors here. (?)
		 */
		if (error != EINTR && error != ERESTART &&
		    error != EWOULDBLOCK && error != EPIPE)
			error = 0;
	}
	return (error);
}
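/*
 * Illustrative note (not part of the original source): nfs_send() never
 * retries by itself.  A transient failure (e.g. ENOBUFS on UDP above)
 * only flags the request with R_NEEDSXMIT; the actual retransmission is
 * driven later by nfs_timer_req() or by the resend loop in nfs_receive().
 */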
/*
 * Receive a Sun RPC Request/Reply.  For SOCK_DGRAM, the work is all
 * done by soreceive(), but for SOCK_STREAM we must deal with the Record
 * Mark and consolidate the data into a new mbuf list.
 * nb: Sometimes TCP passes the data up to soreceive() in long lists of
 *     small mbufs.
 * For SOCK_STREAM we must be very careful to read an entire record once
 * we have read any of it, even if the system call has been interrupted.
 */
static int
nfs_receive(struct nfsmount *nmp, struct nfsreq *rep,
	    struct sockaddr **aname, struct mbuf **mp)
{
	struct socket *so;
	struct sockbuf sio;
	struct uio auio;
	struct iovec aio;
	struct mbuf *m;
	struct mbuf *control;
	u_int32_t len;
	struct sockaddr **getnam;
	int error, sotype, rcvflg;
	struct thread *td = curthread;	/* XXX */

	/*
	 * Set up arguments for soreceive()
	 */
	*mp = NULL;
	*aname = NULL;
	sotype = nmp->nm_sotype;

	/*
	 * For reliable protocols, lock against other senders/receivers
	 * in case a reconnect is necessary.
	 * For SOCK_STREAM, first get the Record Mark to find out how much
	 * more there is to get.
	 * We must lock the socket against other receivers
	 * until we have an entire rpc request/reply.
	 */
	if (sotype != SOCK_DGRAM) {
		error = nfs_sndlock(nmp, rep);
		if (error)
			return (error);
tryagain:
		/*
		 * Check for fatal errors and for a request that needs
		 * resending.
		 */
		/*
		 * Ugh: If a reconnect attempt just happened, nm_so
		 * would have changed. NULL indicates a failed
		 * attempt that has essentially shut down this
		 * mount point.
		 */
		if (rep && (rep->r_mrep || (rep->r_flags & R_SOFTTERM))) {
			nfs_sndunlock(nmp);
			return (EINTR);
		}
		so = nmp->nm_so;
		if (so == NULL) {
			error = nfs_reconnect(nmp, rep);
			if (error) {
				nfs_sndunlock(nmp);
				return (error);
			}
			goto tryagain;
		}
		while (rep && (rep->r_flags & R_NEEDSXMIT)) {
			m = m_copym(rep->r_mreq, 0, M_COPYALL, MB_WAIT);
			nfsstats.rpcretries++;
			error = nfs_send(so, rep->r_nmp->nm_nam, m, rep);
			if (error) {
				if (error == EINTR || error == ERESTART ||
				    (error = nfs_reconnect(nmp, rep)) != 0) {
					nfs_sndunlock(nmp);
					return (error);
				}
				goto tryagain;
			}
		}
		nfs_sndunlock(nmp);
		if (sotype == SOCK_STREAM) {
			/*
			 * Get the length marker from the stream
			 */
			aio.iov_base = (caddr_t)&len;
			aio.iov_len = sizeof(u_int32_t);
			auio.uio_iov = &aio;
			auio.uio_iovcnt = 1;
			auio.uio_segflg = UIO_SYSSPACE;
			auio.uio_rw = UIO_READ;
			auio.uio_offset = 0;
			auio.uio_resid = sizeof(u_int32_t);
			auio.uio_td = td;
			do {
				rcvflg = MSG_WAITALL;
				error = so_pru_soreceive(so, NULL, &auio, NULL,
							 NULL, &rcvflg);
				if (error == EWOULDBLOCK && rep) {
					if (rep->r_flags & R_SOFTTERM)
						return (EINTR);
				}
			} while (error == EWOULDBLOCK);

			if (error == 0 && auio.uio_resid > 0) {
				/*
				 * Only log short packets if not EOF
				 */
				if (auio.uio_resid != sizeof(u_int32_t))
					log(LOG_INFO,
					    "short receive (%d/%d) from nfs server %s\n",
					    (int)(sizeof(u_int32_t) - auio.uio_resid),
					    (int)sizeof(u_int32_t),
					    nmp->nm_mountp->mnt_stat.f_mntfromname);
				error = EPIPE;
			}
			if (error)
				goto errout;
			len = ntohl(len) & ~0x80000000;
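			/*
			 * Illustrative note (not part of the original
			 * source): the record mark is a 4-byte big-endian
			 * word; bit 31 set means "last fragment" and the
			 * low 31 bits carry the fragment length, so a
			 * final 100-byte fragment arrives as
			 * htonl(0x80000000 | 100).  The mask above strips
			 * the last-fragment bit, leaving the length.
			 */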
			/*
			 * This is SERIOUS! We are out of sync with the
			 * sender and forcing a disconnect/reconnect is all
			 * I can do.
			 */
			if (len > NFS_MAXPACKET) {
				log(LOG_ERR, "%s (%d) from nfs server %s\n",
				    "impossible packet length",
				    len,
				    nmp->nm_mountp->mnt_stat.f_mntfromname);
				error = EFBIG;
				goto errout;
			}

			/*
			 * Get the rest of the packet as an mbuf chain
			 */
			sbinit(&sio, len);
			do {
				rcvflg = MSG_WAITALL;
				error = so_pru_soreceive(so, NULL, NULL, &sio,
							 NULL, &rcvflg);
			} while (error == EWOULDBLOCK || error == EINTR ||
				 error == ERESTART);
			if (error == 0 && sio.sb_cc != len) {
				if (sio.sb_cc != 0)
					log(LOG_INFO,
					    "short receive (%d/%d) from nfs server %s\n",
					    (int)sio.sb_cc, (int)len,
					    nmp->nm_mountp->mnt_stat.f_mntfromname);
				error = EPIPE;
			}
			*mp = sio.sb_mb;
		} else {
			/*
			 * Non-stream, so get the whole packet by not
			 * specifying MSG_WAITALL and by specifying a large
			 * length.
			 *
			 * We have no use for control messages, but must grab
			 * them and then throw them away so we know what is
			 * going on.
			 */
			sbinit(&sio, 100000000);
			do {
				rcvflg = 0;
				error = so_pru_soreceive(so, NULL, NULL, &sio,
							 &control, &rcvflg);
				if (control)
					m_freem(control);
				if (error == EWOULDBLOCK && rep) {
					if (rep->r_flags & R_SOFTTERM) {
						m_freem(sio.sb_mb);
						return (EINTR);
					}
				}
			} while (error == EWOULDBLOCK ||
				 (error == 0 && sio.sb_mb == NULL && control));
			if ((rcvflg & MSG_EOR) == 0)
				kprintf("Egad!!\n");
			if (error == 0 && sio.sb_mb == NULL)
				error = EPIPE;
			len = sio.sb_cc;
			*mp = sio.sb_mb;
		}
errout:
		if (error && error != EINTR && error != ERESTART) {
			m_freem(*mp);
			*mp = NULL;
			if (error != EPIPE) {
				log(LOG_INFO,
				    "receive error %d from nfs server %s\n",
				    error,
				    nmp->nm_mountp->mnt_stat.f_mntfromname);
			}
			error = nfs_sndlock(nmp, rep);
			if (!error) {
				error = nfs_reconnect(nmp, rep);
				if (!error)
					goto tryagain;
				else
					nfs_sndunlock(nmp);
			}
		}
	} else {
		if ((so = nmp->nm_so) == NULL)
			return (EACCES);
		if (so->so_state & SS_ISCONNECTED)
			getnam = NULL;
		else
			getnam = aname;
		sbinit(&sio, 100000000);
		do {
			rcvflg = 0;
			error = so_pru_soreceive(so, getnam, NULL, &sio,
						 NULL, &rcvflg);
			if (error == EWOULDBLOCK && rep &&
			    (rep->r_flags & R_SOFTTERM)) {
				m_freem(sio.sb_mb);
				return (EINTR);
			}
		} while (error == EWOULDBLOCK);

		len = sio.sb_cc;
		*mp = sio.sb_mb;

		/*
		 * A shutdown may result in no error and no mbuf.
		 * Convert to EPIPE.
		 */
		if (*mp == NULL && error == 0)
			error = EPIPE;
	}
	if (error) {
		m_freem(*mp);
		*mp = NULL;
	}

	/*
	 * Search for any mbufs that are not a multiple of 4 bytes long
	 * or with m_data not longword aligned.
	 * These could cause pointer alignment problems, so copy them to
	 * well aligned mbufs.
	 */
	nfs_realign(mp, 5 * NFSX_UNSIGNED);
	return (error);
}
/*
 * Implement receipt of reply on a socket.
 *
 * We must search through the list of received datagrams matching them
 * with outstanding requests using the xid, until ours is found.
 *
 * If myrep is NULL we process packets on the socket until
 * interrupted or until nm_reqrxq is non-empty.
 */
/* ARGSUSED */
int
nfs_reply(struct nfsmount *nmp, struct nfsreq *myrep)
{
	struct nfsreq *rep;
	struct sockaddr *nam;
	u_int32_t rxid;
	u_int32_t *tl;
	int error;
	struct nfsm_info info;

	/*
	 * Loop around until we get our own reply
	 */
	for (;;) {
		/*
		 * Lock against other receivers so that I don't get stuck in
		 * sbwait() after someone else has received my reply for me.
		 * Also necessary for connection based protocols to avoid
		 * race conditions during a reconnect.
		 *
		 * If nfs_rcvlock() returns EALREADY, that means that
		 * the reply has already been received by another
		 * process and we can return immediately.  In this
		 * case, the lock is not taken to avoid races with
		 * other processes.
		 */
		info.mrep = NULL;

		error = nfs_rcvlock(nmp, myrep);
		if (error == EALREADY)
			return (0);
		if (error)
			return (error);

		/*
		 * If myrep is NULL we are the receiver helper thread.
		 * Stop waiting for incoming replies if there are
		 * messages sitting on reqrxq that we need to process,
		 * or if a shutdown request is pending.
		 */
		if (myrep == NULL && (TAILQ_FIRST(&nmp->nm_reqrxq) ||
		    nmp->nm_rxstate > NFSSVC_PENDING)) {
			nfs_rcvunlock(nmp);
			return (EWOULDBLOCK);
		}

		/*
		 * Get the next Rpc reply off the socket
		 *
		 * We cannot release the receive lock until we've
		 * filled in rep->r_mrep, otherwise a waiting
		 * thread may deadlock in soreceive with no incoming
		 * packets expected.
		 */
		error = nfs_receive(nmp, myrep, &nam, &info.mrep);
		if (error) {
			/*
			 * Ignore routing errors on connectionless protocols??
			 */
			nfs_rcvunlock(nmp);
			if (NFSIGNORE_SOERROR(nmp->nm_soflags, error)) {
				if (nmp->nm_so == NULL)
					return (error);
				nmp->nm_so->so_error = 0;
				continue;
			}
			return (error);
		}
		if (nam)
			FREE(nam, M_SONAME);

		/*
		 * Get the xid and check that it is an rpc reply
		 */
		info.md = info.mrep;
		info.dpos = mtod(info.md, caddr_t);
		NULLOUT(tl = nfsm_dissect(&info, 2 * NFSX_UNSIGNED));
		rxid = *tl++;
		if (*tl != rpc_reply) {
			nfsstats.rpcinvalid++;
			m_freem(info.mrep);
			info.mrep = NULL;
nfsmout:
			nfs_rcvunlock(nmp);
			continue;
		}

		/*
		 * Loop through the request list to match up the reply
		 * Iff no match, just drop the datagram.  On match, set
		 * r_mrep atomically to prevent the timer from messing
		 * around with the request after we have exited the critical
		 * section.
		 */
		crit_enter();
		TAILQ_FOREACH(rep, &nmp->nm_reqq, r_chain) {
			if (rep->r_mrep == NULL && rxid == rep->r_xid)
				break;
		}

		/*
		 * Fill in the rest of the reply if we found a match.
		 *
		 * Deal with duplicate responses if there was no match.
		 */
		if (rep) {
			rep->r_md = info.md;
			rep->r_dpos = info.dpos;
			if (nfsrtton) {
				struct rttl *rt;

				rt = &nfsrtt.rttl[nfsrtt.pos];
				rt->proc = rep->r_procnum;
				rt->rto = 0;
				rt->sent = 0;
				rt->cwnd = nmp->nm_maxasync_scaled;
				rt->srtt = nmp->nm_srtt[proct[rep->r_procnum] - 1];
				rt->sdrtt = nmp->nm_sdrtt[proct[rep->r_procnum] - 1];
				rt->fsid = nmp->nm_mountp->mnt_stat.f_fsid;
				getmicrotime(&rt->tstamp);
				if (rep->r_flags & R_TIMING)
					rt->rtt = rep->r_rtt;
				else
					rt->rtt = 1000000;
				nfsrtt.pos = (nfsrtt.pos + 1) % NFSRTTLOGSIZ;
			}

			/*
			 * New congestion control is based only on async
			 * requests.
			 */
			if (nmp->nm_maxasync_scaled < NFS_MAXASYNC_SCALED)
				++nmp->nm_maxasync_scaled;
			if (rep->r_flags & R_SENT) {
				rep->r_flags &= ~R_SENT;
			}
			/*
			 * Update rtt using a gain of 0.125 on the mean
			 * and a gain of 0.25 on the deviation.
			 *
			 * NOTE SRTT/SDRTT are only good if R_TIMING is set.
			 */
			if ((rep->r_flags & R_TIMING) && rep->r_rexmit == 0) {
				/*
				 * Since the timer resolution of
				 * NFS_HZ is so coarse, it can often
				 * result in r_rtt == 0.  Since
				 * r_rtt == N means that the actual
				 * rtt is between N+dt and N+2-dt ticks,
				 * add 1.
				 */
				int n;
				int d;

#define NFSRSB	NFS_RTT_SCALE_BITS
				n = ((NFS_SRTT(rep) * 7) +
				     (rep->r_rtt << NFSRSB)) >> 3;
				d = n - NFS_SRTT(rep);
				NFS_SRTT(rep) = n;

				/*
				 * Don't let the jitter calculation decay
				 * too quickly, but we want a fast rampup.
				 */
				if (d < 0)
					d = -d;
				d <<= NFSRSB;
				if (d < NFS_SDRTT(rep))
					n = ((NFS_SDRTT(rep) * 15) + d) >> 4;
				else
					n = ((NFS_SDRTT(rep) * 3) + d) >> 2;
				NFS_SDRTT(rep) = n;
#undef NFSRSB
			}
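			/*
			 * Illustrative example (not part of the original
			 * source): with NFS_SRTT(rep) == 640 (2.5 ticks
			 * scaled by 256) and r_rtt == 3, the mean becomes
			 * (640 * 7 + (3 << 8)) >> 3 = 656, i.e. it moves
			 * 1/8th of the way toward the new sample, matching
			 * the 0.125 gain noted above.
			 */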
			nmp->nm_timeouts = 0;
			rep->r_mrep = info.mrep;
			nfs_hardterm(rep, 0);
		} else {
			/*
			 * Extract vers, prog, nfsver, procnum.  A duplicate
			 * response means we didn't wait long enough so
			 * we increase the SRTT to avoid future spurious
			 * timeouts.
			 */
			u_int procnum = nmp->nm_lastreprocnum;
			int n;

			if (procnum < NFS_NPROCS && proct[procnum]) {
				if (nfs_showrexmit)
					kprintf("D");
				n = nmp->nm_srtt[proct[procnum]];
				n += NFS_ASYSCALE * NFS_HZ;
				if (n < NFS_ASYSCALE * NFS_HZ * 10)
					n = NFS_ASYSCALE * NFS_HZ * 10;
				nmp->nm_srtt[proct[procnum]] = n;
			}
		}
		nfs_rcvunlock(nmp);
		crit_exit();

		/*
		 * If not matched to a request, drop it.
		 * If it's mine, get out.
		 */
		if (rep == NULL) {
			nfsstats.rpcunexpected++;
			m_freem(info.mrep);
			info.mrep = NULL;
		} else if (rep == myrep) {
			if (rep->r_mrep == NULL)
				panic("nfsreply nil");
			return (0);
		}
	}
}
/*
 * Run the request state machine until the target state is reached
 * or a fatal error occurs.  The target state is not run.  Specifying
 * a target of NFSM_STATE_DONE runs the state machine until the rpc
 * is complete.
 *
 * EINPROGRESS is returned for all states other than the DONE state,
 * indicating that the rpc is still in progress.
 */
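/*
 * Illustrative note (not part of the original source): a synchronous
 * RPC walks SETUP -> AUTH -> TRY -> WAITREPLY -> PROCESSREPLY -> DONE
 * in one thread, while an async RPC is left parked at WAITREPLY by
 * nfs_request_try() and is pushed to PROCESSREPLY by the receiver
 * helper threads once the reply (or a termination) arrives.
 */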
int
nfs_request(struct nfsm_info *info, nfsm_state_t bstate, nfsm_state_t estate)
{
	struct nfsreq *req;

	while (info->state >= bstate && info->state < estate) {
		switch (info->state) {
		case NFSM_STATE_SETUP:
			/*
			 * Setup the nfsreq.  Any error which occurs during
			 * this state is fatal.
			 */
			info->error = nfs_request_setup(info);
			if (info->error) {
				info->state = NFSM_STATE_DONE;
				return (info->error);
			} else {
				req = info->req;
				req->r_mrp = &info->mrep;
				req->r_mdp = &info->md;
				req->r_dposp = &info->dpos;
				info->state = NFSM_STATE_AUTH;
			}
			break;
		case NFSM_STATE_AUTH:
			/*
			 * Authenticate the nfsreq.  Any error which occurs
			 * during this state is fatal.
			 */
			info->error = nfs_request_auth(info->req);
			if (info->error) {
				info->state = NFSM_STATE_DONE;
				return (info->error);
			} else {
				info->state = NFSM_STATE_TRY;
			}
			break;
		case NFSM_STATE_TRY:
			/*
			 * Transmit or retransmit attempt.  An error in this
			 * state is ignored and we always move on to the
			 * next state.
			 *
			 * This can trivially race the receiver if the
			 * request is asynchronous.  nfs_request_try()
			 * will thus set the state for us and we
			 * must also return immediately if we are
			 * running an async state machine, because
			 * info can become invalid due to races after
			 * try() returns.
			 */
			if (info->req->r_flags & R_ASYNC) {
				nfs_request_try(info->req);
				if (estate == NFSM_STATE_WAITREPLY)
					return (EINPROGRESS);
			} else {
				nfs_request_try(info->req);
				info->state = NFSM_STATE_WAITREPLY;
			}
			break;
		case NFSM_STATE_WAITREPLY:
			/*
			 * Wait for a reply or timeout and move on to the
			 * next state.  The error returned by this state
			 * is passed to the processing code in the next
			 * state.
			 */
			info->error = nfs_request_waitreply(info->req);
			info->state = NFSM_STATE_PROCESSREPLY;
			break;
		case NFSM_STATE_PROCESSREPLY:
			/*
			 * Process the reply or timeout.  Errors which occur
			 * in this state may cause the state machine to
			 * go back to an earlier state, and are fatal
			 * otherwise.
			 */
			info->error = nfs_request_processreply(info,
							       info->error);
			switch (info->error) {
			case ENEEDAUTH:
				info->state = NFSM_STATE_AUTH;
				break;
			case EAGAIN:
				info->state = NFSM_STATE_TRY;
				break;
			default:
				/*
				 * Operation complete, with or without an
				 * error.  We are done.
				 */
				info->req = NULL;
				info->state = NFSM_STATE_DONE;
				return (info->error);
			}
			break;
		case NFSM_STATE_DONE:
			/*
			 * Shouldn't be reached
			 */
			return (info->error);
			/* NOT REACHED */
		}
	}

	/*
	 * If we are done return the error code (if any).
	 * Otherwise return EINPROGRESS.
	 */
	if (info->state == NFSM_STATE_DONE)
		return (info->error);
	return (EINPROGRESS);
}
/*
 * nfs_request - goes something like this
 *	- fill in request struct
 *	- links it into list
 *	- calls nfs_send() for first transmit
 *	- calls nfs_receive() to get reply
 *	- break down rpc header and return with nfs reply pointed to
 *	  by mrep or error
 * nb: always frees up mreq mbuf list
 */
static int
nfs_request_setup(nfsm_info_t info)
{
	struct nfsreq *req;
	struct nfsmount *nmp;
	struct mbuf *m;
	int i;

	/*
	 * Reject requests while attempting a forced unmount.
	 */
	if (info->vp->v_mount->mnt_kern_flag & MNTK_UNMOUNTF) {
		m_freem(info->mreq);
		info->mreq = NULL;
		return (ESTALE);
	}
	nmp = VFSTONFS(info->vp->v_mount);
	req = kmalloc(sizeof(struct nfsreq), M_NFSREQ, M_WAITOK);
	req->r_nmp = nmp;
	req->r_vp = info->vp;
	req->r_td = info->td;
	req->r_procnum = info->procnum;
	req->r_mreq = NULL;
	req->r_cred = info->cred;

	i = 0;
	m = info->mreq;
	while (m) {
		i += m->m_len;
		m = m->m_next;
	}
	req->r_mrest = info->mreq;
	req->r_mrest_len = i;

	/*
	 * The presence of a non-NULL r_info in req indicates
	 * async completion via our helper threads.  See the receiver
	 * code.
	 */
	if (info->bio) {
		req->r_info = info;
		req->r_flags = R_ASYNC;
	} else {
		req->r_info = NULL;
		req->r_flags = 0;
	}
	info->req = req;
	return (0);
}

static int
nfs_request_auth(struct nfsreq *rep)
{
	struct nfsmount *nmp = rep->r_nmp;
	struct mbuf *m;
	char nickv[RPCX_NICKVERF];
	int error = 0, auth_len, auth_type;
	int verf_len;
	u_int32_t xid;
	char *auth_str, *verf_str;
	struct ucred *cred;

	cred = rep->r_cred;
	rep->r_failed_auth = 0;

	/*
	 * Get the RPC header with authorization.
	 */
	verf_str = auth_str = NULL;
	if (nmp->nm_flag & NFSMNT_KERB) {
		verf_str = nickv;
		verf_len = sizeof (nickv);
		auth_type = RPCAUTH_KERB4;
		bzero((caddr_t)rep->r_key, sizeof(rep->r_key));
		if (rep->r_failed_auth ||
		    nfs_getnickauth(nmp, cred, &auth_str, &auth_len,
				    verf_str, verf_len)) {
			error = nfs_getauth(nmp, rep, cred, &auth_str,
					    &auth_len, verf_str, &verf_len,
					    rep->r_key);
			if (error) {
				m_freem(rep->r_mrest);
				rep->r_mrest = NULL;
				kfree((caddr_t)rep, M_NFSREQ);
				return (error);
			}
		}
	} else {
		auth_type = RPCAUTH_UNIX;
		if (cred->cr_ngroups < 1)
			panic("nfsreq nogrps");
		auth_len = ((((cred->cr_ngroups - 1) > nmp->nm_numgrps) ?
			nmp->nm_numgrps : (cred->cr_ngroups - 1)) << 2) +
			5 * NFSX_UNSIGNED;
	}
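	/*
	 * Illustrative example (not part of the original source, and
	 * assuming nfsm_rpchead() emits five fixed XDR words for the
	 * AUTH_UNIX body): with 4 groups and nm_numgrps >= 3 the length
	 * works out to (3 << 2) + 5 * NFSX_UNSIGNED = 12 + 20 = 32 bytes,
	 * one word per supplementary gid on top of the fixed fields.
	 */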
	m = nfsm_rpchead(cred, nmp->nm_flag, rep->r_procnum, auth_type,
			 auth_len, auth_str, verf_len, verf_str,
			 rep->r_mrest, rep->r_mrest_len, &rep->r_mheadend,
			 &xid);
	rep->r_mrest = NULL;
	if (auth_str)
		kfree(auth_str, M_TEMP);

	/*
	 * For stream protocols, insert a Sun RPC Record Mark.
	 */
	if (nmp->nm_sotype == SOCK_STREAM) {
		M_PREPEND(m, NFSX_UNSIGNED, MB_WAIT);
		if (m == NULL) {
			kfree(rep, M_NFSREQ);
			return (ENOBUFS);
		}
		*mtod(m, u_int32_t *) = htonl(0x80000000 |
			(m->m_pkthdr.len - NFSX_UNSIGNED));
	}
	rep->r_mreq = m;
	rep->r_xid = xid;
	return (0);
}

static int
nfs_request_try(struct nfsreq *rep)
{
	struct nfsmount *nmp = rep->r_nmp;
	struct mbuf *m2;
	int error;

	/*
	 * Request is not on any queue, only the owner has access to it
	 * so it should not be locked by anyone atm.
	 *
	 * Interlock to prevent races.  While locked the only remote
	 * action possible is for r_mrep to be set (once we enqueue it).
	 */
	if (rep->r_flags == 0xdeadc0de) {
		print_backtrace();
		panic("flags nbad\n");
	}
	KKASSERT((rep->r_flags & (R_LOCKED | R_ONREQQ)) == 0);
	if (nmp->nm_flag & NFSMNT_SOFT)
		rep->r_retry = nmp->nm_retry;
	else
		rep->r_retry = NFS_MAXREXMIT + 1;	/* past clip limit */
	rep->r_rtt = rep->r_rexmit = 0;
	if (proct[rep->r_procnum] > 0)
		rep->r_flags |= R_TIMING | R_LOCKED;
	else
		rep->r_flags |= R_LOCKED;
	rep->r_mrep = NULL;

	/*
	 * Do the client side RPC.
	 */
	nfsstats.rpcrequests++;

	if (nmp->nm_flag & NFSMNT_FORCE) {
		rep->r_flags |= R_SOFTTERM;
		rep->r_flags &= ~R_LOCKED;
		return (0);
	}

	/*
	 * Chain request into list of outstanding requests. Be sure
	 * to put it LAST so timer finds oldest requests first.  Note
	 * that our control of R_LOCKED prevents the request from
	 * getting ripped out from under us or transmitted by the
	 * timer code.
	 *
	 * For requests with info structures we must atomically set the
	 * info's state because the structure could become invalid upon
	 * return due to races (i.e., if async)
	 */
	crit_enter();
	mtx_link_init(&rep->r_link);
	TAILQ_INSERT_TAIL(&nmp->nm_reqq, rep, r_chain);
	rep->r_flags |= R_ONREQQ;
	++nmp->nm_reqqlen;
	if (rep->r_flags & R_ASYNC)
		rep->r_info->state = NFSM_STATE_WAITREPLY;
	crit_exit();

	error = 0;

	/*
	 * Send if we can.  Congestion control is not handled here any more
	 * because trying to defer the initial send based on the nfs_timer
	 * requires having a very fast nfs_timer, which is silly.
	 */
	if (nmp->nm_so) {
		if (nmp->nm_soflags & PR_CONNREQUIRED)
			error = nfs_sndlock(nmp, rep);
		if (error == 0) {
			m2 = m_copym(rep->r_mreq, 0, M_COPYALL, MB_WAIT);
			error = nfs_send(nmp->nm_so, nmp->nm_nam, m2, rep);
			if (nmp->nm_soflags & PR_CONNREQUIRED)
				nfs_sndunlock(nmp);
			rep->r_flags &= ~R_NEEDSXMIT;
			if ((rep->r_flags & R_SENT) == 0) {
				rep->r_flags |= R_SENT;
			}
		} else {
			rep->r_flags |= R_NEEDSXMIT;
		}
	} else {
		rep->r_flags |= R_NEEDSXMIT;
		rep->r_rtt = -1;
	}
	if (error == EPIPE)
		error = 0;

	/*
	 * Release the lock.  The only remote action that may have occurred
	 * would have been the setting of rep->r_mrep.  If this occurred
	 * and the request was async we have to move it to the reader
	 * thread's queue for action.
	 *
	 * For async requests also make sure the reader is woken up so
	 * it gets on the socket to read responses.
	 */
	crit_enter();
	if (rep->r_flags & R_ASYNC) {
		if (rep->r_mrep)
			nfs_hardterm(rep, 1);
		rep->r_flags &= ~R_LOCKED;
		nfssvc_iod_reader_wakeup(nmp);
	} else {
		rep->r_flags &= ~R_LOCKED;
	}
	if (rep->r_flags & R_WANTED) {
		rep->r_flags &= ~R_WANTED;
		wakeup(rep);
	}
	crit_exit();
	return (error);
}
/*
 * This code is only called for synchronous requests.  Completed synchronous
 * requests are left on reqq and we remove them before moving on to the
 * processing state.
 */
static int
nfs_request_waitreply(struct nfsreq *rep)
{
	struct nfsmount *nmp = rep->r_nmp;
	int error;

	KKASSERT((rep->r_flags & R_ASYNC) == 0);

	/*
	 * Wait until the request is finished.
	 */
	error = nfs_reply(nmp, rep);

	/*
	 * RPC done, unlink the request, but don't rip it out from under
	 * the callout timer.
	 *
	 * Once unlinked no other receiver or the timer will have
	 * visibility, so we do not have to set R_LOCKED.
	 */
	crit_enter();
	while (rep->r_flags & R_LOCKED) {
		rep->r_flags |= R_WANTED;
		tsleep(rep, 0, "nfstrac", 0);
	}
	KKASSERT(rep->r_flags & R_ONREQQ);
	TAILQ_REMOVE(&nmp->nm_reqq, rep, r_chain);
	rep->r_flags &= ~R_ONREQQ;
	--nmp->nm_reqqlen;
	if (TAILQ_FIRST(&nmp->nm_bioq) &&
	    nmp->nm_reqqlen == NFS_MAXASYNCBIO * 2 / 3) {
		nfssvc_iod_writer_wakeup(nmp);
	}
	crit_exit();

	/*
	 * Decrement the outstanding request count.
	 */
	if (rep->r_flags & R_SENT) {
		rep->r_flags &= ~R_SENT;
	}
	return (error);
}

/*
 * Process reply with error returned from nfs_request_waitreply().
 *
 * Returns EAGAIN if it wants us to loop up to nfs_request_try() again.
 * Returns ENEEDAUTH if it wants us to loop up to nfs_request_auth() again.
 */
static int
nfs_request_processreply(nfsm_info_t info, int error)
{
	struct nfsreq *req = info->req;
	struct nfsmount *nmp = req->r_nmp;
	u_int32_t *tl;
	int verf_type;
	int i;

	/*
	 * If there was a successful reply and a tprintf msg has been
	 * printed, tprintf a response saying the server is alive again.
	 */
	if (error == 0 && (req->r_flags & R_TPRINTFMSG)) {
		nfs_msg(req->r_td, nmp->nm_mountp->mnt_stat.f_mntfromname,
			"is alive again");
	}
	info->mrep = req->r_mrep;
	info->md = req->r_md;
	info->dpos = req->r_dpos;
	if (error) {
		m_freem(req->r_mreq);
		req->r_mreq = NULL;
		kfree(req, M_NFSREQ);
		info->req = NULL;
		return (error);
	}

	/*
	 * break down the rpc header and check if ok
	 */
	NULLOUT(tl = nfsm_dissect(info, 3 * NFSX_UNSIGNED));
	if (*tl++ == rpc_msgdenied) {
		if (*tl == rpc_mismatch) {
			error = EOPNOTSUPP;
		} else if ((nmp->nm_flag & NFSMNT_KERB) &&
			   *tl++ == rpc_autherr) {
			if (req->r_failed_auth == 0) {
				req->r_failed_auth++;
				req->r_mheadend->m_next = NULL;
				m_freem(info->mrep);
				info->mrep = NULL;
				m_freem(req->r_mreq);
				return (ENEEDAUTH);
			} else {
				error = EAUTH;
			}
		} else {
			error = EACCES;
		}
		m_freem(info->mrep);
		info->mrep = NULL;
		m_freem(req->r_mreq);
		req->r_mreq = NULL;
		kfree(req, M_NFSREQ);
		info->req = NULL;
		return (error);
	}

	/*
	 * Grab any Kerberos verifier, otherwise just throw it away.
	 */
	verf_type = fxdr_unsigned(int, *tl++);
	i = fxdr_unsigned(int32_t, *tl);
	if ((nmp->nm_flag & NFSMNT_KERB) && verf_type == RPCAUTH_KERB4) {
		error = nfs_savenickauth(nmp, req->r_cred, i, req->r_key,
					 &info->md, &info->dpos, info->mrep);
		if (error)
			goto nfsmout;
	} else if (i > 0) {
		ERROROUT(nfsm_adv(info, nfsm_rndup(i)));
	}
	NULLOUT(tl = nfsm_dissect(info, NFSX_UNSIGNED));
	/* 0 == ok */
	if (*tl == 0) {
		NULLOUT(tl = nfsm_dissect(info, NFSX_UNSIGNED));
		if (*tl != 0) {
			error = fxdr_unsigned(int, *tl);

			/*
			 * Does anyone even implement this?  Just impose
			 * a 1-second delay.
			 */
			if ((nmp->nm_flag & NFSMNT_NFSV3) &&
			    error == NFSERR_TRYLATER) {
				m_freem(info->mrep);
				info->mrep = NULL;
				error = 0;

				tsleep((caddr_t)&lbolt, 0, "nqnfstry", 0);
				return (EAGAIN);	/* goto tryagain */
			}

			/*
			 * If the File Handle was stale, invalidate the
			 * lookup cache, just in case.
			 *
			 * To avoid namecache<->vnode deadlocks we must
			 * release the vnode lock if we hold it.
			 */
			if (error == ESTALE) {
				struct vnode *vp = req->r_vp;
				int ltype;

				ltype = lockstatus(&vp->v_lock, curthread);
				if (ltype == LK_EXCLUSIVE || ltype == LK_SHARED)
					lockmgr(&vp->v_lock, LK_RELEASE);
				cache_inval_vp(vp, CINV_CHILDREN);
				if (ltype == LK_EXCLUSIVE || ltype == LK_SHARED)
					lockmgr(&vp->v_lock, ltype);
			}
			if (nmp->nm_flag & NFSMNT_NFSV3) {
				KKASSERT(*req->r_mrp == info->mrep);
				KKASSERT(*req->r_mdp == info->md);
				KKASSERT(*req->r_dposp == info->dpos);
				error |= NFSERR_RETERR;
			} else {
				m_freem(info->mrep);
				info->mrep = NULL;
			}
			m_freem(req->r_mreq);
			req->r_mreq = NULL;
			kfree(req, M_NFSREQ);
			info->req = NULL;
			return (error);
		}

		KKASSERT(*req->r_mrp == info->mrep);
		KKASSERT(*req->r_mdp == info->md);
		KKASSERT(*req->r_dposp == info->dpos);
		m_freem(req->r_mreq);
		req->r_mreq = NULL;
		FREE(req, M_NFSREQ);
		return (0);
	}
	m_freem(info->mrep);
	info->mrep = NULL;
	error = EPROTONOSUPPORT;
nfsmout:
	m_freem(req->r_mreq);
	req->r_mreq = NULL;
	kfree(req, M_NFSREQ);
	info->req = NULL;
	return (error);
}
#ifndef NFS_NOSERVER
/*
 * Generate the rpc reply header
 * siz arg. is used to decide if adding a cluster is worthwhile
 */
int
nfs_rephead(int siz, struct nfsrv_descript *nd, struct nfssvc_sock *slp,
	    int err, struct mbuf **mrq, struct mbuf **mbp, caddr_t *bposp)
{
	u_int32_t *tl;
	struct nfsm_info info;

	siz += RPC_REPLYSIZ;
	info.mb = m_getl(max_hdr + siz, MB_WAIT, MT_DATA, M_PKTHDR, NULL);
	info.mreq = info.mb;
	info.mreq->m_pkthdr.len = 0;
	/*
	 * If this is not a cluster, try and leave leading space
	 * for the lower level headers.
	 */
	if ((max_hdr + siz) < MINCLSIZE)
		info.mreq->m_data += max_hdr;
	tl = mtod(info.mreq, u_int32_t *);
	info.mreq->m_len = 6 * NFSX_UNSIGNED;
	info.bpos = ((caddr_t)tl) + info.mreq->m_len;
	*tl++ = txdr_unsigned(nd->nd_retxid);
	*tl++ = rpc_reply;
	if (err == ERPCMISMATCH || (err & NFSERR_AUTHERR)) {
		*tl++ = rpc_msgdenied;
		if (err & NFSERR_AUTHERR) {
			*tl++ = rpc_autherr;
			*tl = txdr_unsigned(err & ~NFSERR_AUTHERR);
			info.mreq->m_len -= NFSX_UNSIGNED;
			info.bpos -= NFSX_UNSIGNED;
		} else {
			*tl++ = rpc_mismatch;
			*tl++ = txdr_unsigned(RPC_VER2);
			*tl = txdr_unsigned(RPC_VER2);
		}
	} else {
		*tl++ = rpc_msgaccepted;

		/*
		 * For Kerberos authentication, we must send the nickname
		 * verifier back, otherwise just RPCAUTH_NULL.
		 */
		if (nd->nd_flag & ND_KERBFULL) {
			struct nfsuid *nuidp;
			struct timeval ktvin, ktvout;

			for (nuidp = NUIDHASH(slp, nd->nd_cr.cr_uid)->lh_first;
			     nuidp != 0; nuidp = nuidp->nu_hash.le_next) {
				if (nuidp->nu_cr.cr_uid == nd->nd_cr.cr_uid &&
				    (!nd->nd_nam2 || netaddr_match(NU_NETFAM(nuidp),
				     &nuidp->nu_haddr, nd->nd_nam2)))
					break;
			}
			if (nuidp) {
				ktvin.tv_sec =
				    txdr_unsigned(nuidp->nu_timestamp.tv_sec - 1);
				ktvin.tv_usec =
				    txdr_unsigned(nuidp->nu_timestamp.tv_usec);

				/*
				 * Encrypt the timestamp in ecb mode using the
				 * session key.
				 */
#ifdef NFSKERB
				XXX
#endif

				*tl++ = rpc_auth_kerb;
				*tl++ = txdr_unsigned(3 * NFSX_UNSIGNED);
				*tl = ktvout.tv_sec;
				tl = nfsm_build(&info, 3 * NFSX_UNSIGNED);
				*tl++ = ktvout.tv_usec;
				*tl++ = txdr_unsigned(nuidp->nu_cr.cr_uid);
			} else {
				*tl++ = 0;
				*tl++ = 0;
			}
		} else {
			*tl++ = 0;
			*tl++ = 0;
		}
		switch (err) {
		case EPROGUNAVAIL:
			*tl = txdr_unsigned(RPC_PROGUNAVAIL);
			break;
		case EPROGMISMATCH:
			*tl = txdr_unsigned(RPC_PROGMISMATCH);
			tl = nfsm_build(&info, 2 * NFSX_UNSIGNED);
			*tl++ = txdr_unsigned(2);
			*tl = txdr_unsigned(3);
			break;
		case EPROCUNAVAIL:
			*tl = txdr_unsigned(RPC_PROCUNAVAIL);
			break;
		case EBADRPC:
			*tl = txdr_unsigned(RPC_GARBAGE);
			break;
		default:
			*tl = 0;
			if (err != NFSERR_RETVOID) {
				tl = nfsm_build(&info, NFSX_UNSIGNED);
				if (err)
					*tl = txdr_unsigned(nfsrv_errmap(nd, err));
				else
					*tl = 0;
			}
			break;
		}
	}

	if (mrq != NULL)
		*mrq = info.mreq;
	*mbp = info.mb;
	*bposp = info.bpos;
	if (err != 0 && err != NFSERR_RETVOID)
		nfsstats.srvrpc_errs++;
	return (0);
}

#endif /* NFS_NOSERVER */
/*
 * Nfs timer routine.
 *
 * Scan the nfsreq list and retransmit any requests that have timed out.
 * To avoid retransmission attempts on STREAM sockets (in the future) make
 * sure to set the r_retry field to 0 (implies nm_retry == 0).
 *
 * Requests with attached responses, terminated requests, and
 * locked requests are ignored.  Locked requests will be picked up
 * in a later timer call.
 */
void
nfs_timer(void *arg /* never used */)
{
	struct nfsmount *nmp;
	struct nfsreq *req;
#ifndef NFS_NOSERVER
	struct nfssvc_sock *slp;
	u_quad_t cur_usec;
#endif /* NFS_NOSERVER */

	crit_enter();
	TAILQ_FOREACH(nmp, &nfs_mountq, nm_entry) {
		TAILQ_FOREACH(req, &nmp->nm_reqq, r_chain) {
			KKASSERT(nmp == req->r_nmp);
			if (req->r_mrep)
				continue;
			if (req->r_flags & (R_SOFTTERM | R_LOCKED))
				continue;
			req->r_flags |= R_LOCKED;
			if (nfs_sigintr(nmp, req, req->r_td)) {
				nfs_softterm(req, 1);
			} else {
				nfs_timer_req(req);
			}
			req->r_flags &= ~R_LOCKED;
			if (req->r_flags & R_WANTED) {
				req->r_flags &= ~R_WANTED;
				wakeup(req);
			}
		}
	}
#ifndef NFS_NOSERVER

	/*
	 * Scan the write gathering queues for writes that need to be
	 * completed now.
	 */
	cur_usec = nfs_curusec();
	TAILQ_FOREACH(slp, &nfssvc_sockhead, ns_chain) {
		if (slp->ns_tq.lh_first &&
		    slp->ns_tq.lh_first->nd_time <= cur_usec)
			nfsrv_wakenfsd(slp, 1);
	}
#endif /* NFS_NOSERVER */
	crit_exit();
	callout_reset(&nfs_timer_handle, nfs_ticks, nfs_timer, NULL);
}
static
void
nfs_timer_req(struct nfsreq *req)
{
	struct thread *td = &thread0; /* XXX for creds, will break if sleep */
	struct nfsmount *nmp = req->r_nmp;
	struct mbuf *m;
	struct socket *so;
	int timeo;
	int error;

	/*
	 * rtt ticks and timeout calculation.  Return if the timeout
	 * has not been reached yet, unless the packet is flagged
	 * for an immediate send.
	 *
	 * The mean rtt doesn't help when we get random I/Os, we have
	 * to multiply by fairly large numbers.
	 */
	if (req->r_rtt >= 0) {
		/*
		 * Calculate the timeout to test against.
		 */
		req->r_rtt++;
		if (nmp->nm_flag & NFSMNT_DUMBTIMR) {
			timeo = nmp->nm_timeo << NFS_RTT_SCALE_BITS;
		} else if (req->r_flags & R_TIMING) {
			timeo = NFS_SRTT(req) + NFS_SDRTT(req);
		} else {
			timeo = nmp->nm_timeo << NFS_RTT_SCALE_BITS;
		}
		timeo *= multt[req->r_procnum];
		/* timeo is still scaled by SCALE_BITS */

#define NFSFS	(NFS_RTT_SCALE * NFS_HZ)
		if (req->r_flags & R_TIMING) {
			static long last_time;
			if (nfs_showrtt && last_time != time_second) {
				kprintf("rpccmd %d NFS SRTT %d SDRTT %d "
					"timeo %d.%03d\n",
					proct[req->r_procnum],
					NFS_SRTT(req), NFS_SDRTT(req),
					timeo / NFSFS,
					timeo % NFSFS * 1000 / NFSFS);
				last_time = time_second;
			}
		}
#undef NFSFS

		/*
		 * deal with nfs_timer jitter.
		 */
		timeo = (timeo >> NFS_RTT_SCALE_BITS) + 1;
		if (timeo < 2)
			timeo = 2;

		if (nmp->nm_timeouts > 0)
			timeo *= nfs_backoff[nmp->nm_timeouts - 1];
		if (timeo > NFS_MAXTIMEO)
			timeo = NFS_MAXTIMEO;
		if (req->r_rtt <= timeo) {
			if ((req->r_flags & R_NEEDSXMIT) == 0)
				return;
		} else if (nmp->nm_timeouts < 8) {
			nmp->nm_timeouts++;
		}
	}
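	/*
	 * Illustrative example (not part of the original source): the
	 * nfs_backoff[] table above grows roughly like a Fibonacci
	 * sequence, so after three consecutive timeouts the base timeout
	 * is multiplied by nfs_backoff[2] == 5, and after eight it stays
	 * clamped at nfs_backoff[7] == 55 (and NFS_MAXTIMEO overall).
	 */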
	/*
	 * Check for server not responding
	 */
	if ((req->r_flags & R_TPRINTFMSG) == 0 &&
	    req->r_rexmit > nmp->nm_deadthresh) {
		nfs_msg(req->r_td, nmp->nm_mountp->mnt_stat.f_mntfromname,
			"not responding");
		req->r_flags |= R_TPRINTFMSG;
	}
	if (req->r_rexmit >= req->r_retry) {	/* too many */
		nfsstats.rpctimeouts++;
		nfs_softterm(req, 1);
		return;
	}

	/*
	 * Generally disable retransmission on reliable sockets,
	 * unless the request is flagged for immediate send.
	 */
	if (nmp->nm_sotype != SOCK_DGRAM) {
		if (++req->r_rexmit > NFS_MAXREXMIT)
			req->r_rexmit = NFS_MAXREXMIT;
		if ((req->r_flags & R_NEEDSXMIT) == 0)
			return;
	}

	/*
	 * Stop here if we do not have a socket!
	 */
	if ((so = nmp->nm_so) == NULL)
		return;

	/*
	 * If there is enough space and the window allows.. resend it.
	 *
	 * r_rtt is left intact in case we get an answer after the
	 * retry that was a reply to the original packet.
	 */
	if (ssb_space(&so->so_snd) >= req->r_mreq->m_pkthdr.len &&
	    (req->r_flags & (R_SENT | R_NEEDSXMIT)) &&
	    (m = m_copym(req->r_mreq, 0, M_COPYALL, MB_DONTWAIT))) {
		if ((nmp->nm_flag & NFSMNT_NOCONN) == 0)
			error = so_pru_send(so, 0, m, NULL, NULL, td);
		else
			error = so_pru_send(so, 0, m, nmp->nm_nam,
					    NULL, td);
		if (error) {
			if (NFSIGNORE_SOERROR(nmp->nm_soflags, error))
				so->so_error = 0;
			req->r_flags |= R_NEEDSXMIT;
		} else if (req->r_mrep == NULL) {
			/*
			 * Iff first send, start timing
			 * else turn timing off, backoff timer
			 * and divide congestion window by 2.
			 *
			 * It is possible for the so_pru_send() to
			 * block and for us to race a reply so we
			 * only do this if the reply field has not
			 * been filled in.  R_LOCKED will prevent
			 * the request from being ripped out from under
			 * us entirely.
			 *
			 * Record the last resent procnum to aid us
			 * in duplicate detection on receive.
			 */
			if ((req->r_flags & R_NEEDSXMIT) == 0) {
				if (nfs_showrexmit)
					kprintf("X");
				if (++req->r_rexmit > NFS_MAXREXMIT)
					req->r_rexmit = NFS_MAXREXMIT;
				nmp->nm_maxasync_scaled >>= 1;
				if (nmp->nm_maxasync_scaled < NFS_MINASYNC_SCALED)
					nmp->nm_maxasync_scaled = NFS_MINASYNC_SCALED;
				nfsstats.rpcretries++;
				nmp->nm_lastreprocnum = req->r_procnum;
			} else {
				req->r_flags |= R_SENT;
				req->r_flags &= ~R_NEEDSXMIT;
			}
		}
	}
}
/*
 * Mark all of an nfs mount's outstanding requests with R_SOFTTERM and
 * wait for all requests to complete.  This is used by forced unmounts
 * to terminate any outstanding RPCs.
 *
 * Locked requests cannot be canceled but will be marked for
 * soft-termination.
 */
int
nfs_nmcancelreqs(struct nfsmount *nmp)
{
	struct nfsreq *req;
	int i;

	crit_enter();
	TAILQ_FOREACH(req, &nmp->nm_reqq, r_chain) {
		if (req->r_mrep != NULL || (req->r_flags & R_SOFTTERM))
			continue;
		nfs_softterm(req, 0);
	}
	/* XXX the other two queues as well */
	crit_exit();

	for (i = 0; i < 30; i++) {
		crit_enter();
		TAILQ_FOREACH(req, &nmp->nm_reqq, r_chain) {
			if (nmp == req->r_nmp)
				break;
		}
		crit_exit();
		if (req == NULL)
			return (0);
		tsleep(&lbolt, 0, "nfscancel", 0);
	}
	return (EBUSY);
}

/*
 * Soft-terminate a request, effectively marking it as failed.
 *
 * Must be called from within a critical section.
 */
static void
nfs_softterm(struct nfsreq *rep, int islocked)
{
	rep->r_flags |= R_SOFTTERM;
	nfs_hardterm(rep, islocked);
}

/*
 * Hard-terminate a request, typically after getting a response.
 *
 * The state machine can still decide to re-issue it later if necessary.
 *
 * Must be called from within a critical section.
 */
static void
nfs_hardterm(struct nfsreq *rep, int islocked)
{
	struct nfsmount *nmp = rep->r_nmp;

	/*
	 * The nm_send count is decremented now to avoid deadlocks
	 * when the process in soreceive() hasn't yet managed to send
	 * its own request.
	 */
	if (rep->r_flags & R_SENT) {
		rep->r_flags &= ~R_SENT;
	}

	/*
	 * If we locked the request or nobody else has locked the request,
	 * and the request is async, we can move it to the reader thread's
	 * queue now and fix up the state.
	 *
	 * If we locked the request or nobody else has locked the request,
	 * we can wake up anyone blocked waiting for a response on the
	 * request.
	 */
	if (islocked || (rep->r_flags & R_LOCKED) == 0) {
		if ((rep->r_flags & (R_ONREQQ | R_ASYNC)) ==
		    (R_ONREQQ | R_ASYNC)) {
			rep->r_flags &= ~R_ONREQQ;
			TAILQ_REMOVE(&nmp->nm_reqq, rep, r_chain);
			--nmp->nm_reqqlen;
			TAILQ_INSERT_TAIL(&nmp->nm_reqrxq, rep, r_chain);
			KKASSERT(rep->r_info->state == NFSM_STATE_TRY ||
				 rep->r_info->state == NFSM_STATE_WAITREPLY);
			rep->r_info->state = NFSM_STATE_PROCESSREPLY;
			nfssvc_iod_reader_wakeup(nmp);
			if (TAILQ_FIRST(&nmp->nm_bioq) &&
			    nmp->nm_reqqlen == NFS_MAXASYNCBIO * 2 / 3) {
				nfssvc_iod_writer_wakeup(nmp);
			}
		}
		mtx_abort_ex_link(&nmp->nm_rxlock, &rep->r_link);
	}
}

/*
 * Test for a termination condition pending on the process.
 * This is used for NFSMNT_INT mounts.
 */
int
nfs_sigintr(struct nfsmount *nmp, struct nfsreq *rep, struct thread *td)
{
	sigset_t tmpset;
	struct proc *p;
	struct lwp *lp;

	if (rep && (rep->r_flags & R_SOFTTERM))
		return (EINTR);
	/* Terminate all requests while attempting a forced unmount. */
	if (nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF)
		return (EINTR);
	if (!(nmp->nm_flag & NFSMNT_INT))
		return (0);
	/* td might be NULL YYY */
	if (td == NULL || (p = td->td_proc) == NULL)
		return (0);

	lp = td->td_lwp;
	tmpset = lwp_sigpend(lp);
	SIGSETNAND(tmpset, lp->lwp_sigmask);
	SIGSETNAND(tmpset, p->p_sigignore);
	if (SIGNOTEMPTY(tmpset) && NFSINT_SIGMASK(tmpset))
		return (EINTR);

	return (0);
}

/*
 * Lock a socket against others.
 * Necessary for STREAM sockets to ensure you get an entire rpc request/reply
 * and also to avoid race conditions between the processes with nfs requests
 * in progress when a reconnect is necessary.
 */
int
nfs_sndlock(struct nfsmount *nmp, struct nfsreq *rep)
{
	mtx_t mtx = &nmp->nm_txlock;
	struct thread *td;
	int slptimeo;
	int slpflag;
	int error;

	slpflag = 0;
	slptimeo = 0;
	td = rep ? rep->r_td : NULL;
	if (nmp->nm_flag & NFSMNT_INT)
		slpflag = PCATCH;

	while ((error = mtx_lock_ex_try(mtx)) != 0) {
		if (nfs_sigintr(nmp, rep, td)) {
			error = EINTR;
			break;
		}
		error = mtx_lock_ex(mtx, "nfsndlck", slpflag, slptimeo);
		if (error == 0)
			break;
		if (slpflag == PCATCH) {
			slpflag = 0;
			slptimeo = 2 * hz;
		}
	}
	/* Always fail if our request has been cancelled. */
	if (rep && (rep->r_flags & R_SOFTTERM)) {
		if (error == 0)
			mtx_unlock(mtx);
		error = EINTR;
	}
	return (error);
}

/*
 * Unlock the stream socket for others.
 */
void
nfs_sndunlock(struct nfsmount *nmp)
{
	mtx_unlock(&nmp->nm_txlock);
}

/*
 * Lock the receiver side of the socket.
 *
 * rep may be NULL.
 */
static int
nfs_rcvlock(struct nfsmount *nmp, struct nfsreq *rep)
{
	mtx_t mtx = &nmp->nm_rxlock;
	int slpflag;
	int slptimeo;
	int error;

	/*
	 * Unconditionally check for completion in case another nfsiod
	 * got the packet while the caller was blocked, before the caller
	 * called us.  Packet reception is handled by mainline code which
	 * is protected by the BGL at the moment.
	 *
	 * We do not strictly need the second check just before the
	 * tsleep(), but it's good defensive programming.
	 */
	if (rep && rep->r_mrep != NULL)
		return (EALREADY);

	if (nmp->nm_flag & NFSMNT_INT)
		slpflag = PCATCH;
	else
		slpflag = 0;
	slptimeo = 0;

	while ((error = mtx_lock_ex_try(mtx)) != 0) {
		if (nfs_sigintr(nmp, rep, (rep ? rep->r_td : NULL))) {
			error = EINTR;
			break;
		}
		if (rep && rep->r_mrep != NULL) {
			error = EALREADY;
			break;
		}

		/*
		 * NOTE: can return ENOLCK, but in that case rep->r_mrep
		 * will already be set.
		 */
		if (rep) {
			error = mtx_lock_ex_link(mtx, &rep->r_link,
						 "nfsrcvlk",
						 slpflag, slptimeo);
		} else {
			error = mtx_lock_ex(mtx, "nfsrcvlk", slpflag, slptimeo);
		}
		if (error == 0)
			break;

		/*
		 * If our reply was received while we were sleeping,
		 * then just return without taking the lock to avoid a
		 * situation where a single iod could 'capture' the
		 * receive lock.
		 */
		if (rep && rep->r_mrep != NULL) {
			error = EALREADY;
			break;
		}
		if (slpflag == PCATCH) {
			slpflag = 0;
			slptimeo = 2 * hz;
		}
	}
	if (error == 0) {
		if (rep && rep->r_mrep != NULL) {
			error = EALREADY;
			mtx_unlock(mtx);
		}
	}
	return (error);
}

/*
 * Unlock the stream socket for others.
 */
static void
nfs_rcvunlock(struct nfsmount *nmp)
{
	mtx_unlock(&nmp->nm_rxlock);
}

/*
 * nfs_realign:
 *
 * Check for badly aligned mbuf data and realign by copying the unaligned
 * portion of the data into a new mbuf chain and freeing the portions
 * of the old chain that were replaced.
 *
 * We cannot simply realign the data within the existing mbuf chain
 * because the underlying buffers may contain other rpc commands and
 * we cannot afford to overwrite them.
 *
 * We would prefer to avoid this situation entirely.  The situation does
 * not occur with NFS/UDP and is supposed to only occasionally occur
 * with TCP.  Use vfs.nfs.realign_count and realign_test to check this.
 */
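/*
 * Illustrative note (not part of the original source): XDR data is
 * 32-bit aligned, so e.g. an mbuf whose m_len is 6 or whose m_data
 * sits at an odd address would make the nfsm_dissect() pointer casts
 * unsafe on strict-alignment machines; such chains are copied into
 * fresh, aligned mbufs below.
 */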
/*
 * Lock the receiver side of the socket.
 *
 * rep may be NULL.
 */
static int
nfs_rcvlock(struct nfsmount *nmp, struct nfsreq *rep)
{
	mtx_t mtx = &nmp->nm_rxlock;
	int slpflag;
	int slptimeo;
	int error;

	/*
	 * Unconditionally check for completion in case another nfsiod
	 * got the packet while the caller was blocked, before the caller
	 * called us.  Packet reception is handled by mainline code which
	 * is protected by the BGL at the moment.
	 *
	 * We do not strictly need the second check just before the
	 * tsleep(), but it's good defensive programming.
	 */
	if (rep && rep->r_mrep != NULL)
		return (EALREADY);

	if (nmp->nm_flag & NFSMNT_INT)
		slpflag = PCATCH;
	else
		slpflag = 0;
	slptimeo = 0;

	while ((error = mtx_lock_ex_try(mtx)) != 0) {
		if (nfs_sigintr(nmp, rep, (rep ? rep->r_td : NULL))) {
			error = EINTR;
			break;
		}
		if (rep && rep->r_mrep != NULL) {
			error = EALREADY;
			break;
		}

		/*
		 * NOTE: can return ENOLCK, but in that case rep->r_mrep
		 * will already be set.
		 */
		if (rep) {
			error = mtx_lock_ex_link(mtx, &rep->r_link,
						 "nfsrcvlk",
						 slpflag, slptimeo);
		} else {
			error = mtx_lock_ex(mtx, "nfsrcvlk", slpflag, slptimeo);
		}
		if (error == 0)
			break;

		/*
		 * If our reply was received while we were sleeping,
		 * then just return without taking the lock to avoid a
		 * situation where a single iod could 'capture' the
		 * receive lock.
		 */
		if (rep && rep->r_mrep != NULL) {
			error = EALREADY;
			break;
		}
		if (slpflag == PCATCH) {
			slpflag = 0;
			slptimeo = 2 * hz;
		}
	}
	if (error == 0) {
		if (rep && rep->r_mrep != NULL) {
			error = EALREADY;
			mtx_unlock(mtx);
		}
	}
	return (error);
}

/*
 * Unlock the receiver side of the socket for others.
 */
static void
nfs_rcvunlock(struct nfsmount *nmp)
{
	mtx_unlock(&nmp->nm_rxlock);
}

/*
 * nfs_realign:
 *
 * Check for badly aligned mbuf data and realign by copying the unaligned
 * portion of the data into a new mbuf chain and freeing the portions
 * of the old chain that were replaced.
 *
 * We cannot simply realign the data within the existing mbuf chain
 * because the underlying buffers may contain other rpc commands and
 * we cannot afford to overwrite them.
 *
 * We would prefer to avoid this situation entirely.  The situation does
 * not occur with NFS/UDP and is supposed to occur only occasionally
 * with TCP.  Use vfs.nfs.realign_count and realign_test to check this.
 */
static void
nfs_realign(struct mbuf **pm, int hsiz)
{
	struct mbuf *m;
	struct mbuf *n = NULL;
	int off = 0;

	++nfs_realign_test;

	while ((m = *pm) != NULL) {
		if ((m->m_len & 0x3) || (mtod(m, intptr_t) & 0x3)) {
			n = m_getl(m->m_len, MB_WAIT, MT_DATA, 0, NULL);
			n->m_len = 0;
			break;
		}
		pm = &m->m_next;
	}

	/*
	 * If n is non-NULL, loop on m copying data, then replace the
	 * portion of the chain that had to be realigned.
	 */
	if (n != NULL) {
		++nfs_realign_count;
		while (m) {
			m_copyback(n, off, m->m_len, mtod(m, caddr_t));
			off += m->m_len;
			m = m->m_next;
		}
		m_freem(*pm);
		*pm = n;
	}
}
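/*
 * Illustrative sketch (not compiled): XDR data is dissected as 32-bit
 * words, so the receive paths below run nfs_realign() on a freshly
 * received chain before parsing it.  An mbuf trips the alignment test
 * when its data pointer or its length is not a multiple of 4; for
 * example, a TCP segment boundary that leaves m_len == 6 forces a copy.
 */
#if 0
	nfs_realign(&sio.sb_mb, 10 * NFSX_UNSIGNED);
	/* sio.sb_mb is now safe for word-at-a-time XDR fetches */
#endif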
#ifndef NFS_NOSERVER

/*
 * Parse an RPC request
 * - verify it
 * - fill in the cred struct.
 */
int
nfs_getreq(struct nfsrv_descript *nd, struct nfsd *nfsd, int has_header)
{
	int len, i;
	u_int32_t *tl;
	struct uio uio;
	struct iovec iov;
	caddr_t cp;
	u_int32_t nfsvers, auth_type;
	uid_t nickuid;
	int error = 0, ticklen;
	struct nfsuid *nuidp;
	struct timeval tvin, tvout;
	struct nfsm_info info;
#if 0				/* until encrypted keys are implemented */
	NFSKERBKEYSCHED_T keys;	/* stores key schedule */
#endif

	info.mrep = nd->nd_mrep;
	info.md = nd->nd_md;
	info.dpos = nd->nd_dpos;

	if (has_header) {
		NULLOUT(tl = nfsm_dissect(&info, 10 * NFSX_UNSIGNED));
		nd->nd_retxid = fxdr_unsigned(u_int32_t, *tl++);
		if (*tl++ != rpc_call) {
			m_freem(info.mrep);
			return (EBADRPC);
		}
	} else {
		NULLOUT(tl = nfsm_dissect(&info, 8 * NFSX_UNSIGNED));
	}
	nd->nd_repstat = 0;
	nd->nd_flag = 0;
	if (*tl++ != rpc_vers) {
		nd->nd_repstat = ERPCMISMATCH;
		nd->nd_procnum = NFSPROC_NOOP;
		return (0);
	}
	if (*tl != nfs_prog) {
		nd->nd_repstat = EPROGUNAVAIL;
		nd->nd_procnum = NFSPROC_NOOP;
		return (0);
	}
	tl++;
	nfsvers = fxdr_unsigned(u_int32_t, *tl++);
	if (nfsvers < NFS_VER2 || nfsvers > NFS_VER3) {
		nd->nd_repstat = EPROGMISMATCH;
		nd->nd_procnum = NFSPROC_NOOP;
		return (0);
	}
	if (nfsvers == NFS_VER3)
		nd->nd_flag = ND_NFSV3;
	nd->nd_procnum = fxdr_unsigned(u_int32_t, *tl++);
	if (nd->nd_procnum == NFSPROC_NULL)
		return (0);
	if (nd->nd_procnum >= NFS_NPROCS ||
	    (nd->nd_procnum >= NQNFSPROC_GETLEASE) ||
	    (!nd->nd_flag && nd->nd_procnum > NFSV2PROC_STATFS)) {
		nd->nd_repstat = EPROCUNAVAIL;
		nd->nd_procnum = NFSPROC_NOOP;
		return (0);
	}
	if ((nd->nd_flag & ND_NFSV3) == 0)
		nd->nd_procnum = nfsv3_procid[nd->nd_procnum];
	auth_type = *tl++;
	len = fxdr_unsigned(int, *tl++);
	if (len < 0 || len > RPCAUTH_MAXSIZ) {
		m_freem(info.mrep);
		return (EBADRPC);
	}

	nd->nd_flag &= ~ND_KERBAUTH;
	/*
	 * Handle auth_unix or auth_kerb.
	 */
	if (auth_type == rpc_auth_unix) {
		len = fxdr_unsigned(int, *++tl);
		if (len < 0 || len > NFS_MAXNAMLEN) {
			m_freem(info.mrep);
			return (EBADRPC);
		}
		ERROROUT(nfsm_adv(&info, nfsm_rndup(len)));
		NULLOUT(tl = nfsm_dissect(&info, 3 * NFSX_UNSIGNED));
		bzero((caddr_t)&nd->nd_cr, sizeof (struct ucred));
		nd->nd_cr.cr_ref = 1;
		nd->nd_cr.cr_uid = fxdr_unsigned(uid_t, *tl++);
		nd->nd_cr.cr_gid = fxdr_unsigned(gid_t, *tl++);
		len = fxdr_unsigned(int, *tl);
		if (len < 0 || len > RPCAUTH_UNIXGIDS) {
			m_freem(info.mrep);
			return (EBADRPC);
		}
		NULLOUT(tl = nfsm_dissect(&info, (len + 2) * NFSX_UNSIGNED));
		for (i = 1; i <= len; i++) {
			if (i < NGROUPS)
				nd->nd_cr.cr_groups[i] =
					fxdr_unsigned(gid_t, *tl++);
			else
				tl++;
		}
		nd->nd_cr.cr_ngroups = (len >= NGROUPS) ? NGROUPS : (len + 1);
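		/*
		 * Note: the wire gids landed in cr_groups[1..len] above,
		 * apparently leaving cr_groups[0] for the primary gid,
		 * which is why cr_ngroups is len + 1 (capped at NGROUPS).
		 * E.g. a credential carrying 3 supplementary gids yields
		 * cr_ngroups = 4.
		 */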
		if (nd->nd_cr.cr_ngroups > 1)
			nfsrvw_sort(nd->nd_cr.cr_groups, nd->nd_cr.cr_ngroups);
		len = fxdr_unsigned(int, *++tl);
		if (len < 0 || len > RPCAUTH_MAXSIZ) {
			m_freem(info.mrep);
			return (EBADRPC);
		}
		if (len > 0) {
			ERROROUT(nfsm_adv(&info, nfsm_rndup(len)));
		}
	} else if (auth_type == rpc_auth_kerb) {
		switch (fxdr_unsigned(int, *tl++)) {
		case RPCAKN_FULLNAME:
			ticklen = fxdr_unsigned(int, *tl);
			*((u_int32_t *)nfsd->nfsd_authstr) = *tl;
			uio.uio_resid = nfsm_rndup(ticklen) + NFSX_UNSIGNED;
			nfsd->nfsd_authlen = uio.uio_resid + NFSX_UNSIGNED;
			if (uio.uio_resid > (len - 2 * NFSX_UNSIGNED)) {
				m_freem(info.mrep);
				return (EBADRPC);
			}
			uio.uio_offset = 0;
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_segflg = UIO_SYSSPACE;
			iov.iov_base = (caddr_t)&nfsd->nfsd_authstr[4];
			iov.iov_len = RPCAUTH_MAXSIZ - 4;
			ERROROUT(nfsm_mtouio(&info, &uio, uio.uio_resid));
			NULLOUT(tl = nfsm_dissect(&info, 2 * NFSX_UNSIGNED));
			if (*tl++ != rpc_auth_kerb ||
			    fxdr_unsigned(int, *tl) != 4 * NFSX_UNSIGNED) {
				kprintf("Bad kerb verifier\n");
				nd->nd_repstat =
					(NFSERR_AUTHERR | AUTH_BADVERF);
				nd->nd_procnum = NFSPROC_NOOP;
				return (0);
			}
			NULLOUT(cp = nfsm_dissect(&info, 4 * NFSX_UNSIGNED));
			tl = (u_int32_t *)cp;
			if (fxdr_unsigned(int, *tl) != RPCAKN_FULLNAME) {
				kprintf("Not fullname kerb verifier\n");
				nd->nd_repstat =
					(NFSERR_AUTHERR | AUTH_BADVERF);
				nd->nd_procnum = NFSPROC_NOOP;
				return (0);
			}
			cp += NFSX_UNSIGNED;
			bcopy(cp, nfsd->nfsd_verfstr, 3 * NFSX_UNSIGNED);
			nfsd->nfsd_verflen = 3 * NFSX_UNSIGNED;
			nd->nd_flag |= ND_KERBFULL;
			nfsd->nfsd_flag |= NFSD_NEEDAUTH;
			break;
		case RPCAKN_NICKNAME:
			if (len != 2 * NFSX_UNSIGNED) {
				kprintf("Kerb nickname short\n");
				nd->nd_repstat =
					(NFSERR_AUTHERR | AUTH_BADCRED);
				nd->nd_procnum = NFSPROC_NOOP;
				return (0);
			}
			nickuid = fxdr_unsigned(uid_t, *tl);
			NULLOUT(tl = nfsm_dissect(&info, 2 * NFSX_UNSIGNED));
			if (*tl++ != rpc_auth_kerb ||
			    fxdr_unsigned(int, *tl) != 3 * NFSX_UNSIGNED) {
				kprintf("Kerb nick verifier bad\n");
				nd->nd_repstat =
					(NFSERR_AUTHERR | AUTH_BADVERF);
				nd->nd_procnum = NFSPROC_NOOP;
				return (0);
			}
			NULLOUT(tl = nfsm_dissect(&info, 3 * NFSX_UNSIGNED));
			tvin.tv_sec = *tl++;
			tvin.tv_usec = *tl;

			for (nuidp = NUIDHASH(nfsd->nfsd_slp, nickuid)->lh_first;
			     nuidp != NULL; nuidp = nuidp->nu_hash.le_next) {
				if (nuidp->nu_cr.cr_uid == nickuid &&
				    (!nd->nd_nam2 ||
				     netaddr_match(NU_NETFAM(nuidp),
						   &nuidp->nu_haddr,
						   nd->nd_nam2)))
					break;
			}
			if (!nuidp) {
				nd->nd_repstat =
					(NFSERR_AUTHERR | AUTH_REJECTCRED);
				nd->nd_procnum = NFSPROC_NOOP;
				return (0);
			}

			/*
			 * Now, decrypt the timestamp using the session key
			 * and validate it.
			 */
#ifdef NFSKERB
			XXX
#endif

			tvout.tv_sec = fxdr_unsigned(long, tvout.tv_sec);
			tvout.tv_usec = fxdr_unsigned(long, tvout.tv_usec);
			if (nuidp->nu_expire < time_second ||
			    nuidp->nu_timestamp.tv_sec > tvout.tv_sec ||
			    (nuidp->nu_timestamp.tv_sec == tvout.tv_sec &&
			     nuidp->nu_timestamp.tv_usec > tvout.tv_usec)) {
				nuidp->nu_expire = 0;
				nd->nd_repstat =
					(NFSERR_AUTHERR | AUTH_REJECTVERF);
				nd->nd_procnum = NFSPROC_NOOP;
				return (0);
			}
			nfsrv_setcred(&nuidp->nu_cr, &nd->nd_cr);
			nd->nd_flag |= ND_KERBNICK;
		}
	} else {
		nd->nd_repstat = (NFSERR_AUTHERR | AUTH_REJECTCRED);
		nd->nd_procnum = NFSPROC_NOOP;
		return (0);
	}

	nd->nd_md = info.md;
	nd->nd_dpos = info.dpos;
	return (0);
nfsmout:
	return (error);
}

#endif /* NFS_NOSERVER */

/*
 * Send a message to the originating process's terminal.  The thread and/or
 * process may be NULL.  YYY the thread should not be NULL but there may
 * still be some uio_td's that are still being passed as NULL through to
 * nfsm_request().
 */
static int
nfs_msg(struct thread *td, char *server, char *msg)
{
	tpr_t tpr;

	if (td && td->td_proc)
		tpr = tprintf_open(td->td_proc);
	else
		tpr = NULL;
	tprintf(tpr, "nfs server %s: %s\n", server, msg);
	tprintf_close(tpr);
	return (0);
}
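/*
 * Illustrative sketch (not compiled): nfs_msg() is typically driven
 * from timeout handling with the mount's server name.  The field shown
 * here (mnt_stat.f_mntfromname) is an assumption about the caller, not
 * something this function requires.
 */
#if 0
	nfs_msg(rep->r_td, nmp->nm_mountp->mnt_stat.f_mntfromname,
		"not responding");
#endif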
#ifndef NFS_NOSERVER
/*
 * Socket upcall routine for the nfsd sockets.
 * The caddr_t arg is a pointer to the "struct nfssvc_sock".
 * Essentially do as much as possible non-blocking, else punt and it will
 * be called with MB_WAIT from an nfsd.
 */
void
nfsrv_rcv(struct socket *so, void *arg, int waitflag)
{
	struct nfssvc_sock *slp = (struct nfssvc_sock *)arg;
	struct mbuf *m;
	struct sockaddr *nam;
	struct sockbuf sio;
	int flags, error;
	int nparallel_wakeup = 0;

	if ((slp->ns_flag & SLP_VALID) == 0)
		return;

	/*
	 * Do not allow an infinite number of completed RPC records to build
	 * up before we stop reading data from the socket.  Otherwise we could
	 * end up holding onto an unreasonable number of mbufs for requests
	 * waiting for service.
	 *
	 * This should give pretty good feedback to the TCP layer and
	 * prevents a memory crunch for other protocols.
	 *
	 * Note that the same service socket can be dispatched to several
	 * nfs servers simultaneously.
	 *
	 * The tcp protocol callback calls us with MB_DONTWAIT.
	 * nfsd calls us with MB_WAIT (typically).
	 */
	if (waitflag == MB_DONTWAIT && slp->ns_numrec >= nfsd_waiting / 2 + 1) {
		slp->ns_flag |= SLP_NEEDQ;
		goto dorecs;
	}

	/*
	 * Handle protocol specifics to parse an RPC request.  We always
	 * pull from the socket using non-blocking I/O.
	 */
	if (so->so_type == SOCK_STREAM) {
		/*
		 * The data has to be read in an orderly fashion from a TCP
		 * stream, unlike a UDP socket.  It is possible for soreceive
		 * and/or nfsrv_getstream() to block, so make sure only one
		 * entity is messing around with the TCP stream at any given
		 * moment.  The receive sockbuf's lock in soreceive is not
		 * sufficient.
		 *
		 * Note that this procedure can be called from any number of
		 * NFS servers *OR* can be upcalled directly from a TCP
		 * protocol thread.
		 */
		if (slp->ns_flag & SLP_GETSTREAM) {
			slp->ns_flag |= SLP_NEEDQ;
			goto dorecs;
		}
		slp->ns_flag |= SLP_GETSTREAM;

		/*
		 * Do soreceive().  Pull out as much data as possible without
		 * blocking.
		 */
		sbinit(&sio, 1000000000);
		flags = MSG_DONTWAIT;
		error = so_pru_soreceive(so, &nam, NULL, &sio, NULL, &flags);
		if (error || sio.sb_mb == NULL) {
			if (error == EWOULDBLOCK)
				slp->ns_flag |= SLP_NEEDQ;
			else
				slp->ns_flag |= SLP_DISCONN;
			slp->ns_flag &= ~SLP_GETSTREAM;
			goto dorecs;
		}
		m = sio.sb_mb;
		if (slp->ns_rawend) {
			slp->ns_rawend->m_next = m;
			slp->ns_cc += sio.sb_cc;
		} else {
			slp->ns_raw = m;
			slp->ns_cc = sio.sb_cc;
		}
		while (m->m_next)
			m = m->m_next;
		slp->ns_rawend = m;

		/*
		 * Now try and parse as many record(s) as we can out of the
		 * raw stream data.
		 */
		error = nfsrv_getstream(slp, waitflag, &nparallel_wakeup);
		if (error) {
			if (error == EPERM)
				slp->ns_flag |= SLP_DISCONN;
			else
				slp->ns_flag |= SLP_NEEDQ;
		}
		slp->ns_flag &= ~SLP_GETSTREAM;
	} else {
		/*
		 * For UDP soreceive typically pulls just one packet, so
		 * loop to get the whole batch.
		 */
		do {
			sbinit(&sio, 1000000000);
			flags = MSG_DONTWAIT;
			error = so_pru_soreceive(so, &nam, NULL, &sio,
						 NULL, &flags);
			if (sio.sb_mb) {
				struct nfsrv_rec *rec;
				int mf = (waitflag & MB_DONTWAIT) ?
					 M_NOWAIT : M_WAITOK;
				rec = kmalloc(sizeof(struct nfsrv_rec),
					      M_NFSRVDESC, mf);
				if (!rec) {
					if (nam)
						FREE(nam, M_SONAME);
					m_freem(sio.sb_mb);
					continue;
				}
				nfs_realign(&sio.sb_mb, 10 * NFSX_UNSIGNED);
				rec->nr_address = nam;
				rec->nr_packet = sio.sb_mb;
				STAILQ_INSERT_TAIL(&slp->ns_rec, rec, nr_link);
				++slp->ns_numrec;
				++nparallel_wakeup;
			}
			if (error) {
				if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
				    error != EWOULDBLOCK) {
					slp->ns_flag |= SLP_DISCONN;
					goto dorecs;
				}
			}
		} while (sio.sb_mb);
	}

	/*
	 * If we were upcalled from the tcp protocol layer and we have
	 * fully parsed records ready to go, or there is new data pending,
	 * or something went wrong, try to wake up an nfsd thread to deal
	 * with it.
	 */
dorecs:
	if (waitflag == MB_DONTWAIT && (slp->ns_numrec > 0 ||
	    (slp->ns_flag & (SLP_NEEDQ | SLP_DISCONN)))) {
		nfsrv_wakenfsd(slp, nparallel_wakeup);
	}
}
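/*
 * Illustrative sketch (not compiled): the RPC record mark decoded by
 * nfsrv_getstream() below follows the RFC 1831 record marking standard.
 * The high bit of the 32-bit mark flags the final fragment of a record
 * and the low 31 bits give the fragment length, so a 500-byte request
 * sent as a single fragment carries the mark 0x800001f4.  The helper
 * variables shown here are purely illustrative.
 */
#if 0
	recmark = ntohl(recmark);
	lastfrag = (recmark & 0x80000000) != 0;
	fraglen = recmark & ~0x80000000;	/* 0 < fraglen <= NFS_MAXPACKET */
#endif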
/*
 * Try and extract an RPC request from the mbuf data list received on a
 * stream socket.  The "waitflag" argument indicates whether or not it
 * can sleep.
 */
static int
nfsrv_getstream(struct nfssvc_sock *slp, int waitflag, int *countp)
{
	struct mbuf *m, **mpp;
	char *cp1, *cp2;
	int len;
	struct mbuf *om, *m2, *recm;
	u_int32_t recmark;

	for (;;) {
		if (slp->ns_reclen == 0) {
			if (slp->ns_cc < NFSX_UNSIGNED)
				return (0);
			m = slp->ns_raw;
			if (m->m_len >= NFSX_UNSIGNED) {
				bcopy(mtod(m, caddr_t), (caddr_t)&recmark,
				      NFSX_UNSIGNED);
				m->m_data += NFSX_UNSIGNED;
				m->m_len -= NFSX_UNSIGNED;
			} else {
				cp1 = (caddr_t)&recmark;
				cp2 = mtod(m, caddr_t);
				while (cp1 < ((caddr_t)&recmark) + NFSX_UNSIGNED) {
					while (m->m_len == 0) {
						m = m->m_next;
						cp2 = mtod(m, caddr_t);
					}
					*cp1++ = *cp2++;
					m->m_data++;
					m->m_len--;
				}
			}
			slp->ns_cc -= NFSX_UNSIGNED;
			recmark = ntohl(recmark);
			slp->ns_reclen = recmark & ~0x80000000;
			if (recmark & 0x80000000)
				slp->ns_flag |= SLP_LASTFRAG;
			else
				slp->ns_flag &= ~SLP_LASTFRAG;
			if (slp->ns_reclen > NFS_MAXPACKET ||
			    slp->ns_reclen <= 0) {
				log(LOG_ERR, "%s (%d) from nfs client\n",
				    "impossible packet length",
				    slp->ns_reclen);
				return (EPERM);
			}
		}

		/*
		 * Now get the record part.
		 *
		 * Note that slp->ns_reclen may be 0.  Linux sometimes
		 * generates 0-length RPCs.
		 */
		recm = NULL;
		if (slp->ns_cc == slp->ns_reclen) {
			recm = slp->ns_raw;
			slp->ns_raw = slp->ns_rawend = NULL;
			slp->ns_cc = slp->ns_reclen = 0;
		} else if (slp->ns_cc > slp->ns_reclen) {
			len = 0;
			m = slp->ns_raw;
			om = NULL;

			while (len < slp->ns_reclen) {
				if ((len + m->m_len) > slp->ns_reclen) {
					m2 = m_copym(m, 0, slp->ns_reclen - len,
						     waitflag);
					if (m2) {
						if (om) {
							om->m_next = m2;
							recm = slp->ns_raw;
						} else {
							recm = m2;
						}
						m->m_data += slp->ns_reclen - len;
						m->m_len -= slp->ns_reclen - len;
						len = slp->ns_reclen;
					} else {
						return (EWOULDBLOCK);
					}
				} else if ((len + m->m_len) == slp->ns_reclen) {
					om = m;
					len += m->m_len;
					m = m->m_next;
					recm = slp->ns_raw;
					om->m_next = NULL;
				} else {
					om = m;
					len += m->m_len;
					m = m->m_next;
				}
			}
			slp->ns_raw = m;
			slp->ns_cc -= len;
			slp->ns_reclen = 0;
		} else {
			return (0);
		}

		/*
		 * Accumulate the fragments into a record.
		 */
		mpp = &slp->ns_frag;
		while (*mpp)
			mpp = &((*mpp)->m_next);
		*mpp = recm;
		if (slp->ns_flag & SLP_LASTFRAG) {
			struct nfsrv_rec *rec;
			int mf = (waitflag & MB_DONTWAIT) ?
				 M_NOWAIT : M_WAITOK;
			rec = kmalloc(sizeof(struct nfsrv_rec),
				      M_NFSRVDESC, mf);
			if (!rec) {
				m_freem(slp->ns_frag);
			} else {
				nfs_realign(&slp->ns_frag, 10 * NFSX_UNSIGNED);
				rec->nr_address = NULL;
				rec->nr_packet = slp->ns_frag;
				STAILQ_INSERT_TAIL(&slp->ns_rec, rec, nr_link);
				++slp->ns_numrec;
				++*countp;
			}
			slp->ns_frag = NULL;
		}
	}
}
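/*
 * Illustrative sketch (not compiled): how an nfsd thread might drain
 * completed records with nfsrv_dorec().  The surrounding loop is
 * hypothetical; the real consumer lives in the nfssvc code.
 */
#if 0
	struct nfsrv_descript *nd;

	while (nfsrv_dorec(slp, nfsd, &nd) == 0) {
		/* dispatch nd to the matching nfs service procedure */
	}
#endif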
/*
 * Dequeue the next completed record from a service socket and parse
 * its RPC header into an nfsrv_descript.
 */
int
nfsrv_dorec(struct nfssvc_sock *slp, struct nfsd *nfsd,
	    struct nfsrv_descript **ndp)
{
	struct nfsrv_rec *rec;
	struct mbuf *m;
	struct sockaddr *nam;
	struct nfsrv_descript *nd;
	int error;

	*ndp = NULL;
	if ((slp->ns_flag & SLP_VALID) == 0 || !STAILQ_FIRST(&slp->ns_rec))
		return (ENOBUFS);
	rec = STAILQ_FIRST(&slp->ns_rec);
	STAILQ_REMOVE_HEAD(&slp->ns_rec, nr_link);
	KKASSERT(slp->ns_numrec > 0);
	--slp->ns_numrec;
	nam = rec->nr_address;
	m = rec->nr_packet;
	kfree(rec, M_NFSRVDESC);
	MALLOC(nd, struct nfsrv_descript *, sizeof (struct nfsrv_descript),
	       M_NFSRVDESC, M_WAITOK);
	nd->nd_md = nd->nd_mrep = m;
	nd->nd_nam2 = nam;
	nd->nd_dpos = mtod(m, caddr_t);
	error = nfs_getreq(nd, nfsd, TRUE);
	if (error) {
		if (nam) {
			FREE(nam, M_SONAME);
		}
		kfree((caddr_t)nd, M_NFSRVDESC);
		return (error);
	}
	*ndp = nd;
	nfsd->nfsd_nd = nd;
	return (0);
}

/*
 * Try to assign service sockets to nfsd threads based on the number
 * of new rpc requests that have been queued on the service socket.
 *
 * If no nfsds are available or additional requests are pending, set the
 * NFSD_CHECKSLP flag so that one of the running nfsds will go look for
 * the work in the nfssvc_sock list when it is finished processing its
 * current work.  This flag is only cleared when an nfsd cannot find
 * any new work to perform.
 */
void
nfsrv_wakenfsd(struct nfssvc_sock *slp, int nparallel)
{
	struct nfsd *nd;

	if ((slp->ns_flag & SLP_VALID) == 0)
		return;
	if (nparallel <= 1)
		nparallel = 1;
	TAILQ_FOREACH(nd, &nfsd_head, nfsd_chain) {
		if (nd->nfsd_flag & NFSD_WAITING) {
			nd->nfsd_flag &= ~NFSD_WAITING;
			if (nd->nfsd_slp)
				panic("nfsd wakeup");
			slp->ns_sref++;
			nd->nfsd_slp = slp;
			wakeup((caddr_t)nd);
			if (--nparallel == 0)
				break;
		}
	}
	if (nparallel) {
		slp->ns_flag |= SLP_DOREC;
		nfsd_head_flag |= NFSD_CHECKSLP;
	}
}
#endif /* NFS_NOSERVER */