/*
 * Copyright (c) 1989, 1991, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_socket.c	8.5 (Berkeley) 3/30/95
 * $FreeBSD: src/sys/nfs/nfs_socket.c,v 1.60.2.6 2003/03/26 01:44:46 alfred Exp $
 * $DragonFly: src/sys/vfs/nfs/nfs_socket.c,v 1.45 2007/05/18 17:05:13 dillon Exp $
 */

/*
 * Socket operations for use by nfs
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/protosw.h>
#include <sys/resourcevar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/socketops.h>
#include <sys/syslog.h>
#include <sys/thread.h>
#include <sys/tprintf.h>
#include <sys/sysctl.h>
#include <sys/signalvar.h>
#include <sys/mutex.h>

#include <sys/signal2.h>
#include <sys/mutex2.h>

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/thread2.h>

#include "rpcv2.h"
#include "nfsproto.h"
#include "nfs.h"
#include "xdr_subs.h"
#include "nfsm_subs.h"
#include "nfsmount.h"
#include "nfsnode.h"
#include "nfsrtt.h"

#define	TRUE	1
#define	FALSE	0

/*
 * RTT calculations are scaled by 256 (8 bits).  A proper fractional
 * RTT will still be calculated even with a slow NFS timer.
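 *
 * For example, with NFS_RTT_SCALE = 256 a smoothed RTT of 2.5 ticks is
 * stored as the integer 640 (2.5 * 256), so the integer update code
 * below does not throw away the fractional part of the estimate.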
 */
#define	NFS_SRTT(r)	(r)->r_nmp->nm_srtt[proct[(r)->r_procnum]]
#define	NFS_SDRTT(r)	(r)->r_nmp->nm_sdrtt[proct[(r)->r_procnum]]
#define	NFS_RTT_SCALE_BITS	8	/* bits */
#define	NFS_RTT_SCALE		256	/* value */

/*
 * Defines which timer to use for the procnum.
 * 0 - default
 * 1 - getattr
 * 2 - lookup
 * 3 - read
 * 4 - write
 * 5 - commit
 */
static int proct[NFS_NPROCS] = {
	0, 1, 0, 2, 1, 3, 3, 4, 0, 0,	/* 00-09 */
	0, 0, 0, 0, 0, 0, 3, 3, 0, 0,	/* 10-19 */
	0, 5, 0, 0, 0, 0,		/* 20-25 */
};

static int multt[NFS_NPROCS] = {
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1,	/* 00-09 */
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1,	/* 10-19 */
	1, 2, 1, 1, 1, 1,		/* 20-25 */
};

static int nfs_backoff[8] = { 2, 3, 5, 8, 13, 21, 34, 55 };
static int nfs_realign_test;
static int nfs_realign_count;
static int nfs_showrtt;
static int nfs_showrexmit;
int nfs_maxasyncbio = NFS_MAXASYNCBIO;

SYSCTL_DECL(_vfs_nfs);

SYSCTL_INT(_vfs_nfs, OID_AUTO, realign_test, CTLFLAG_RW, &nfs_realign_test, 0, "");
SYSCTL_INT(_vfs_nfs, OID_AUTO, realign_count, CTLFLAG_RW, &nfs_realign_count, 0, "");
SYSCTL_INT(_vfs_nfs, OID_AUTO, showrtt, CTLFLAG_RW, &nfs_showrtt, 0, "");
SYSCTL_INT(_vfs_nfs, OID_AUTO, showrexmit, CTLFLAG_RW, &nfs_showrexmit, 0, "");
SYSCTL_INT(_vfs_nfs, OID_AUTO, maxasyncbio, CTLFLAG_RW, &nfs_maxasyncbio, 0, "");

static int nfs_request_setup(nfsm_info_t info);
static int nfs_request_auth(struct nfsreq *rep);
static int nfs_request_try(struct nfsreq *rep);
static int nfs_request_waitreply(struct nfsreq *rep);
static int nfs_request_processreply(nfsm_info_t info, int);

int nfsrtton = 0;
struct nfsrtt nfsrtt;
struct callout	nfs_timer_handle;

static int	nfs_msg (struct thread *, char *, char *);
static int	nfs_rcvlock (struct nfsmount *nmp, struct nfsreq *myreq);
static void	nfs_rcvunlock (struct nfsmount *nmp);
static void	nfs_realign (struct mbuf **pm, int hsiz);
static int	nfs_receive (struct nfsmount *nmp, struct nfsreq *rep,
			     struct sockaddr **aname, struct mbuf **mp);
static void	nfs_softterm (struct nfsreq *rep, int islocked);
static void	nfs_hardterm (struct nfsreq *rep, int islocked);
static int	nfs_reconnect (struct nfsmount *nmp, struct nfsreq *rep);
#ifndef NFS_NOSERVER
static int	nfsrv_getstream (struct nfssvc_sock *, int, int *);
static void	nfs_timer_req(struct nfsreq *req);

int (*nfsrv3_procs[NFS_NPROCS]) (struct nfsrv_descript *nd,
				 struct nfssvc_sock *slp,
				 struct thread *td,
				 struct mbuf **mreqp) = {
	nfsrv_null,
	nfsrv_getattr,
	nfsrv_setattr,
	nfsrv_lookup,
	nfsrv3_access,
	nfsrv_readlink,
	nfsrv_read,
	nfsrv_write,
	nfsrv_create,
	nfsrv_mkdir,
	nfsrv_symlink,
	nfsrv_mknod,
	nfsrv_remove,
	nfsrv_rmdir,
	nfsrv_rename,
	nfsrv_link,
	nfsrv_readdir,
	nfsrv_readdirplus,
	nfsrv_statfs,
	nfsrv_fsinfo,
	nfsrv_pathconf,
	nfsrv_commit,
	nfsrv_noop,
	nfsrv_noop,
	nfsrv_noop,
	nfsrv_noop
};
#endif /* NFS_NOSERVER */

/*
 * Initialize sockets and congestion for a new NFS connection.
 * We do not free the sockaddr if error.
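 *
 * When NFSMNT_RESVPORT is set we temporarily switch the socket to
 * IP_PORTRANGE_LOW so that the anonymous bind below allocates a
 * reserved port, then restore IP_PORTRANGE_DEFAULT afterwards.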
 */
int
nfs_connect(struct nfsmount *nmp, struct nfsreq *rep)
{
	struct socket *so;
	int error;
	struct sockaddr *saddr;
	struct sockaddr_in *sin;
	struct thread *td = &thread0; /* only used for socreate and sobind */

	nmp->nm_so = NULL;
	saddr = nmp->nm_nam;
	error = socreate(saddr->sa_family, &nmp->nm_so, nmp->nm_sotype,
			 nmp->nm_soproto, td);
	if (error)
		goto bad;
	so = nmp->nm_so;
	nmp->nm_soflags = so->so_proto->pr_flags;

	/*
	 * Some servers require that the client port be a reserved port number.
	 */
	if (saddr->sa_family == AF_INET && (nmp->nm_flag & NFSMNT_RESVPORT)) {
		struct sockopt sopt;
		int ip;
		struct sockaddr_in ssin;

		bzero(&sopt, sizeof sopt);
		ip = IP_PORTRANGE_LOW;
		sopt.sopt_level = IPPROTO_IP;
		sopt.sopt_name = IP_PORTRANGE;
		sopt.sopt_val = (void *)&ip;
		sopt.sopt_valsize = sizeof(ip);
		sopt.sopt_td = NULL;
		error = sosetopt(so, &sopt);
		if (error)
			goto bad;
		bzero(&ssin, sizeof ssin);
		sin = &ssin;
		sin->sin_len = sizeof (struct sockaddr_in);
		sin->sin_family = AF_INET;
		sin->sin_addr.s_addr = INADDR_ANY;
		sin->sin_port = htons(0);
		error = sobind(so, (struct sockaddr *)sin, td);
		if (error)
			goto bad;
		bzero(&sopt, sizeof sopt);
		ip = IP_PORTRANGE_DEFAULT;
		sopt.sopt_level = IPPROTO_IP;
		sopt.sopt_name = IP_PORTRANGE;
		sopt.sopt_val = (void *)&ip;
		sopt.sopt_valsize = sizeof(ip);
		sopt.sopt_td = NULL;
		error = sosetopt(so, &sopt);
		if (error)
			goto bad;
	}

	/*
	 * Protocols that do not require connections may be optionally left
	 * unconnected for servers that reply from a port other than NFS_PORT.
	 */
	if (nmp->nm_flag & NFSMNT_NOCONN) {
		if (nmp->nm_soflags & PR_CONNREQUIRED) {
			error = ENOTCONN;
			goto bad;
		}
	} else {
		error = soconnect(so, nmp->nm_nam, td);
		if (error)
			goto bad;

		/*
		 * Wait for the connection to complete. Cribbed from the
		 * connect system call but with the wait timing out so
		 * that interruptible mounts don't hang here for a long time.
		 */
		crit_enter();
		while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) {
			(void) tsleep((caddr_t)&so->so_timeo, 0,
				      "nfscon", 2 * hz);
			if ((so->so_state & SS_ISCONNECTING) &&
			    so->so_error == 0 && rep &&
			    (error = nfs_sigintr(nmp, rep, rep->r_td)) != 0) {
				so->so_state &= ~SS_ISCONNECTING;
				crit_exit();
				goto bad;
			}
		}
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			crit_exit();
			goto bad;
		}
		crit_exit();
	}
	so->so_rcv.ssb_timeo = (5 * hz);
	so->so_snd.ssb_timeo = (5 * hz);

	/*
	 * Get buffer reservation size from sysctl, but impose reasonable
	 * limits.
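	 *
	 * For TCP mounts we also enable SO_KEEPALIVE so dead peers are
	 * eventually detected, and TCP_NODELAY so small RPC records are
	 * not held back by the Nagle algorithm.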
	 */
	if (nmp->nm_sotype == SOCK_STREAM) {
		if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
			struct sockopt sopt;
			int val;

			bzero(&sopt, sizeof sopt);
			sopt.sopt_level = SOL_SOCKET;
			sopt.sopt_name = SO_KEEPALIVE;
			sopt.sopt_val = &val;
			sopt.sopt_valsize = sizeof val;
			val = 1;
			sosetopt(so, &sopt);
		}
		if (so->so_proto->pr_protocol == IPPROTO_TCP) {
			struct sockopt sopt;
			int val;

			bzero(&sopt, sizeof sopt);
			sopt.sopt_level = IPPROTO_TCP;
			sopt.sopt_name = TCP_NODELAY;
			sopt.sopt_val = &val;
			sopt.sopt_valsize = sizeof val;
			val = 1;
			sosetopt(so, &sopt);
		}
	}
	error = soreserve(so, nfs_soreserve, nfs_soreserve, NULL);
	if (error)
		goto bad;
	so->so_rcv.ssb_flags |= SSB_NOINTR;
	so->so_snd.ssb_flags |= SSB_NOINTR;

	/* Initialize other non-zero congestion variables */
	nmp->nm_srtt[0] = nmp->nm_srtt[1] = nmp->nm_srtt[2] =
		nmp->nm_srtt[3] = (NFS_TIMEO << NFS_RTT_SCALE_BITS);
	nmp->nm_sdrtt[0] = nmp->nm_sdrtt[1] = nmp->nm_sdrtt[2] =
		nmp->nm_sdrtt[3] = 0;
	nmp->nm_maxasync_scaled = NFS_MINASYNC_SCALED;
	nmp->nm_timeouts = 0;
	return (0);

bad:
	nfs_disconnect(nmp);
	return (error);
}

/*
 * Reconnect routine:
 * Called when a connection is broken on a reliable protocol.
 * - clean up the old socket
 * - nfs_connect() again
 * - set R_NEEDSXMIT for all outstanding requests on mount point
 * If this fails the mount point is DEAD!
 * nb: Must be called with the nfs_sndlock() set on the mount point.
 */
static int
nfs_reconnect(struct nfsmount *nmp, struct nfsreq *rep)
{
	struct nfsreq *req;
	int error;

	nfs_disconnect(nmp);
	while ((error = nfs_connect(nmp, rep)) != 0) {
		if (error == EINTR || error == ERESTART)
			return (EINTR);
		(void) tsleep((caddr_t)&lbolt, 0, "nfscon", 0);
	}

	/*
	 * Loop through outstanding request list and fix up all requests
	 * on old socket.
	 */
	crit_enter();
	TAILQ_FOREACH(req, &nmp->nm_reqq, r_chain) {
		KKASSERT(req->r_nmp == nmp);
		req->r_flags |= R_NEEDSXMIT;
	}
	crit_exit();
	return (0);
}

/*
 * NFS disconnect. Clean up and unlink.
 */
void
nfs_disconnect(struct nfsmount *nmp)
{
	struct socket *so;

	if (nmp->nm_so) {
		so = nmp->nm_so;
		nmp->nm_so = NULL;
		soshutdown(so, SHUT_RDWR);
		soclose(so, FNONBLOCK);
	}
}

void
nfs_safedisconnect(struct nfsmount *nmp)
{
	nfs_rcvlock(nmp, NULL);
	nfs_disconnect(nmp);
	nfs_rcvunlock(nmp);
}

/*
 * This is the nfs send routine. For connection based socket types, it
 * must be called with an nfs_sndlock() on the socket.
 * "rep == NULL" indicates that it has been called from a server.
 * For the client side:
 * - return EINTR if the RPC is terminated, 0 otherwise
 * - set R_NEEDSXMIT if the send fails for any reason
 * - do any cleanup required by recoverable socket errors (?)
 * For the server side:
 * - return EINTR or ERESTART if interrupted by a signal
 * - return EPIPE if a connection is lost for connection based sockets (TCP...)
 * - do any cleanup required by recoverable socket errors (?)
 */
int
nfs_send(struct socket *so, struct sockaddr *nam, struct mbuf *top,
	 struct nfsreq *rep)
{
	struct sockaddr *sendnam;
	int error, soflags, flags;

	if (rep) {
		if (rep->r_flags & R_SOFTTERM) {
			m_freem(top);
			return (EINTR);
		}
		if ((so = rep->r_nmp->nm_so) == NULL) {
			rep->r_flags |= R_NEEDSXMIT;
			m_freem(top);
			return (0);
		}
		rep->r_flags &= ~R_NEEDSXMIT;
		soflags = rep->r_nmp->nm_soflags;
	} else {
		soflags = so->so_proto->pr_flags;
	}
	if ((soflags & PR_CONNREQUIRED) || (so->so_state & SS_ISCONNECTED))
		sendnam = NULL;
	else
		sendnam = nam;
	if (so->so_type == SOCK_SEQPACKET)
		flags = MSG_EOR;
	else
		flags = 0;

	error = so_pru_sosend(so, sendnam, NULL, top, NULL, flags,
			      curthread /*XXX*/);
	/*
	 * ENOBUFS for dgram sockets is transient and non-fatal.
	 * No need to log, and no need to break a soft mount.
	 */
	if (error == ENOBUFS && so->so_type == SOCK_DGRAM) {
		error = 0;
		/*
		 * do backoff retransmit on client
		 */
		if (rep) {
			if ((rep->r_nmp->nm_state & NFSSTA_SENDSPACE) == 0) {
				rep->r_nmp->nm_state |= NFSSTA_SENDSPACE;
				kprintf("Warning: NFS: Insufficient sendspace "
					"(%lu),\n"
					"\t You must increase vfs.nfs.soreserve "
					"or decrease vfs.nfs.maxasyncbio\n",
					so->so_snd.ssb_hiwat);
			}
			rep->r_flags |= R_NEEDSXMIT;
		}
	}

	if (error) {
		if (rep) {
			log(LOG_INFO, "nfs send error %d for server %s\n", error,
			    rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname);
			/*
			 * Deal with errors for the client side.
			 */
			if (rep->r_flags & R_SOFTTERM)
				error = EINTR;
			else
				rep->r_flags |= R_NEEDSXMIT;
		} else {
			log(LOG_INFO, "nfsd send error %d\n", error);
		}

		/*
		 * Handle any recoverable (soft) socket errors here. (?)
		 */
		if (error != EINTR && error != ERESTART &&
		    error != EWOULDBLOCK && error != EPIPE)
			error = 0;
	}
	return (error);
}

/*
 * Receive a Sun RPC Request/Reply. For SOCK_DGRAM, the work is all
 * done by soreceive(), but for SOCK_STREAM we must deal with the Record
 * Mark and consolidate the data into a new mbuf list.
 * nb: Sometimes TCP passes the data up to soreceive() in long lists of
 *     small mbufs.
 * For SOCK_STREAM we must be very careful to read an entire record once
 * we have read any of it, even if the system call has been interrupted.
 */
static int
nfs_receive(struct nfsmount *nmp, struct nfsreq *rep,
	    struct sockaddr **aname, struct mbuf **mp)
{
	struct socket *so;
	struct sockbuf sio;
	struct uio auio;
	struct iovec aio;
	struct mbuf *m;
	struct mbuf *control;
	u_int32_t len;
	struct sockaddr **getnam;
	int error, sotype, rcvflg;
	struct thread *td = curthread;	/* XXX */

	/*
	 * Set up arguments for soreceive()
	 */
	*mp = NULL;
	*aname = NULL;
	sotype = nmp->nm_sotype;

	/*
	 * For reliable protocols, lock against other senders/receivers
	 * in case a reconnect is necessary.
	 * For SOCK_STREAM, first get the Record Mark to find out how much
	 * more there is to get.
	 * We must lock the socket against other receivers
	 * until we have an entire rpc request/reply.
	 */
	if (sotype != SOCK_DGRAM) {
		error = nfs_sndlock(nmp, rep);
		if (error)
			return (error);
tryagain:
		/*
		 * Check for fatal errors and resending request.
		 */
		/*
		 * Ugh: If a reconnect attempt just happened, nm_so
		 * would have changed. NULL indicates a failed
		 * attempt that has essentially shut down this
		 * mount point.
		 */
		if (rep && (rep->r_mrep || (rep->r_flags & R_SOFTTERM))) {
			nfs_sndunlock(nmp);
			return (EINTR);
		}
		so = nmp->nm_so;
		if (so == NULL) {
			error = nfs_reconnect(nmp, rep);
			if (error) {
				nfs_sndunlock(nmp);
				return (error);
			}
			goto tryagain;
		}
		while (rep && (rep->r_flags & R_NEEDSXMIT)) {
			m = m_copym(rep->r_mreq, 0, M_COPYALL, MB_WAIT);
			nfsstats.rpcretries++;
			error = nfs_send(so, rep->r_nmp->nm_nam, m, rep);
			if (error) {
				if (error == EINTR || error == ERESTART ||
				    (error = nfs_reconnect(nmp, rep)) != 0) {
					nfs_sndunlock(nmp);
					return (error);
				}
				goto tryagain;
			}
		}
		nfs_sndunlock(nmp);
		if (sotype == SOCK_STREAM) {
			/*
			 * Get the length marker from the stream
			 */
			aio.iov_base = (caddr_t)&len;
			aio.iov_len = sizeof(u_int32_t);
			auio.uio_iov = &aio;
			auio.uio_iovcnt = 1;
			auio.uio_segflg = UIO_SYSSPACE;
			auio.uio_rw = UIO_READ;
			auio.uio_offset = 0;
			auio.uio_resid = sizeof(u_int32_t);
			auio.uio_td = td;
			do {
				rcvflg = MSG_WAITALL;
				error = so_pru_soreceive(so, NULL, &auio, NULL,
							 NULL, &rcvflg);
				if (error == EWOULDBLOCK && rep) {
					if (rep->r_flags & R_SOFTTERM)
						return (EINTR);
				}
			} while (error == EWOULDBLOCK);

			if (error == 0 && auio.uio_resid > 0) {
				/*
				 * Only log short packets if not EOF
				 */
				if (auio.uio_resid != sizeof(u_int32_t))
					log(LOG_INFO,
					    "short receive (%d/%d) from nfs server %s\n",
					    (int)(sizeof(u_int32_t) - auio.uio_resid),
					    (int)sizeof(u_int32_t),
					    nmp->nm_mountp->mnt_stat.f_mntfromname);
				error = EPIPE;
			}
			if (error)
				goto errout;
			/*
			 * The record mark's high bit flags the last
			 * fragment; the low 31 bits are the fragment
			 * length.
			 */
			len = ntohl(len) & ~0x80000000;
			/*
			 * This is SERIOUS! We are out of sync with the sender
			 * and forcing a disconnect/reconnect is all I can do.
			 */
			if (len > NFS_MAXPACKET) {
				log(LOG_ERR, "%s (%d) from nfs server %s\n",
				    "impossible packet length",
				    len,
				    nmp->nm_mountp->mnt_stat.f_mntfromname);
				error = EFBIG;
				goto errout;
			}

			/*
			 * Get the rest of the packet as an mbuf chain
			 */
			sbinit(&sio, len);
			do {
				rcvflg = MSG_WAITALL;
				error = so_pru_soreceive(so, NULL, NULL, &sio,
							 NULL, &rcvflg);
			} while (error == EWOULDBLOCK || error == EINTR ||
				 error == ERESTART);
			if (error == 0 && sio.sb_cc != len) {
				if (sio.sb_cc != 0)
					log(LOG_INFO,
					    "short receive (%d/%d) from nfs server %s\n",
					    len - auio.uio_resid, len,
					    nmp->nm_mountp->mnt_stat.f_mntfromname);
				error = EPIPE;
			}
			*mp = sio.sb_mb;
		} else {
			/*
			 * Non-stream, so get the whole packet by not
			 * specifying MSG_WAITALL and by specifying a large
			 * length.
			 *
			 * We have no use for control msg., but must grab them
			 * and then throw them away so we know what is going
			 * on.
			 */
			sbinit(&sio, 100000000);
			do {
				rcvflg = 0;
				error = so_pru_soreceive(so, NULL, NULL, &sio,
							 &control, &rcvflg);
				if (control)
					m_freem(control);
				if (error == EWOULDBLOCK && rep) {
					if (rep->r_flags & R_SOFTTERM) {
						m_freem(sio.sb_mb);
						return (EINTR);
					}
				}
			} while (error == EWOULDBLOCK ||
				 (error == 0 && sio.sb_mb == NULL && control));
			if ((rcvflg & MSG_EOR) == 0)
				kprintf("Egad!!\n");
			if (error == 0 && sio.sb_mb == NULL)
				error = EPIPE;
			len = sio.sb_cc;
			*mp = sio.sb_mb;
		}
errout:
		if (error && error != EINTR && error != ERESTART) {
			m_freem(*mp);
			*mp = NULL;
			if (error != EPIPE) {
				log(LOG_INFO,
				    "receive error %d from nfs server %s\n",
				    error,
				    nmp->nm_mountp->mnt_stat.f_mntfromname);
			}
			error = nfs_sndlock(nmp, rep);
			if (!error) {
				error = nfs_reconnect(nmp, rep);
				if (!error)
					goto tryagain;
				else
					nfs_sndunlock(nmp);
			}
		}
	} else {
		if ((so = nmp->nm_so) == NULL)
			return (EACCES);
		if (so->so_state & SS_ISCONNECTED)
			getnam = NULL;
		else
			getnam = aname;
		sbinit(&sio, 100000000);
		do {
			rcvflg = 0;
			error = so_pru_soreceive(so, getnam, NULL, &sio,
						 NULL, &rcvflg);
			if (error == EWOULDBLOCK && rep &&
			    (rep->r_flags & R_SOFTTERM)) {
				m_freem(sio.sb_mb);
				return (EINTR);
			}
		} while (error == EWOULDBLOCK);

		len = sio.sb_cc;
		*mp = sio.sb_mb;

		/*
		 * A shutdown may result in no error and no mbuf.
		 * Convert to EPIPE.
		 */
		if (*mp == NULL && error == 0)
			error = EPIPE;
	}
	if (error) {
		m_freem(*mp);
		*mp = NULL;
	}

	/*
	 * Search for any mbufs that are not a multiple of 4 bytes long
	 * or with m_data not longword aligned.
	 * These could cause pointer alignment problems, so copy them to
	 * well aligned mbufs.
	 */
	nfs_realign(mp, 5 * NFSX_UNSIGNED);
	return (error);
}

/*
 * Implement receipt of reply on a socket.
 *
 * We must search through the list of received datagrams matching them
 * with outstanding requests using the xid, until ours is found.
 *
 * If myrep is NULL we process packets on the socket until
 * interrupted or until nm_reqrxq is non-empty.
 */
/* ARGSUSED */
int
nfs_reply(struct nfsmount *nmp, struct nfsreq *myrep)
{
	struct nfsreq *rep;
	struct sockaddr *nam;
	u_int32_t rxid;
	u_int32_t *tl;
	int error;
	struct nfsm_info info;

	/*
	 * Loop around until we get our own reply
	 */
	for (;;) {
		/*
		 * Lock against other receivers so that I don't get stuck in
		 * sbwait() after someone else has received my reply for me.
		 * Also necessary for connection based protocols to avoid
		 * race conditions during a reconnect.
		 *
		 * If nfs_rcvlock() returns EALREADY, that means that
		 * the reply has already been received by another
		 * process and we can return immediately. In this
		 * case, the lock is not taken to avoid races with
		 * other processes.
		 */
		info.mrep = NULL;

		error = nfs_rcvlock(nmp, myrep);
		if (error == EALREADY)
			return (0);
		if (error)
			return (error);

		/*
		 * If myrep is NULL we are the receiver helper thread.
		 * Stop waiting for incoming replies if there are
		 * messages sitting on reqrxq that we need to process,
		 * or if a shutdown request is pending.
		 */
		if (myrep == NULL && (TAILQ_FIRST(&nmp->nm_reqrxq) ||
		    nmp->nm_rxstate > NFSSVC_PENDING)) {
			nfs_rcvunlock(nmp);
			return(EWOULDBLOCK);
		}

		/*
		 * Get the next Rpc reply off the socket
		 *
		 * We cannot release the receive lock until we've
		 * filled in rep->r_mrep, otherwise a waiting
		 * thread may deadlock in soreceive with no incoming
		 * packets expected.
		 */
		error = nfs_receive(nmp, myrep, &nam, &info.mrep);
		if (error) {
			/*
			 * Ignore routing errors on connectionless protocols??
			 */
			nfs_rcvunlock(nmp);
			if (NFSIGNORE_SOERROR(nmp->nm_soflags, error)) {
				if (nmp->nm_so == NULL)
					return (error);
				nmp->nm_so->so_error = 0;
				continue;
			}
			return (error);
		}
		if (nam)
			FREE(nam, M_SONAME);

		/*
		 * Get the xid and check that it is an rpc reply
		 */
		info.md = info.mrep;
		info.dpos = mtod(info.md, caddr_t);
		NULLOUT(tl = nfsm_dissect(&info, 2*NFSX_UNSIGNED));
		rxid = *tl++;
		if (*tl != rpc_reply) {
			nfsstats.rpcinvalid++;
			m_freem(info.mrep);
			info.mrep = NULL;
nfsmout:
			nfs_rcvunlock(nmp);
			continue;
		}

		/*
		 * Loop through the request list to match up the reply
		 * Iff no match, just drop the datagram. On match, set
		 * r_mrep atomically to prevent the timer from messing
		 * around with the request after we have exited the critical
		 * section.
		 */
		crit_enter();
		TAILQ_FOREACH(rep, &nmp->nm_reqq, r_chain) {
			if (rep->r_mrep == NULL && rxid == rep->r_xid)
				break;
		}

		/*
		 * Fill in the rest of the reply if we found a match.
		 *
		 * Deal with duplicate responses if there was no match.
		 */
		if (rep) {
			rep->r_md = info.md;
			rep->r_dpos = info.dpos;
			if (nfsrtton) {
				struct rttl *rt;

				rt = &nfsrtt.rttl[nfsrtt.pos];
				rt->proc = rep->r_procnum;
				rt->rto = 0;
				rt->sent = 0;
				rt->cwnd = nmp->nm_maxasync_scaled;
				rt->srtt = nmp->nm_srtt[proct[rep->r_procnum] - 1];
				rt->sdrtt = nmp->nm_sdrtt[proct[rep->r_procnum] - 1];
				rt->fsid = nmp->nm_mountp->mnt_stat.f_fsid;
				getmicrotime(&rt->tstamp);
				if (rep->r_flags & R_TIMING)
					rt->rtt = rep->r_rtt;
				else
					rt->rtt = 1000000;
				nfsrtt.pos = (nfsrtt.pos + 1) % NFSRTTLOGSIZ;
			}

			/*
			 * New congestion control is based only on async
			 * requests.
			 */
			if (nmp->nm_maxasync_scaled < NFS_MAXASYNC_SCALED)
				++nmp->nm_maxasync_scaled;
			if (rep->r_flags & R_SENT) {
				rep->r_flags &= ~R_SENT;
			}
			/*
			 * Update rtt using a gain of 0.125 on the mean
			 * and a gain of 0.25 on the deviation.
			 *
			 * NOTE SRTT/SDRTT are only good if R_TIMING is set.
			 */
			if ((rep->r_flags & R_TIMING) && rep->r_rexmit == 0) {
				/*
				 * Since the timer resolution of
				 * NFS_HZ is so coarse, it can often
				 * result in r_rtt == 0. Since
				 * r_rtt == N means that the actual
				 * rtt is between N+dt and N+2-dt ticks,
				 * add 1.
				 */
				int n;
				int d;

#define NFSRSB	NFS_RTT_SCALE_BITS
				n = ((NFS_SRTT(rep) * 7) +
				     (rep->r_rtt << NFSRSB)) >> 3;
				d = n - NFS_SRTT(rep);
				NFS_SRTT(rep) = n;

				/*
				 * Don't let the jitter calculation decay
				 * too quickly, but we want a fast rampup.
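				 *
				 * The code below therefore applies a gain
				 * of 1/4 when a sample raises the deviation
				 * but only 1/16 when it would lower it, so
				 * SDRTT ramps up quickly and decays slowly.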
				 */
				if (d < 0)
					d = -d;
				d <<= NFSRSB;
				if (d < NFS_SDRTT(rep))
					n = ((NFS_SDRTT(rep) * 15) + d) >> 4;
				else
					n = ((NFS_SDRTT(rep) * 3) + d) >> 2;
				NFS_SDRTT(rep) = n;
#undef NFSRSB
			}
			nmp->nm_timeouts = 0;
			rep->r_mrep = info.mrep;
			nfs_hardterm(rep, 0);
		} else {
			/*
			 * Extract vers, prog, nfsver, procnum. A duplicate
			 * response means we didn't wait long enough so
			 * we increase the SRTT to avoid future spurious
			 * timeouts.
			 */
			u_int procnum = nmp->nm_lastreprocnum;
			int n;

			if (procnum < NFS_NPROCS && proct[procnum]) {
				if (nfs_showrexmit)
					kprintf("D");
				n = nmp->nm_srtt[proct[procnum]];
				n += NFS_ASYSCALE * NFS_HZ;
				if (n < NFS_ASYSCALE * NFS_HZ * 10)
					n = NFS_ASYSCALE * NFS_HZ * 10;
				nmp->nm_srtt[proct[procnum]] = n;
			}
		}
		nfs_rcvunlock(nmp);
		crit_exit();

		/*
		 * If not matched to a request, drop it.
		 * If it's mine, get out.
		 */
		if (rep == NULL) {
			nfsstats.rpcunexpected++;
			m_freem(info.mrep);
			info.mrep = NULL;
		} else if (rep == myrep) {
			if (rep->r_mrep == NULL)
				panic("nfsreply nil");
			return (0);
		}
	}
}

/*
 * Run the request state machine until the target state is reached
 * or a fatal error occurs. The target state is not run. Specifying
 * a target of NFSM_STATE_DONE runs the state machine until the rpc
 * is complete.
 *
 * EINPROGRESS is returned for all states other than the DONE state,
 * indicating that the rpc is still in progress.
 */
int
nfs_request(struct nfsm_info *info, nfsm_state_t bstate, nfsm_state_t estate)
{
	struct nfsreq *req;

	while (info->state >= bstate && info->state < estate) {
		switch(info->state) {
		case NFSM_STATE_SETUP:
			/*
			 * Setup the nfsreq. Any error which occurs during
			 * this state is fatal.
			 */
			info->error = nfs_request_setup(info);
			if (info->error) {
				info->state = NFSM_STATE_DONE;
				return (info->error);
			} else {
				req = info->req;
				req->r_mrp = &info->mrep;
				req->r_mdp = &info->md;
				req->r_dposp = &info->dpos;
				info->state = NFSM_STATE_AUTH;
			}
			break;
		case NFSM_STATE_AUTH:
			/*
			 * Authenticate the nfsreq. Any error which occurs
			 * during this state is fatal.
			 */
			info->error = nfs_request_auth(info->req);
			if (info->error) {
				info->state = NFSM_STATE_DONE;
				return (info->error);
			} else {
				info->state = NFSM_STATE_TRY;
			}
			break;
		case NFSM_STATE_TRY:
			/*
			 * Transmit or retransmit attempt. An error in this
			 * state is ignored and we always move on to the
			 * next state.
			 *
			 * This can trivially race the receiver if the
			 * request is asynchronous. nfs_request_try()
			 * will thus set the state for us and we
			 * must also return immediately if we are
			 * running an async state machine, because
			 * info can become invalid due to races after
			 * try() returns.
			 */
			if (info->req->r_flags & R_ASYNC) {
				nfs_request_try(info->req);
				if (estate == NFSM_STATE_WAITREPLY)
					return (EINPROGRESS);
			} else {
				nfs_request_try(info->req);
				info->state = NFSM_STATE_WAITREPLY;
			}
			break;
		case NFSM_STATE_WAITREPLY:
			/*
			 * Wait for a reply or timeout and move on to the
			 * next state. The error returned by this state
			 * is passed to the processing code in the next
			 * state.
			 */
			info->error = nfs_request_waitreply(info->req);
			info->state = NFSM_STATE_PROCESSREPLY;
			break;
		case NFSM_STATE_PROCESSREPLY:
			/*
			 * Process the reply or timeout. Errors which occur
			 * in this state may cause the state machine to
			 * go back to an earlier state, and are fatal
			 * otherwise.
			 */
			info->error = nfs_request_processreply(info,
							       info->error);
			switch(info->error) {
			case ENEEDAUTH:
				info->state = NFSM_STATE_AUTH;
				break;
			case EAGAIN:
				info->state = NFSM_STATE_TRY;
				break;
			default:
				/*
				 * Operation complete, with or without an
				 * error. We are done.
				 */
				info->req = NULL;
				info->state = NFSM_STATE_DONE;
				return (info->error);
			}
			break;
		case NFSM_STATE_DONE:
			/*
			 * Shouldn't be reached
			 */
			return (info->error);
			/* NOT REACHED */
		}
	}

	/*
	 * If we are done return the error code (if any).
	 * Otherwise return EINPROGRESS.
	 */
	if (info->state == NFSM_STATE_DONE)
		return (info->error);
	return (EINPROGRESS);
}

/*
 * nfs_request - goes something like this
 *	- fill in request struct
 *	- links it into list
 *	- calls nfs_send() for first transmit
 *	- calls nfs_receive() to get reply
 *	- break down rpc header and return with nfs reply pointed to
 *	  by mrep or error
 * nb: always frees up mreq mbuf list
 */
static int
nfs_request_setup(nfsm_info_t info)
{
	struct nfsreq *req;
	struct nfsmount *nmp;
	struct mbuf *m;
	int i;

	/*
	 * Reject requests while attempting a forced unmount.
	 */
	if (info->vp->v_mount->mnt_kern_flag & MNTK_UNMOUNTF) {
		m_freem(info->mreq);
		info->mreq = NULL;
		return (ESTALE);
	}
	nmp = VFSTONFS(info->vp->v_mount);
	req = kmalloc(sizeof(struct nfsreq), M_NFSREQ, M_WAITOK);
	req->r_nmp = nmp;
	req->r_vp = info->vp;
	req->r_td = info->td;
	req->r_procnum = info->procnum;
	req->r_mreq = NULL;
	req->r_cred = info->cred;

	i = 0;
	m = info->mreq;
	while (m) {
		i += m->m_len;
		m = m->m_next;
	}
	req->r_mrest = info->mreq;
	req->r_mrest_len = i;

	/*
	 * The presence of a non-NULL r_info in req indicates
	 * async completion via our helper threads. See the receiver
	 * code.
	 */
	if (info->bio) {
		req->r_info = info;
		req->r_flags = R_ASYNC;
	} else {
		req->r_info = NULL;
		req->r_flags = 0;
	}
	info->req = req;
	return(0);
}

static int
nfs_request_auth(struct nfsreq *rep)
{
	struct nfsmount *nmp = rep->r_nmp;
	struct mbuf *m;
	char nickv[RPCX_NICKVERF];
	int error = 0, auth_len, auth_type;
	int verf_len;
	u_int32_t xid;
	char *auth_str, *verf_str;
	struct ucred *cred;

	cred = rep->r_cred;
	rep->r_failed_auth = 0;

	/*
	 * Get the RPC header with authorization.
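	 *
	 * For AUTH_UNIX the credential length computed below is five
	 * fixed 32-bit words plus one word per supplemental group,
	 * with the group count clipped to nm_numgrps.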
	 */
	verf_str = auth_str = NULL;
	if (nmp->nm_flag & NFSMNT_KERB) {
		verf_str = nickv;
		verf_len = sizeof (nickv);
		auth_type = RPCAUTH_KERB4;
		bzero((caddr_t)rep->r_key, sizeof(rep->r_key));
		if (rep->r_failed_auth ||
		    nfs_getnickauth(nmp, cred, &auth_str, &auth_len,
				    verf_str, verf_len)) {
			error = nfs_getauth(nmp, rep, cred, &auth_str,
					    &auth_len, verf_str, &verf_len,
					    rep->r_key);
			if (error) {
				m_freem(rep->r_mrest);
				rep->r_mrest = NULL;
				kfree((caddr_t)rep, M_NFSREQ);
				return (error);
			}
		}
	} else {
		auth_type = RPCAUTH_UNIX;
		if (cred->cr_ngroups < 1)
			panic("nfsreq nogrps");
		auth_len = ((((cred->cr_ngroups - 1) > nmp->nm_numgrps) ?
			nmp->nm_numgrps : (cred->cr_ngroups - 1)) << 2) +
			5 * NFSX_UNSIGNED;
	}
	m = nfsm_rpchead(cred, nmp->nm_flag, rep->r_procnum, auth_type,
			 auth_len, auth_str, verf_len, verf_str,
			 rep->r_mrest, rep->r_mrest_len, &rep->r_mheadend,
			 &xid);
	rep->r_mrest = NULL;
	if (auth_str)
		kfree(auth_str, M_TEMP);

	/*
	 * For stream protocols, insert a Sun RPC Record Mark.
	 */
	if (nmp->nm_sotype == SOCK_STREAM) {
		M_PREPEND(m, NFSX_UNSIGNED, MB_WAIT);
		if (m == NULL) {
			kfree(rep, M_NFSREQ);
			return (ENOBUFS);
		}
		/*
		 * The record mark is the last-fragment bit or'd with
		 * the 31 bit fragment length.
		 */
		*mtod(m, u_int32_t *) = htonl(0x80000000 |
			(m->m_pkthdr.len - NFSX_UNSIGNED));
	}
	rep->r_mreq = m;
	rep->r_xid = xid;
	return (0);
}

static int
nfs_request_try(struct nfsreq *rep)
{
	struct nfsmount *nmp = rep->r_nmp;
	struct mbuf *m2;
	int error;

	/*
	 * Request is not on any queue, only the owner has access to it
	 * so it should not be locked by anyone atm.
	 *
	 * Interlock to prevent races. While locked the only remote
	 * action possible is for r_mrep to be set (once we enqueue it).
	 */
	if (rep->r_flags == 0xdeadc0de) {
		print_backtrace();
		panic("flags nbad\n");
	}
	KKASSERT((rep->r_flags & (R_LOCKED | R_ONREQQ)) == 0);
	if (nmp->nm_flag & NFSMNT_SOFT)
		rep->r_retry = nmp->nm_retry;
	else
		rep->r_retry = NFS_MAXREXMIT + 1;	/* past clip limit */
	rep->r_rtt = rep->r_rexmit = 0;
	if (proct[rep->r_procnum] > 0)
		rep->r_flags |= R_TIMING | R_LOCKED;
	else
		rep->r_flags |= R_LOCKED;
	rep->r_mrep = NULL;

	/*
	 * Do the client side RPC.
	 */
	nfsstats.rpcrequests++;

	/*
	 * Chain request into list of outstanding requests. Be sure
	 * to put it LAST so timer finds oldest requests first. Note
	 * that our control of R_LOCKED prevents the request from
	 * getting ripped out from under us or transmitted by the
	 * timer code.
	 *
	 * For requests with info structures we must atomically set the
	 * info's state because the structure could become invalid upon
	 * return due to races (i.e., if async)
	 */
	crit_enter();
	mtx_link_init(&rep->r_link);
	TAILQ_INSERT_TAIL(&nmp->nm_reqq, rep, r_chain);
	rep->r_flags |= R_ONREQQ;
	++nmp->nm_reqqlen;
	if (rep->r_flags & R_ASYNC)
		rep->r_info->state = NFSM_STATE_WAITREPLY;
	crit_exit();

	error = 0;

	/*
	 * Send if we can. Congestion control is not handled here any more
	 * because trying to defer the initial send based on the nfs_timer
	 * requires having a very fast nfs_timer, which is silly.
	 */
	if (nmp->nm_so) {
		if (nmp->nm_soflags & PR_CONNREQUIRED)
			error = nfs_sndlock(nmp, rep);
		if (error == 0) {
			m2 = m_copym(rep->r_mreq, 0, M_COPYALL, MB_WAIT);
			error = nfs_send(nmp->nm_so, nmp->nm_nam, m2, rep);
			if (nmp->nm_soflags & PR_CONNREQUIRED)
				nfs_sndunlock(nmp);
			rep->r_flags &= ~R_NEEDSXMIT;
			if ((rep->r_flags & R_SENT) == 0) {
				rep->r_flags |= R_SENT;
			}
		} else {
			rep->r_flags |= R_NEEDSXMIT;
		}
	} else {
		rep->r_flags |= R_NEEDSXMIT;
		rep->r_rtt = -1;
	}
	if (error == EPIPE)
		error = 0;

	/*
	 * Release the lock. The only remote action that may have occurred
	 * would have been the setting of rep->r_mrep. If this occurred
	 * and the request was async we have to move it to the reader
	 * thread's queue for action.
	 *
	 * For async requests also make sure the reader is woken up so
	 * it gets on the socket to read responses.
	 */
	crit_enter();
	if (rep->r_flags & R_ASYNC) {
		if (rep->r_mrep)
			nfs_hardterm(rep, 1);
		rep->r_flags &= ~R_LOCKED;
		nfssvc_iod_reader_wakeup(nmp);
	} else {
		rep->r_flags &= ~R_LOCKED;
	}
	if (rep->r_flags & R_WANTED) {
		rep->r_flags &= ~R_WANTED;
		wakeup(rep);
	}
	crit_exit();
	return (error);
}

/*
 * This code is only called for synchronous requests. Completed synchronous
 * requests are left on reqq and we remove them before moving on to the
 * processing state.
 */
static int
nfs_request_waitreply(struct nfsreq *rep)
{
	struct nfsmount *nmp = rep->r_nmp;
	int error;

	KKASSERT((rep->r_flags & R_ASYNC) == 0);

	/*
	 * Wait until the request is finished.
	 */
	error = nfs_reply(nmp, rep);

	/*
	 * RPC done, unlink the request, but don't rip it out from under
	 * the callout timer.
	 *
	 * Once unlinked no other receiver or the timer will have
	 * visibility, so we do not have to set R_LOCKED.
	 */
	crit_enter();
	while (rep->r_flags & R_LOCKED) {
		rep->r_flags |= R_WANTED;
		tsleep(rep, 0, "nfstrac", 0);
	}
	KKASSERT(rep->r_flags & R_ONREQQ);
	TAILQ_REMOVE(&nmp->nm_reqq, rep, r_chain);
	rep->r_flags &= ~R_ONREQQ;
	--nmp->nm_reqqlen;
	if (TAILQ_FIRST(&nmp->nm_bioq) &&
	    nmp->nm_reqqlen == NFS_MAXASYNCBIO * 2 / 3) {
		nfssvc_iod_writer_wakeup(nmp);
	}
	crit_exit();

	/*
	 * Decrement the outstanding request count.
	 */
	if (rep->r_flags & R_SENT) {
		rep->r_flags &= ~R_SENT;
	}
	return (error);
}

/*
 * Process reply with error returned from nfs_request_waitreply().
 *
 * Returns EAGAIN if it wants us to loop up to nfs_request_try() again.
 * Returns ENEEDAUTH if it wants us to loop up to nfs_request_auth() again.
 */
static int
nfs_request_processreply(nfsm_info_t info, int error)
{
	struct nfsreq *req = info->req;
	struct nfsmount *nmp = req->r_nmp;
	u_int32_t *tl;
	int verf_type;
	int i;

	/*
	 * If there was a successful reply and a tprintf msg has been
	 * printed, tprintf a response.
	 */
	if (error == 0 && (req->r_flags & R_TPRINTFMSG)) {
		nfs_msg(req->r_td, nmp->nm_mountp->mnt_stat.f_mntfromname,
			"is alive again");
	}
	info->mrep = req->r_mrep;
	info->md = req->r_md;
	info->dpos = req->r_dpos;
	if (error) {
		m_freem(req->r_mreq);
		req->r_mreq = NULL;
		kfree(req, M_NFSREQ);
		info->req = NULL;
		return (error);
	}

	/*
	 * break down the rpc header and check if ok
	 */
	NULLOUT(tl = nfsm_dissect(info, 3 * NFSX_UNSIGNED));
	if (*tl++ == rpc_msgdenied) {
		if (*tl == rpc_mismatch) {
			error = EOPNOTSUPP;
		} else if ((nmp->nm_flag & NFSMNT_KERB) &&
			   *tl++ == rpc_autherr) {
			if (req->r_failed_auth == 0) {
				req->r_failed_auth++;
				req->r_mheadend->m_next = NULL;
				m_freem(info->mrep);
				info->mrep = NULL;
				m_freem(req->r_mreq);
				return (ENEEDAUTH);
			} else {
				error = EAUTH;
			}
		} else {
			error = EACCES;
		}
		m_freem(info->mrep);
		info->mrep = NULL;
		m_freem(req->r_mreq);
		req->r_mreq = NULL;
		kfree(req, M_NFSREQ);
		info->req = NULL;
		return (error);
	}

	/*
	 * Grab any Kerberos verifier, otherwise just throw it away.
	 */
	verf_type = fxdr_unsigned(int, *tl++);
	i = fxdr_unsigned(int32_t, *tl);
	if ((nmp->nm_flag & NFSMNT_KERB) && verf_type == RPCAUTH_KERB4) {
		error = nfs_savenickauth(nmp, req->r_cred, i, req->r_key,
					 &info->md, &info->dpos, info->mrep);
		if (error)
			goto nfsmout;
	} else if (i > 0) {
		ERROROUT(nfsm_adv(info, nfsm_rndup(i)));
	}
	NULLOUT(tl = nfsm_dissect(info, NFSX_UNSIGNED));
	/* 0 == ok */
	if (*tl == 0) {
		NULLOUT(tl = nfsm_dissect(info, NFSX_UNSIGNED));
		if (*tl != 0) {
			error = fxdr_unsigned(int, *tl);

			/*
			 * Does anyone even implement this? Just impose
			 * a 1-second delay.
			 */
			if ((nmp->nm_flag & NFSMNT_NFSV3) &&
			    error == NFSERR_TRYLATER) {
				m_freem(info->mrep);
				info->mrep = NULL;
				error = 0;

				tsleep((caddr_t)&lbolt, 0, "nqnfstry", 0);
				return (EAGAIN);	/* goto tryagain */
			}

			/*
			 * If the File Handle was stale, invalidate the
			 * lookup cache, just in case.
			 *
			 * To avoid namecache<->vnode deadlocks we must
			 * release the vnode lock if we hold it.
			 */
			if (error == ESTALE) {
				struct vnode *vp = req->r_vp;
				int ltype;

				ltype = lockstatus(&vp->v_lock, curthread);
				if (ltype == LK_EXCLUSIVE || ltype == LK_SHARED)
					lockmgr(&vp->v_lock, LK_RELEASE);
				cache_inval_vp(vp, CINV_CHILDREN);
				if (ltype == LK_EXCLUSIVE || ltype == LK_SHARED)
					lockmgr(&vp->v_lock, ltype);
			}
			if (nmp->nm_flag & NFSMNT_NFSV3) {
				KKASSERT(*req->r_mrp == info->mrep);
				KKASSERT(*req->r_mdp == info->md);
				KKASSERT(*req->r_dposp == info->dpos);
				error |= NFSERR_RETERR;
			} else {
				m_freem(info->mrep);
				info->mrep = NULL;
			}
			m_freem(req->r_mreq);
			req->r_mreq = NULL;
			kfree(req, M_NFSREQ);
			info->req = NULL;
			return (error);
		}

		KKASSERT(*req->r_mrp == info->mrep);
		KKASSERT(*req->r_mdp == info->md);
		KKASSERT(*req->r_dposp == info->dpos);
		m_freem(req->r_mreq);
		req->r_mreq = NULL;
		FREE(req, M_NFSREQ);
		return (0);
	}
	m_freem(info->mrep);
	info->mrep = NULL;
	error = EPROTONOSUPPORT;
nfsmout:
	m_freem(req->r_mreq);
	req->r_mreq = NULL;
	kfree(req, M_NFSREQ);
	info->req = NULL;
	return (error);
}

#ifndef NFS_NOSERVER
/*
 * Generate the rpc reply header
 * siz arg. is used to decide if adding a cluster is worthwhile
 */
int
nfs_rephead(int siz, struct nfsrv_descript *nd, struct nfssvc_sock *slp,
	    int err, struct mbuf **mrq, struct mbuf **mbp, caddr_t *bposp)
{
	u_int32_t *tl;
	struct nfsm_info info;

	siz += RPC_REPLYSIZ;
	info.mb = m_getl(max_hdr + siz, MB_WAIT, MT_DATA, M_PKTHDR, NULL);
	info.mreq = info.mb;
	info.mreq->m_pkthdr.len = 0;
	/*
	 * If this is not a cluster, try and leave leading space
	 * for the lower level headers.
	 */
	if ((max_hdr + siz) < MINCLSIZE)
		info.mreq->m_data += max_hdr;
	tl = mtod(info.mreq, u_int32_t *);
	info.mreq->m_len = 6 * NFSX_UNSIGNED;
	info.bpos = ((caddr_t)tl) + info.mreq->m_len;
	*tl++ = txdr_unsigned(nd->nd_retxid);
	*tl++ = rpc_reply;
	if (err == ERPCMISMATCH || (err & NFSERR_AUTHERR)) {
		*tl++ = rpc_msgdenied;
		if (err & NFSERR_AUTHERR) {
			*tl++ = rpc_autherr;
			*tl = txdr_unsigned(err & ~NFSERR_AUTHERR);
			info.mreq->m_len -= NFSX_UNSIGNED;
			info.bpos -= NFSX_UNSIGNED;
		} else {
			*tl++ = rpc_mismatch;
			*tl++ = txdr_unsigned(RPC_VER2);
			*tl = txdr_unsigned(RPC_VER2);
		}
	} else {
		*tl++ = rpc_msgaccepted;

		/*
		 * For Kerberos authentication, we must send the nickname
		 * verifier back, otherwise just RPCAUTH_NULL.
		 */
		if (nd->nd_flag & ND_KERBFULL) {
			struct nfsuid *nuidp;
			struct timeval ktvin, ktvout;

			for (nuidp = NUIDHASH(slp, nd->nd_cr.cr_uid)->lh_first;
			     nuidp != 0; nuidp = nuidp->nu_hash.le_next) {
				if (nuidp->nu_cr.cr_uid == nd->nd_cr.cr_uid &&
				    (!nd->nd_nam2 || netaddr_match(NU_NETFAM(nuidp),
				     &nuidp->nu_haddr, nd->nd_nam2)))
					break;
			}
			if (nuidp) {
				ktvin.tv_sec =
				    txdr_unsigned(nuidp->nu_timestamp.tv_sec - 1);
				ktvin.tv_usec =
				    txdr_unsigned(nuidp->nu_timestamp.tv_usec);

				/*
				 * Encrypt the timestamp in ecb mode using the
				 * session key.
				 */
#ifdef NFSKERB
				XXX
#endif

				*tl++ = rpc_auth_kerb;
				*tl++ = txdr_unsigned(3 * NFSX_UNSIGNED);
				*tl = ktvout.tv_sec;
				tl = nfsm_build(&info, 3 * NFSX_UNSIGNED);
				*tl++ = ktvout.tv_usec;
				*tl++ = txdr_unsigned(nuidp->nu_cr.cr_uid);
			} else {
				*tl++ = 0;
				*tl++ = 0;
			}
		} else {
			*tl++ = 0;
			*tl++ = 0;
		}
		switch (err) {
		case EPROGUNAVAIL:
			*tl = txdr_unsigned(RPC_PROGUNAVAIL);
			break;
		case EPROGMISMATCH:
			*tl = txdr_unsigned(RPC_PROGMISMATCH);
			tl = nfsm_build(&info, 2 * NFSX_UNSIGNED);
			*tl++ = txdr_unsigned(2);
			*tl = txdr_unsigned(3);
			break;
		case EPROCUNAVAIL:
			*tl = txdr_unsigned(RPC_PROCUNAVAIL);
			break;
		case EBADRPC:
			*tl = txdr_unsigned(RPC_GARBAGE);
			break;
		default:
			*tl = 0;
			if (err != NFSERR_RETVOID) {
				tl = nfsm_build(&info, NFSX_UNSIGNED);
				if (err)
					*tl = txdr_unsigned(nfsrv_errmap(nd, err));
				else
					*tl = 0;
			}
			break;
		}
	}

	if (mrq != NULL)
		*mrq = info.mreq;
	*mbp = info.mb;
	*bposp = info.bpos;
	if (err != 0 && err != NFSERR_RETVOID)
		nfsstats.srvrpc_errs++;
	return (0);
}

#endif /* NFS_NOSERVER */

/*
 * Nfs timer routine.
 *
 * Scan the nfsreq list and retransmit any requests that have timed out.
 * To avoid retransmission attempts on STREAM sockets (in the future) make
 * sure to set the r_retry field to 0 (implies nm_retry == 0).
 *
 * Requests with attached responses, terminated requests, and
 * locked requests are ignored. Locked requests will be picked up
 * in a later timer call.
 */
void
nfs_timer(void *arg /* never used */)
{
	struct nfsmount *nmp;
	struct nfsreq *req;
#ifndef NFS_NOSERVER
	struct nfssvc_sock *slp;
	u_quad_t cur_usec;
#endif /* NFS_NOSERVER */

	crit_enter();
	TAILQ_FOREACH(nmp, &nfs_mountq, nm_entry) {
		TAILQ_FOREACH(req, &nmp->nm_reqq, r_chain) {
			KKASSERT(nmp == req->r_nmp);
			if (req->r_mrep)
				continue;
			if (req->r_flags & (R_SOFTTERM | R_LOCKED))
				continue;
			req->r_flags |= R_LOCKED;
			if (nfs_sigintr(nmp, req, req->r_td)) {
				nfs_softterm(req, 1);
			} else {
				nfs_timer_req(req);
			}
			req->r_flags &= ~R_LOCKED;
			if (req->r_flags & R_WANTED) {
				req->r_flags &= ~R_WANTED;
				wakeup(req);
			}
		}
	}
#ifndef NFS_NOSERVER

	/*
	 * Scan the write gathering queues for writes that need to be
	 * completed now.
	 */
	cur_usec = nfs_curusec();
	TAILQ_FOREACH(slp, &nfssvc_sockhead, ns_chain) {
		if (slp->ns_tq.lh_first &&
		    slp->ns_tq.lh_first->nd_time <= cur_usec)
			nfsrv_wakenfsd(slp, 1);
	}
#endif /* NFS_NOSERVER */
	crit_exit();
	callout_reset(&nfs_timer_handle, nfs_ticks, nfs_timer, NULL);
}

static
void
nfs_timer_req(struct nfsreq *req)
{
	struct thread *td = &thread0; /* XXX for creds, will break if sleep */
	struct nfsmount *nmp = req->r_nmp;
	struct mbuf *m;
	struct socket *so;
	int timeo;
	int error;

	/*
	 * rtt ticks and timeout calculation. Return if the timeout
	 * has not been reached yet, unless the packet is flagged
	 * for an immediate send.
	 *
	 * The mean rtt doesn't help when we get random I/Os, we have
	 * to multiply by fairly large numbers.
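	 *
	 * Note that timeo below is computed in the scaled
	 * (<< NFS_RTT_SCALE_BITS) domain and is only converted back to
	 * ticks, plus 1 to cover timer jitter, just before it is
	 * compared against r_rtt.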
	 */
	if (req->r_rtt >= 0) {
		/*
		 * Calculate the timeout to test against.
		 */
		req->r_rtt++;
		if (nmp->nm_flag & NFSMNT_DUMBTIMR) {
			timeo = nmp->nm_timeo << NFS_RTT_SCALE_BITS;
		} else if (req->r_flags & R_TIMING) {
			timeo = NFS_SRTT(req) + NFS_SDRTT(req);
		} else {
			timeo = nmp->nm_timeo << NFS_RTT_SCALE_BITS;
		}
		timeo *= multt[req->r_procnum];
		/* timeo is still scaled by SCALE_BITS */

#define NFSFS	(NFS_RTT_SCALE * NFS_HZ)
		if (req->r_flags & R_TIMING) {
			static long last_time;
			if (nfs_showrtt && last_time != time_second) {
				kprintf("rpccmd %d NFS SRTT %d SDRTT %d "
					"timeo %d.%03d\n",
					proct[req->r_procnum],
					NFS_SRTT(req), NFS_SDRTT(req),
					timeo / NFSFS,
					timeo % NFSFS * 1000 / NFSFS);
				last_time = time_second;
			}
		}
#undef NFSFS

		/*
		 * deal with nfs_timer jitter.
		 */
		timeo = (timeo >> NFS_RTT_SCALE_BITS) + 1;
		if (timeo < 2)
			timeo = 2;

		if (nmp->nm_timeouts > 0)
			timeo *= nfs_backoff[nmp->nm_timeouts - 1];
		if (timeo > NFS_MAXTIMEO)
			timeo = NFS_MAXTIMEO;
		if (req->r_rtt <= timeo) {
			if ((req->r_flags & R_NEEDSXMIT) == 0)
				return;
		} else if (nmp->nm_timeouts < 8) {
			nmp->nm_timeouts++;
		}
	}

	/*
	 * Check for server not responding
	 */
	if ((req->r_flags & R_TPRINTFMSG) == 0 &&
	    req->r_rexmit > nmp->nm_deadthresh) {
		nfs_msg(req->r_td, nmp->nm_mountp->mnt_stat.f_mntfromname,
			"not responding");
		req->r_flags |= R_TPRINTFMSG;
	}
	if (req->r_rexmit >= req->r_retry) {	/* too many */
		nfsstats.rpctimeouts++;
		nfs_softterm(req, 1);
		return;
	}

	/*
	 * Generally disable retransmission on reliable sockets,
	 * unless the request is flagged for immediate send.
	 */
	if (nmp->nm_sotype != SOCK_DGRAM) {
		if (++req->r_rexmit > NFS_MAXREXMIT)
			req->r_rexmit = NFS_MAXREXMIT;
		if ((req->r_flags & R_NEEDSXMIT) == 0)
			return;
	}

	/*
	 * Stop here if we do not have a socket!
	 */
	if ((so = nmp->nm_so) == NULL)
		return;

	/*
	 * If there is enough space and the window allows.. resend it.
	 *
	 * r_rtt is left intact in case we get an answer after the
	 * retry that was a reply to the original packet.
	 */
	if (ssb_space(&so->so_snd) >= req->r_mreq->m_pkthdr.len &&
	    (req->r_flags & (R_SENT | R_NEEDSXMIT)) &&
	    (m = m_copym(req->r_mreq, 0, M_COPYALL, MB_DONTWAIT))) {
		if ((nmp->nm_flag & NFSMNT_NOCONN) == 0)
			error = so_pru_send(so, 0, m, NULL, NULL, td);
		else
			error = so_pru_send(so, 0, m, nmp->nm_nam,
					    NULL, td);
		if (error) {
			if (NFSIGNORE_SOERROR(nmp->nm_soflags, error))
				so->so_error = 0;
			req->r_flags |= R_NEEDSXMIT;
		} else if (req->r_mrep == NULL) {
			/*
			 * Iff first send, start timing
			 * else turn timing off, backoff timer
			 * and divide congestion window by 2.
			 *
			 * It is possible for the so_pru_send() to
			 * block and for us to race a reply so we
			 * only do this if the reply field has not
			 * been filled in. R_LOCKED will prevent
			 * the request from being ripped out from under
			 * us entirely.
			 *
			 * Record the last resent procnum to aid us
			 * in duplicate detection on receive.
			 */
			if ((req->r_flags & R_NEEDSXMIT) == 0) {
				if (nfs_showrexmit)
					kprintf("X");
				if (++req->r_rexmit > NFS_MAXREXMIT)
					req->r_rexmit = NFS_MAXREXMIT;
				nmp->nm_maxasync_scaled >>= 1;
				if (nmp->nm_maxasync_scaled < NFS_MINASYNC_SCALED)
					nmp->nm_maxasync_scaled = NFS_MINASYNC_SCALED;
				nfsstats.rpcretries++;
				nmp->nm_lastreprocnum = req->r_procnum;
			} else {
				req->r_flags |= R_SENT;
				req->r_flags &= ~R_NEEDSXMIT;
			}
		}
	}
}

/*
 * Mark all of an nfs mount's outstanding requests with R_SOFTTERM and
 * wait for all requests to complete. This is used by forced unmounts
 * to terminate any outstanding RPCs.
 *
 * Locked requests cannot be canceled but will be marked for
 * soft-termination.
 */
int
nfs_nmcancelreqs(struct nfsmount *nmp)
{
	struct nfsreq *req;
	int i;

	crit_enter();
	TAILQ_FOREACH(req, &nmp->nm_reqq, r_chain) {
		if (req->r_mrep != NULL || (req->r_flags & R_SOFTTERM))
			continue;
		nfs_softterm(req, 0);
	}
	/* XXX the other two queues as well */
	crit_exit();

	for (i = 0; i < 30; i++) {
		crit_enter();
		TAILQ_FOREACH(req, &nmp->nm_reqq, r_chain) {
			if (nmp == req->r_nmp)
				break;
		}
		crit_exit();
		if (req == NULL)
			return (0);
		tsleep(&lbolt, 0, "nfscancel", 0);
	}
	return (EBUSY);
}

/*
 * Soft-terminate a request, effectively marking it as failed.
 *
 * Must be called from within a critical section.
 */
static void
nfs_softterm(struct nfsreq *rep, int islocked)
{
	rep->r_flags |= R_SOFTTERM;
	nfs_hardterm(rep, islocked);
}

/*
 * Hard-terminate a request, typically after getting a response.
 *
 * The state machine can still decide to re-issue it later if necessary.
 *
 * Must be called from within a critical section.
 */
static void
nfs_hardterm(struct nfsreq *rep, int islocked)
{
	struct nfsmount *nmp = rep->r_nmp;

	/*
	 * The nm_send count is decremented now to avoid deadlocks
	 * when the process in soreceive() hasn't yet managed to send
	 * its own request.
	 */
	if (rep->r_flags & R_SENT) {
		rep->r_flags &= ~R_SENT;
	}

	/*
	 * If we locked the request or nobody else has locked the request,
	 * and the request is async, we can move it to the reader thread's
	 * queue now and fix up the state.
	 *
	 * If we locked the request or nobody else has locked the request,
	 * we can wake up anyone blocked waiting for a response on the
	 * request.
	 */
	if (islocked || (rep->r_flags & R_LOCKED) == 0) {
		if ((rep->r_flags & (R_ONREQQ | R_ASYNC)) ==
		    (R_ONREQQ | R_ASYNC)) {
			rep->r_flags &= ~R_ONREQQ;
			TAILQ_REMOVE(&nmp->nm_reqq, rep, r_chain);
			--nmp->nm_reqqlen;
			TAILQ_INSERT_TAIL(&nmp->nm_reqrxq, rep, r_chain);
			KKASSERT(rep->r_info->state == NFSM_STATE_TRY ||
				 rep->r_info->state == NFSM_STATE_WAITREPLY);
			rep->r_info->state = NFSM_STATE_PROCESSREPLY;
			nfssvc_iod_reader_wakeup(nmp);
			if (TAILQ_FIRST(&nmp->nm_bioq) &&
			    nmp->nm_reqqlen == NFS_MAXASYNCBIO * 2 / 3) {
				nfssvc_iod_writer_wakeup(nmp);
			}
		}
		mtx_abort_ex_link(&nmp->nm_rxlock, &rep->r_link);
	}
}

/*
 * Test for a termination condition pending on the process.
 * This is used for NFSMNT_INT mounts.
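 *
 * The check takes the lwp's pending signal set, masks out signals
 * blocked in lwp_sigmask and ignored by the process, and then tests
 * the remainder against NFSINT_SIGMASK.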
 */
int
nfs_sigintr(struct nfsmount *nmp, struct nfsreq *rep, struct thread *td)
{
	sigset_t tmpset;
	struct proc *p;
	struct lwp *lp;

	if (rep && (rep->r_flags & R_SOFTTERM))
		return (EINTR);
	/* Terminate all requests while attempting a forced unmount. */
	if (nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF)
		return (EINTR);
	if (!(nmp->nm_flag & NFSMNT_INT))
		return (0);
	/* td might be NULL YYY */
	if (td == NULL || (p = td->td_proc) == NULL)
		return (0);

	lp = td->td_lwp;
	tmpset = lwp_sigpend(lp);
	SIGSETNAND(tmpset, lp->lwp_sigmask);
	SIGSETNAND(tmpset, p->p_sigignore);
	if (SIGNOTEMPTY(tmpset) && NFSINT_SIGMASK(tmpset))
		return (EINTR);

	return (0);
}

/*
 * Lock a socket against others.
 * Necessary for STREAM sockets to ensure you get an entire rpc request/reply
 * and also to avoid race conditions between the processes with nfs requests
 * in progress when a reconnect is necessary.
 */
int
nfs_sndlock(struct nfsmount *nmp, struct nfsreq *rep)
{
	mtx_t mtx = &nmp->nm_txlock;
	struct thread *td;
	int slptimeo;
	int slpflag;
	int error;

	slpflag = 0;
	slptimeo = 0;
	td = rep ? rep->r_td : NULL;
	if (nmp->nm_flag & NFSMNT_INT)
		slpflag = PCATCH;

	while ((error = mtx_lock_ex_try(mtx)) != 0) {
		if (nfs_sigintr(nmp, rep, td)) {
			error = EINTR;
			break;
		}
		error = mtx_lock_ex(mtx, "nfsndlck", slpflag, slptimeo);
		if (error == 0)
			break;
		if (slpflag == PCATCH) {
			slpflag = 0;
			slptimeo = 2 * hz;
		}
	}
	/* Always fail if our request has been cancelled. */
	if (rep && (rep->r_flags & R_SOFTTERM)) {
		if (error == 0)
			mtx_unlock(mtx);
		error = EINTR;
	}
	return (error);
}

/*
 * Unlock the stream socket for others.
 */
void
nfs_sndunlock(struct nfsmount *nmp)
{
	mtx_unlock(&nmp->nm_txlock);
}

/*
 * Lock the receiver side of the socket.
 *
 * rep may be NULL.
 */
static int
nfs_rcvlock(struct nfsmount *nmp, struct nfsreq *rep)
{
	mtx_t mtx = &nmp->nm_rxlock;
	int slpflag;
	int slptimeo;
	int error;

	/*
	 * Unconditionally check for completion in case another nfsiod
	 * gets the packet while the caller was blocked, before the caller
	 * called us. Packet reception is handled by mainline code which
	 * is protected by the BGL at the moment.
	 *
	 * We do not strictly need the second check just before the
	 * tsleep(), but it's good defensive programming.
	 */
	if (rep && rep->r_mrep != NULL)
		return (EALREADY);

	if (nmp->nm_flag & NFSMNT_INT)
		slpflag = PCATCH;
	else
		slpflag = 0;
	slptimeo = 0;

	while ((error = mtx_lock_ex_try(mtx)) != 0) {
		if (nfs_sigintr(nmp, rep, (rep ? rep->r_td : NULL))) {
			error = EINTR;
			break;
		}
		if (rep && rep->r_mrep != NULL) {
			error = EALREADY;
			break;
		}

		/*
		 * NOTE: can return ENOLCK, but in that case rep->r_mrep
		 *	 will already be set.
		 */
		if (rep) {
			error = mtx_lock_ex_link(mtx, &rep->r_link,
						 "nfsrcvlk",
						 slpflag, slptimeo);
		} else {
			error = mtx_lock_ex(mtx, "nfsrcvlk", slpflag, slptimeo);
		}
		if (error == 0)
			break;

		/*
		 * If our reply was received while we were sleeping,
		 * then just return without taking the lock to avoid a
		 * situation where a single iod could 'capture' the
		 * receive lock.
		 */
		if (rep && rep->r_mrep != NULL) {
			error = EALREADY;
			break;
		}
		if (slpflag == PCATCH) {
			slpflag = 0;
			slptimeo = 2 * hz;
		}
	}
	if (error == 0) {
		if (rep && rep->r_mrep != NULL) {
			error = EALREADY;
			mtx_unlock(mtx);
		}
	}
	return (error);
}

/*
 * Unlock the stream socket for others.
 */
static void
nfs_rcvunlock(struct nfsmount *nmp)
{
	mtx_unlock(&nmp->nm_rxlock);
}

/*
 * nfs_realign:
 *
 * Check for badly aligned mbuf data and realign by copying the unaligned
 * portion of the data into a new mbuf chain and freeing the portions
 * of the old chain that were replaced.
 *
 * We cannot simply realign the data within the existing mbuf chain
 * because the underlying buffers may contain other rpc commands and
 * we cannot afford to overwrite them.
 *
 * We would prefer to avoid this situation entirely. The situation does
 * not occur with NFS/UDP and is supposed to only occasionally occur
 * with TCP. Use vfs.nfs.realign_count and realign_test to check this.
 */
static void
nfs_realign(struct mbuf **pm, int hsiz)
{
	struct mbuf *m;
	struct mbuf *n = NULL;
	int off = 0;

	++nfs_realign_test;

	while ((m = *pm) != NULL) {
		if ((m->m_len & 0x3) || (mtod(m, intptr_t) & 0x3)) {
			n = m_getl(m->m_len, MB_WAIT, MT_DATA, 0, NULL);
			n->m_len = 0;
			break;
		}
		pm = &m->m_next;
	}

	/*
	 * If n is non-NULL, loop on m copying data, then replace the
	 * portion of the chain that had to be realigned.
	 */
	if (n != NULL) {
		++nfs_realign_count;
		while (m) {
			m_copyback(n, off, m->m_len, mtod(m, caddr_t));
			off += m->m_len;
			m = m->m_next;
		}
		m_freem(*pm);
		*pm = n;
	}
}

#ifndef NFS_NOSERVER

/*
 * Parse an RPC request
 * - verify it
 * - fill in the cred struct.
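 *
 * For reference, the fixed portion of the call header dissected below
 * is laid out in 32-bit XDR units roughly as follows (see RFC 1057):
 *
 *	xid, direction (CALL), rpc version (2), program, version,
 *	procedure, cred flavor, cred length, <cred body>,
 *	verf flavor, verf length, <verf body>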
 */
int
nfs_getreq(struct nfsrv_descript *nd, struct nfsd *nfsd, int has_header)
{
	int len, i;
	u_int32_t *tl;
	struct uio uio;
	struct iovec iov;
	caddr_t cp;
	u_int32_t nfsvers, auth_type;
	uid_t nickuid;
	int error = 0, ticklen;
	struct nfsuid *nuidp;
	struct timeval tvin, tvout;
	struct nfsm_info info;
#if 0				/* until encrypted keys are implemented */
	NFSKERBKEYSCHED_T keys;	/* stores key schedule */
#endif

	info.mrep = nd->nd_mrep;
	info.md = nd->nd_md;
	info.dpos = nd->nd_dpos;

	if (has_header) {
		NULLOUT(tl = nfsm_dissect(&info, 10 * NFSX_UNSIGNED));
		nd->nd_retxid = fxdr_unsigned(u_int32_t, *tl++);
		if (*tl++ != rpc_call) {
			m_freem(info.mrep);
			return (EBADRPC);
		}
	} else {
		NULLOUT(tl = nfsm_dissect(&info, 8 * NFSX_UNSIGNED));
	}
	nd->nd_repstat = 0;
	nd->nd_flag = 0;
	if (*tl++ != rpc_vers) {
		nd->nd_repstat = ERPCMISMATCH;
		nd->nd_procnum = NFSPROC_NOOP;
		return (0);
	}
	if (*tl != nfs_prog) {
		nd->nd_repstat = EPROGUNAVAIL;
		nd->nd_procnum = NFSPROC_NOOP;
		return (0);
	}
	tl++;
	nfsvers = fxdr_unsigned(u_int32_t, *tl++);
	if (nfsvers < NFS_VER2 || nfsvers > NFS_VER3) {
		nd->nd_repstat = EPROGMISMATCH;
		nd->nd_procnum = NFSPROC_NOOP;
		return (0);
	}
	if (nfsvers == NFS_VER3)
		nd->nd_flag = ND_NFSV3;
	nd->nd_procnum = fxdr_unsigned(u_int32_t, *tl++);
	if (nd->nd_procnum == NFSPROC_NULL)
		return (0);
	if (nd->nd_procnum >= NFS_NPROCS ||
	    (nd->nd_procnum >= NQNFSPROC_GETLEASE) ||
	    (!nd->nd_flag && nd->nd_procnum > NFSV2PROC_STATFS)) {
		nd->nd_repstat = EPROCUNAVAIL;
		nd->nd_procnum = NFSPROC_NOOP;
		return (0);
	}
	if ((nd->nd_flag & ND_NFSV3) == 0)
		nd->nd_procnum = nfsv3_procid[nd->nd_procnum];
	auth_type = *tl++;
	len = fxdr_unsigned(int, *tl++);
	if (len < 0 || len > RPCAUTH_MAXSIZ) {
		m_freem(info.mrep);
		return (EBADRPC);
	}

	nd->nd_flag &= ~ND_KERBAUTH;
	/*
	 * Handle auth_unix or auth_kerb.
	 */
	if (auth_type == rpc_auth_unix) {
		len = fxdr_unsigned(int, *++tl);
		if (len < 0 || len > NFS_MAXNAMLEN) {
			m_freem(info.mrep);
			return (EBADRPC);
		}
		ERROROUT(nfsm_adv(&info, nfsm_rndup(len)));
		NULLOUT(tl = nfsm_dissect(&info, 3 * NFSX_UNSIGNED));
		bzero((caddr_t)&nd->nd_cr, sizeof (struct ucred));
		nd->nd_cr.cr_ref = 1;
		nd->nd_cr.cr_uid = fxdr_unsigned(uid_t, *tl++);
		nd->nd_cr.cr_gid = fxdr_unsigned(gid_t, *tl++);
		len = fxdr_unsigned(int, *tl);
		if (len < 0 || len > RPCAUTH_UNIXGIDS) {
			m_freem(info.mrep);
			return (EBADRPC);
		}
		NULLOUT(tl = nfsm_dissect(&info, (len + 2) * NFSX_UNSIGNED));
		for (i = 1; i <= len; i++)
			if (i < NGROUPS)
				nd->nd_cr.cr_groups[i] = fxdr_unsigned(gid_t, *tl++);
			else
				tl++;
		nd->nd_cr.cr_ngroups = (len >= NGROUPS) ? NGROUPS : (len + 1);
		if (nd->nd_cr.cr_ngroups > 1)
			nfsrvw_sort(nd->nd_cr.cr_groups, nd->nd_cr.cr_ngroups);
		len = fxdr_unsigned(int, *++tl);
		if (len < 0 || len > RPCAUTH_MAXSIZ) {
			m_freem(info.mrep);
			return (EBADRPC);
		}
		if (len > 0) {
			ERROROUT(nfsm_adv(&info, nfsm_rndup(len)));
		}
	} else if (auth_type == rpc_auth_kerb) {
		switch (fxdr_unsigned(int, *tl++)) {
		case RPCAKN_FULLNAME:
			ticklen = fxdr_unsigned(int, *tl);
			*((u_int32_t *)nfsd->nfsd_authstr) = *tl;
			uio.uio_resid = nfsm_rndup(ticklen) + NFSX_UNSIGNED;
			nfsd->nfsd_authlen = uio.uio_resid + NFSX_UNSIGNED;
			if (uio.uio_resid > (len - 2 * NFSX_UNSIGNED)) {
				m_freem(info.mrep);
				return (EBADRPC);
			}
			uio.uio_offset = 0;
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_segflg = UIO_SYSSPACE;
			iov.iov_base = (caddr_t)&nfsd->nfsd_authstr[4];
			iov.iov_len = RPCAUTH_MAXSIZ - 4;
			ERROROUT(nfsm_mtouio(&info, &uio, uio.uio_resid));
			NULLOUT(tl = nfsm_dissect(&info, 2 * NFSX_UNSIGNED));
			if (*tl++ != rpc_auth_kerb ||
			    fxdr_unsigned(int, *tl) != 4 * NFSX_UNSIGNED) {
				kprintf("Bad kerb verifier\n");
				nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADVERF);
				nd->nd_procnum = NFSPROC_NOOP;
				return (0);
			}
			NULLOUT(cp = nfsm_dissect(&info, 4 * NFSX_UNSIGNED));
			tl = (u_int32_t *)cp;
			if (fxdr_unsigned(int, *tl) != RPCAKN_FULLNAME) {
				kprintf("Not fullname kerb verifier\n");
				nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADVERF);
				nd->nd_procnum = NFSPROC_NOOP;
				return (0);
			}
			cp += NFSX_UNSIGNED;
			bcopy(cp, nfsd->nfsd_verfstr, 3 * NFSX_UNSIGNED);
			nfsd->nfsd_verflen = 3 * NFSX_UNSIGNED;
			nd->nd_flag |= ND_KERBFULL;
			nfsd->nfsd_flag |= NFSD_NEEDAUTH;
			break;
		case RPCAKN_NICKNAME:
			if (len != 2 * NFSX_UNSIGNED) {
				kprintf("Kerb nickname short\n");
				nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADCRED);
				nd->nd_procnum = NFSPROC_NOOP;
				return (0);
			}
			nickuid = fxdr_unsigned(uid_t, *tl);
			NULLOUT(tl = nfsm_dissect(&info, 2 * NFSX_UNSIGNED));
			if (*tl++ != rpc_auth_kerb ||
			    fxdr_unsigned(int, *tl) != 3 * NFSX_UNSIGNED) {
				kprintf("Kerb nick verifier bad\n");
				nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADVERF);
				nd->nd_procnum = NFSPROC_NOOP;
				return (0);
			}
			NULLOUT(tl = nfsm_dissect(&info, 3 * NFSX_UNSIGNED));
			tvin.tv_sec = *tl++;
			tvin.tv_usec = *tl;

			for (nuidp = NUIDHASH(nfsd->nfsd_slp,nickuid)->lh_first;
			     nuidp != 0; nuidp = nuidp->nu_hash.le_next) {
				if (nuidp->nu_cr.cr_uid == nickuid &&
				    (!nd->nd_nam2 ||
				     netaddr_match(NU_NETFAM(nuidp),
					&nuidp->nu_haddr, nd->nd_nam2)))
					break;
			}
			if (!nuidp) {
				nd->nd_repstat =
					(NFSERR_AUTHERR|AUTH_REJECTCRED);
				nd->nd_procnum = NFSPROC_NOOP;
				return (0);
			}

			/*
			 * Now, decrypt the timestamp using the session key
			 * and validate it.
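			 *
			 * The decryption itself is stubbed out below
			 * (NFSKERB is not implemented here); the reply is
			 * still rejected if the cached credential has
			 * expired or if the timestamp fails to advance.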
			 */
#ifdef NFSKERB
			XXX
#endif

			tvout.tv_sec = fxdr_unsigned(long, tvout.tv_sec);
			tvout.tv_usec = fxdr_unsigned(long, tvout.tv_usec);
			if (nuidp->nu_expire < time_second ||
			    nuidp->nu_timestamp.tv_sec > tvout.tv_sec ||
			    (nuidp->nu_timestamp.tv_sec == tvout.tv_sec &&
			     nuidp->nu_timestamp.tv_usec > tvout.tv_usec)) {
				nuidp->nu_expire = 0;
				nd->nd_repstat =
					(NFSERR_AUTHERR|AUTH_REJECTVERF);
				nd->nd_procnum = NFSPROC_NOOP;
				return (0);
			}
			nfsrv_setcred(&nuidp->nu_cr, &nd->nd_cr);
			nd->nd_flag |= ND_KERBNICK;
		}
	} else {
		nd->nd_repstat = (NFSERR_AUTHERR | AUTH_REJECTCRED);
		nd->nd_procnum = NFSPROC_NOOP;
		return (0);
	}

	nd->nd_md = info.md;
	nd->nd_dpos = info.dpos;
	return (0);
nfsmout:
	return (error);
}

#endif

/*
 * Send a message to the originating process's terminal. The thread and/or
 * process may be NULL. YYY the thread should not be NULL but there may
 * still be some uio_td's that are still being passed as NULL through to
 * nfsm_request().
 */
static int
nfs_msg(struct thread *td, char *server, char *msg)
{
	tpr_t tpr;

	if (td && td->td_proc)
		tpr = tprintf_open(td->td_proc);
	else
		tpr = NULL;
	tprintf(tpr, "nfs server %s: %s\n", server, msg);
	tprintf_close(tpr);
	return (0);
}

#ifndef NFS_NOSERVER
/*
 * Socket upcall routine for the nfsd sockets.
 * The caddr_t arg is a pointer to the "struct nfssvc_sock".
 * Essentially do as much as possible non-blocking, else punt and it will
 * be called with MB_WAIT from an nfsd.
 */
void
nfsrv_rcv(struct socket *so, void *arg, int waitflag)
{
	struct nfssvc_sock *slp = (struct nfssvc_sock *)arg;
	struct mbuf *m;
	struct sockaddr *nam;
	struct sockbuf sio;
	int flags, error;
	int nparallel_wakeup = 0;

	if ((slp->ns_flag & SLP_VALID) == 0)
		return;

	/*
	 * Do not allow an infinite number of completed RPC records to build
	 * up before we stop reading data from the socket. Otherwise we could
	 * end up holding onto an unreasonable number of mbufs for requests
	 * waiting for service.
	 *
	 * This should give pretty good feedback to the TCP layer and
	 * prevent a memory crunch for other protocols.
	 *
	 * Note that the same service socket can be dispatched to several
	 * nfs servers simultaneously.
	 *
	 * The TCP protocol callback calls us with MB_DONTWAIT.
	 * nfsd calls us with MB_WAIT (typically).
	 */
	if (waitflag == MB_DONTWAIT && slp->ns_numrec >= nfsd_waiting / 2 + 1) {
		slp->ns_flag |= SLP_NEEDQ;
		goto dorecs;
	}

	/*
	 * Handle protocol specifics to parse an RPC request. We always
	 * pull from the socket using non-blocking I/O.
	 */
	if (so->so_type == SOCK_STREAM) {
		/*
		 * The data has to be read in an orderly fashion from a TCP
		 * stream, unlike a UDP socket. It is possible for soreceive
		 * and/or nfsrv_getstream() to block, so make sure only one
		 * entity is messing around with the TCP stream at any given
		 * moment. The receive sockbuf's lock in soreceive is not
		 * sufficient.
		 *
		 * Note that this procedure can be called from any number of
		 * NFS servers *OR* can be upcalled directly from a TCP
		 * protocol thread.
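		 *
		 * SLP_GETSTREAM acts as a simple ownership flag here: if
		 * it is already held we just set SLP_NEEDQ and return so
		 * an nfsd can pick up the deferred work later, rather
		 * than blocking in the upcall.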
		 */
		if (slp->ns_flag & SLP_GETSTREAM) {
			slp->ns_flag |= SLP_NEEDQ;
			goto dorecs;
		}
		slp->ns_flag |= SLP_GETSTREAM;

		/*
		 * Do soreceive(). Pull out as much data as possible without
		 * blocking.
		 */
		sbinit(&sio, 1000000000);
		flags = MSG_DONTWAIT;
		error = so_pru_soreceive(so, &nam, NULL, &sio, NULL, &flags);
		if (error || sio.sb_mb == NULL) {
			if (error == EWOULDBLOCK)
				slp->ns_flag |= SLP_NEEDQ;
			else
				slp->ns_flag |= SLP_DISCONN;
			slp->ns_flag &= ~SLP_GETSTREAM;
			goto dorecs;
		}
		m = sio.sb_mb;
		if (slp->ns_rawend) {
			slp->ns_rawend->m_next = m;
			slp->ns_cc += sio.sb_cc;
		} else {
			slp->ns_raw = m;
			slp->ns_cc = sio.sb_cc;
		}
		while (m->m_next)
			m = m->m_next;
		slp->ns_rawend = m;

		/*
		 * Now try to parse as many records as we can out of the
		 * raw stream data.
		 */
		error = nfsrv_getstream(slp, waitflag, &nparallel_wakeup);
		if (error) {
			if (error == EPERM)
				slp->ns_flag |= SLP_DISCONN;
			else
				slp->ns_flag |= SLP_NEEDQ;
		}
		slp->ns_flag &= ~SLP_GETSTREAM;
	} else {
		/*
		 * For UDP, soreceive typically pulls just one packet, so
		 * loop to get the whole batch.
		 */
		do {
			sbinit(&sio, 1000000000);
			flags = MSG_DONTWAIT;
			error = so_pru_soreceive(so, &nam, NULL, &sio,
						 NULL, &flags);
			if (sio.sb_mb) {
				struct nfsrv_rec *rec;
				int mf = (waitflag & MB_DONTWAIT) ?
					 M_NOWAIT : M_WAITOK;
				rec = kmalloc(sizeof(struct nfsrv_rec),
					      M_NFSRVDESC, mf);
				if (!rec) {
					if (nam)
						FREE(nam, M_SONAME);
					m_freem(sio.sb_mb);
					continue;
				}
				nfs_realign(&sio.sb_mb, 10 * NFSX_UNSIGNED);
				rec->nr_address = nam;
				rec->nr_packet = sio.sb_mb;
				STAILQ_INSERT_TAIL(&slp->ns_rec, rec, nr_link);
				++slp->ns_numrec;
				++nparallel_wakeup;
			}
			if (error) {
				if ((so->so_proto->pr_flags & PR_CONNREQUIRED)
				    && error != EWOULDBLOCK) {
					slp->ns_flag |= SLP_DISCONN;
					goto dorecs;
				}
			}
		} while (sio.sb_mb);
	}

	/*
	 * If we were upcalled from the TCP protocol layer and we have
	 * fully parsed records ready to go, or there is new data pending,
	 * or something went wrong, try to wake up an nfsd thread to deal
	 * with it.
	 */
dorecs:
	if (waitflag == MB_DONTWAIT && (slp->ns_numrec > 0
	    || (slp->ns_flag & (SLP_NEEDQ | SLP_DISCONN)))) {
		nfsrv_wakenfsd(slp, nparallel_wakeup);
	}
}

/*
 * Try to extract an RPC request from the mbuf data list received on a
 * stream socket. The "waitflag" argument indicates whether or not it
 * can sleep.
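 *
 * Stream data is framed with 4-byte XDR record marks (RFC 1057 record
 * marking): the high bit flags the last fragment of a record and the
 * low 31 bits give the fragment length, i.e. roughly:
 *
 *	recmark = ntohl(recmark);
 *	length  = recmark & ~0x80000000;
 *	last    = recmark &  0x80000000;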
 */
static int
nfsrv_getstream(struct nfssvc_sock *slp, int waitflag, int *countp)
{
	struct mbuf *m, **mpp;
	char *cp1, *cp2;
	int len;
	struct mbuf *om, *m2, *recm;
	u_int32_t recmark;

	for (;;) {
		if (slp->ns_reclen == 0) {
			if (slp->ns_cc < NFSX_UNSIGNED)
				return (0);
			m = slp->ns_raw;
			if (m->m_len >= NFSX_UNSIGNED) {
				bcopy(mtod(m, caddr_t), (caddr_t)&recmark, NFSX_UNSIGNED);
				m->m_data += NFSX_UNSIGNED;
				m->m_len -= NFSX_UNSIGNED;
			} else {
				cp1 = (caddr_t)&recmark;
				cp2 = mtod(m, caddr_t);
				while (cp1 < ((caddr_t)&recmark) + NFSX_UNSIGNED) {
					while (m->m_len == 0) {
						m = m->m_next;
						cp2 = mtod(m, caddr_t);
					}
					*cp1++ = *cp2++;
					m->m_data++;
					m->m_len--;
				}
			}
			slp->ns_cc -= NFSX_UNSIGNED;
			recmark = ntohl(recmark);
			slp->ns_reclen = recmark & ~0x80000000;
			if (recmark & 0x80000000)
				slp->ns_flag |= SLP_LASTFRAG;
			else
				slp->ns_flag &= ~SLP_LASTFRAG;
			if (slp->ns_reclen > NFS_MAXPACKET || slp->ns_reclen <= 0) {
				log(LOG_ERR, "%s (%d) from nfs client\n",
				    "impossible packet length",
				    slp->ns_reclen);
				return (EPERM);
			}
		}

		/*
		 * Now get the record part.
		 *
		 * Note that slp->ns_reclen may be 0. Linux sometimes
		 * generates 0-length RPCs.
		 */
		recm = NULL;
		if (slp->ns_cc == slp->ns_reclen) {
			recm = slp->ns_raw;
			slp->ns_raw = slp->ns_rawend = NULL;
			slp->ns_cc = slp->ns_reclen = 0;
		} else if (slp->ns_cc > slp->ns_reclen) {
			len = 0;
			m = slp->ns_raw;
			om = NULL;

			while (len < slp->ns_reclen) {
				if ((len + m->m_len) > slp->ns_reclen) {
					m2 = m_copym(m, 0, slp->ns_reclen - len,
						     waitflag);
					if (m2) {
						if (om) {
							om->m_next = m2;
							recm = slp->ns_raw;
						} else
							recm = m2;
						m->m_data += slp->ns_reclen - len;
						m->m_len -= slp->ns_reclen - len;
						len = slp->ns_reclen;
					} else {
						return (EWOULDBLOCK);
					}
				} else if ((len + m->m_len) == slp->ns_reclen) {
					om = m;
					len += m->m_len;
					m = m->m_next;
					recm = slp->ns_raw;
					om->m_next = NULL;
				} else {
					om = m;
					len += m->m_len;
					m = m->m_next;
				}
			}
			slp->ns_raw = m;
			slp->ns_cc -= len;
			slp->ns_reclen = 0;
		} else {
			return (0);
		}

		/*
		 * Accumulate the fragments into a record.
		 */
		mpp = &slp->ns_frag;
		while (*mpp)
			mpp = &((*mpp)->m_next);
		*mpp = recm;
		if (slp->ns_flag & SLP_LASTFRAG) {
			struct nfsrv_rec *rec;
			int mf = (waitflag & MB_DONTWAIT) ? M_NOWAIT : M_WAITOK;
			rec = kmalloc(sizeof(struct nfsrv_rec), M_NFSRVDESC, mf);
			if (!rec) {
				m_freem(slp->ns_frag);
			} else {
				nfs_realign(&slp->ns_frag, 10 * NFSX_UNSIGNED);
				rec->nr_address = NULL;
				rec->nr_packet = slp->ns_frag;
				STAILQ_INSERT_TAIL(&slp->ns_rec, rec, nr_link);
				++slp->ns_numrec;
				++*countp;
			}
			slp->ns_frag = NULL;
		}
	}
}

/*
 * Parse an RPC header.
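 *
 * Dequeue the next completed record from the service socket, wrap it
 * in a newly allocated nfsrv_descript, and run it through nfs_getreq()
 * to validate the RPC header and credentials.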
 */
int
nfsrv_dorec(struct nfssvc_sock *slp, struct nfsd *nfsd,
	    struct nfsrv_descript **ndp)
{
	struct nfsrv_rec *rec;
	struct mbuf *m;
	struct sockaddr *nam;
	struct nfsrv_descript *nd;
	int error;

	*ndp = NULL;
	if ((slp->ns_flag & SLP_VALID) == 0 || !STAILQ_FIRST(&slp->ns_rec))
		return (ENOBUFS);
	rec = STAILQ_FIRST(&slp->ns_rec);
	STAILQ_REMOVE_HEAD(&slp->ns_rec, nr_link);
	KKASSERT(slp->ns_numrec > 0);
	--slp->ns_numrec;
	nam = rec->nr_address;
	m = rec->nr_packet;
	kfree(rec, M_NFSRVDESC);
	MALLOC(nd, struct nfsrv_descript *, sizeof (struct nfsrv_descript),
	       M_NFSRVDESC, M_WAITOK);
	nd->nd_md = nd->nd_mrep = m;
	nd->nd_nam2 = nam;
	nd->nd_dpos = mtod(m, caddr_t);
	error = nfs_getreq(nd, nfsd, TRUE);
	if (error) {
		if (nam) {
			FREE(nam, M_SONAME);
		}
		kfree((caddr_t)nd, M_NFSRVDESC);
		return (error);
	}
	*ndp = nd;
	nfsd->nfsd_nd = nd;
	return (0);
}

/*
 * Try to assign service sockets to nfsd threads based on the number
 * of new rpc requests that have been queued on the service socket.
 *
 * If no nfsd's are available or additional requests are pending, set the
 * NFSD_CHECKSLP flag so that one of the running nfsds will go look for
 * the work in the nfssvc_sock list when it is finished processing its
 * current work. This flag is only cleared when an nfsd cannot find
 * any new work to perform.
 */
void
nfsrv_wakenfsd(struct nfssvc_sock *slp, int nparallel)
{
	struct nfsd *nd;

	if ((slp->ns_flag & SLP_VALID) == 0)
		return;
	if (nparallel <= 1)
		nparallel = 1;
	TAILQ_FOREACH(nd, &nfsd_head, nfsd_chain) {
		if (nd->nfsd_flag & NFSD_WAITING) {
			nd->nfsd_flag &= ~NFSD_WAITING;
			if (nd->nfsd_slp)
				panic("nfsd wakeup");
			slp->ns_sref++;
			nd->nfsd_slp = slp;
			wakeup((caddr_t)nd);
			if (--nparallel == 0)
				break;
		}
	}
	if (nparallel) {
		slp->ns_flag |= SLP_DOREC;
		nfsd_head_flag |= NFSD_CHECKSLP;
	}
}
#endif /* NFS_NOSERVER */