/*	$NetBSD: nfs_socket.c,v 1.197 2015/07/15 03:28:55 manu Exp $	*/

/*
 * Copyright (c) 1989, 1991, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_socket.c	8.5 (Berkeley) 3/30/95
 */

/*
 * Socket operations for use by nfs
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nfs_socket.c,v 1.197 2015/07/15 03:28:55 manu Exp $");

#ifdef _KERNEL_OPT
#include "opt_nfs.h"
#include "opt_mbuftrace.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/evcnt.h>
#include <sys/callout.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>
#include <sys/vnode.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/syslog.h>
#include <sys/tprintf.h>
#include <sys/namei.h>
#include <sys/signal.h>
#include <sys/signalvar.h>
#include <sys/kauth.h>
#include <sys/time.h>

#include <netinet/in.h>
#include <netinet/tcp.h>

#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/xdr_subs.h>
#include <nfs/nfsm_subs.h>
#include <nfs/nfsmount.h>
#include <nfs/nfsnode.h>
#include <nfs/nfsrtt.h>
#include <nfs/nfs_var.h>

#ifdef MBUFTRACE
struct mowner nfs_mowner = MOWNER_INIT("nfs", "");
#endif

/*
 * Estimate the RTO for an nfs rpc sent via an unreliable datagram.
 * Use the mean and mean deviation of rtt for the appropriate type of rpc
 * for the frequent rpcs and a default for the others.
 * The justification for doing "other" this way is that these rpcs
 * happen so infrequently that a timer estimate would probably be stale.
 * Also, since many of these rpcs are
 * non-idempotent, a conservative timeout is desired.
 * getattr, lookup - A+2D
 * read, write - A+4D
 * other - nm_timeo
 */
#define	NFS_RTO(n, t) \
	((t) == 0 ? (n)->nm_timeo : \
	 ((t) < 3 ? \
	  (((((n)->nm_srtt[t-1] + 3) >> 2) + (n)->nm_sdrtt[t-1] + 1) >> 1) : \
	  ((((n)->nm_srtt[t-1] + 7) >> 3) + (n)->nm_sdrtt[t-1] + 1)))
#define	NFS_SRTT(r)	(r)->r_nmp->nm_srtt[nfs_proct[(r)->r_procnum] - 1]
#define	NFS_SDRTT(r)	(r)->r_nmp->nm_sdrtt[nfs_proct[(r)->r_procnum] - 1]
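
/*
 * Worked example of NFS_RTO (illustrative, not part of the original
 * sources): the "A+2D" / "A+4D" summary above implies nm_srtt[] is kept
 * scaled by 8 (it is initialized to NFS_TIMEO << 3 in nfs_connect() below)
 * and nm_sdrtt[] by 4.  For a getattr (timer class t == 1):
 *
 *	NFS_RTO = (((8A + 3) >> 2) + 4D + 1) >> 1  ~=  (2A + 4D) / 2 = A + 2D
 *
 * and for a read (t == 3):
 *
 *	NFS_RTO = ((8A + 7) >> 3) + 4D + 1  ~=  A + 4D
 *
 * The "+3", "+7" and "+1" terms merely round the integer divisions up.
 */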

/*
 * Defines which timer to use for the procnum.
 * 0 - default
 * 1 - getattr
 * 2 - lookup
 * 3 - read
 * 4 - write
 */
const int nfs_proct[NFS_NPROCS] = {
	[NFSPROC_NULL] = 0,
	[NFSPROC_GETATTR] = 1,
	[NFSPROC_SETATTR] = 0,
	[NFSPROC_LOOKUP] = 2,
	[NFSPROC_ACCESS] = 1,
	[NFSPROC_READLINK] = 3,
	[NFSPROC_READ] = 3,
	[NFSPROC_WRITE] = 4,
	[NFSPROC_CREATE] = 0,
	[NFSPROC_MKDIR] = 0,
	[NFSPROC_SYMLINK] = 0,
	[NFSPROC_MKNOD] = 0,
	[NFSPROC_REMOVE] = 0,
	[NFSPROC_RMDIR] = 0,
	[NFSPROC_RENAME] = 0,
	[NFSPROC_LINK] = 0,
	[NFSPROC_READDIR] = 3,
	[NFSPROC_READDIRPLUS] = 3,
	[NFSPROC_FSSTAT] = 0,
	[NFSPROC_FSINFO] = 0,
	[NFSPROC_PATHCONF] = 0,
	[NFSPROC_COMMIT] = 0,
	[NFSPROC_NOOP] = 0,
};

#ifdef DEBUG
/*
 * Avoid spamming the console with debugging messages.  We only print
 * the nfs timer and reply error debugs every 10 seconds.
 */
const struct timeval nfs_err_interval = { 10, 0 };
struct timeval nfs_reply_last_err_time;
struct timeval nfs_timer_last_err_time;
#endif

/*
 * There is a congestion window for outstanding rpcs maintained per mount
 * point.  The cwnd size is adjusted in roughly the way that:
 * Van Jacobson, Congestion avoidance and Control, In "Proceedings of
 * SIGCOMM '88".  ACM, August 1988.
 * describes for TCP.  The cwnd size is chopped in half on a retransmit
 * timeout and incremented by 1/cwnd when each rpc reply is received and
 * a full cwnd of rpcs is in progress.
 * (The sent count and cwnd are scaled for integer arith.)
 * Variants of "slow start" were tried and were found to be too much of a
 * performance hit (ave. rtt 3 times larger),
 * I suspect due to the large rtt that nfs rpcs have.
 */
int nfsrtton = 0;
struct nfsrtt nfsrtt;
static const int nfs_backoff[8] = { 2, 4, 8, 16, 32, 64, 128, 256, };
struct nfsreqhead nfs_reqq;
static callout_t nfs_timer_ch;
static struct evcnt nfs_timer_ev;
static struct evcnt nfs_timer_start_ev;
static struct evcnt nfs_timer_stop_ev;
static kmutex_t nfs_timer_lock;
static bool (*nfs_timer_srvvec)(void);
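
/*
 * Illustrative numbers for the scaled window (assuming the usual nfs.h
 * definitions NFS_CWNDSCALE == 256 and NFS_MAXCWND == 256 * 32; check the
 * headers on your tree): nm_cwnd starts at NFS_MAXCWND / 2, i.e. room for
 * 16 outstanding rpcs, since each rpc in flight adds NFS_CWNDSCALE to
 * nm_sent.  A retransmit timeout in nfs_timer() halves nm_cwnd, but never
 * below NFS_CWNDSCALE (one rpc), giving the multiplicative decrease of
 * the scheme cited above.
 */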

/*
 * Initialize sockets and congestion for a new NFS connection.
 * We do not free the sockaddr if error.
 */
int
nfs_connect(struct nfsmount *nmp, struct nfsreq *rep, struct lwp *l)
{
	struct socket *so;
	int error, rcvreserve, sndreserve;
	struct sockaddr *saddr;
	struct sockaddr_in sin;
	struct sockaddr_in6 sin6;
	int val;

	nmp->nm_so = NULL;
	saddr = mtod(nmp->nm_nam, struct sockaddr *);
	error = socreate(saddr->sa_family, &nmp->nm_so,
	    nmp->nm_sotype, nmp->nm_soproto, l, NULL);
	if (error)
		goto bad;
	so = nmp->nm_so;
#ifdef MBUFTRACE
	so->so_mowner = &nfs_mowner;
	so->so_rcv.sb_mowner = &nfs_mowner;
	so->so_snd.sb_mowner = &nfs_mowner;
#endif
	nmp->nm_soflags = so->so_proto->pr_flags;

	/*
	 * Some servers require that the client port be a reserved port
	 * number.
	 */
	if (saddr->sa_family == AF_INET && (nmp->nm_flag & NFSMNT_RESVPORT)) {
		val = IP_PORTRANGE_LOW;

		if ((error = so_setsockopt(NULL, so, IPPROTO_IP, IP_PORTRANGE,
		    &val, sizeof(val))))
			goto bad;
		sin.sin_len = sizeof(struct sockaddr_in);
		sin.sin_family = AF_INET;
		sin.sin_addr.s_addr = INADDR_ANY;
		sin.sin_port = 0;
		error = sobind(so, (struct sockaddr *)&sin, &lwp0);
		if (error)
			goto bad;
	}
	if (saddr->sa_family == AF_INET6 && (nmp->nm_flag & NFSMNT_RESVPORT)) {
		val = IPV6_PORTRANGE_LOW;

		if ((error = so_setsockopt(NULL, so, IPPROTO_IPV6,
		    IPV6_PORTRANGE, &val, sizeof(val))))
			goto bad;
		memset(&sin6, 0, sizeof(sin6));
		sin6.sin6_len = sizeof(struct sockaddr_in6);
		sin6.sin6_family = AF_INET6;
		error = sobind(so, (struct sockaddr *)&sin6, &lwp0);
		if (error)
			goto bad;
	}

	/*
	 * Protocols that do not require connections may be optionally left
	 * unconnected for servers that reply from a port other than
	 * NFS_PORT.
	 */
	solock(so);
	if (nmp->nm_flag & NFSMNT_NOCONN) {
		if (nmp->nm_soflags & PR_CONNREQUIRED) {
			sounlock(so);
			error = ENOTCONN;
			goto bad;
		}
	} else {
		error = soconnect(so, mtod(nmp->nm_nam, struct sockaddr *), l);
		if (error) {
			sounlock(so);
			goto bad;
		}

		/*
		 * Wait for the connection to complete.  Cribbed from the
		 * connect system call but with the wait timing out so
		 * that interruptible mounts don't hang here for a long time.
		 */
		while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) {
			(void)sowait(so, false, 2 * hz);
			if ((so->so_state & SS_ISCONNECTING) &&
			    so->so_error == 0 && rep &&
			    (error = nfs_sigintr(nmp, rep, rep->r_lwp)) != 0) {
				so->so_state &= ~SS_ISCONNECTING;
				sounlock(so);
				goto bad;
			}
		}
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			sounlock(so);
			goto bad;
		}
	}
	if (nmp->nm_flag & (NFSMNT_SOFT | NFSMNT_INT)) {
		so->so_rcv.sb_timeo = (5 * hz);
		so->so_snd.sb_timeo = (5 * hz);
	} else {
		/*
		 * Enable receive timeout to detect server crash and
		 * reconnect.  Otherwise, we can be stuck in soreceive
		 * forever.
		 */
		so->so_rcv.sb_timeo = (5 * hz);
		so->so_snd.sb_timeo = 0;
	}
	if (nmp->nm_sotype == SOCK_DGRAM) {
		sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR) * 3;
		rcvreserve = (max(nmp->nm_rsize, nmp->nm_readdirsize) +
		    NFS_MAXPKTHDR) * 2;
	} else if (nmp->nm_sotype == SOCK_SEQPACKET) {
		sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR) * 3;
		rcvreserve = (max(nmp->nm_rsize, nmp->nm_readdirsize) +
		    NFS_MAXPKTHDR) * 3;
	} else {
		sounlock(so);
		if (nmp->nm_sotype != SOCK_STREAM)
			panic("nfscon sotype");
		if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
			val = 1;
			so_setsockopt(NULL, so, SOL_SOCKET, SO_KEEPALIVE, &val,
			    sizeof(val));
		}
		if (so->so_proto->pr_protocol == IPPROTO_TCP) {
			val = 1;
			so_setsockopt(NULL, so, IPPROTO_TCP, TCP_NODELAY, &val,
			    sizeof(val));
		}
		sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR +
		    sizeof (u_int32_t)) * 3;
		rcvreserve = (nmp->nm_rsize + NFS_MAXPKTHDR +
		    sizeof (u_int32_t)) * 3;
		solock(so);
	}
	error = soreserve(so, sndreserve, rcvreserve);
	if (error) {
		sounlock(so);
		goto bad;
	}
	so->so_rcv.sb_flags |= SB_NOINTR;
	so->so_snd.sb_flags |= SB_NOINTR;
	sounlock(so);

	/* Initialize other non-zero congestion variables */
	nmp->nm_srtt[0] = nmp->nm_srtt[1] = nmp->nm_srtt[2] = nmp->nm_srtt[3] =
	    NFS_TIMEO << 3;
	nmp->nm_sdrtt[0] = nmp->nm_sdrtt[1] = nmp->nm_sdrtt[2] =
	    nmp->nm_sdrtt[3] = 0;
	nmp->nm_cwnd = NFS_MAXCWND / 2;	    /* Initial send window */
	nmp->nm_sent = 0;
	nmp->nm_timeouts = 0;
	return (0);

bad:
	nfs_disconnect(nmp);
	return (error);
}
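
/*
 * Illustrative sizing for the soreserve() call above, assuming a UDP
 * mount with nm_wsize == nm_rsize == 8192: the send side reserves room
 * for three maximum-sized requests, (8192 + NFS_MAXPKTHDR) * 3, and the
 * receive side for two maximum-sized replies.  Stream (TCP) mounts add
 * sizeof (u_int32_t) per message for the RPC record mark that prefixes
 * each request on a stream socket.
 */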

/*
 * Reconnect routine:
 * Called when a connection is broken on a reliable protocol.
 * - clean up the old socket
 * - nfs_connect() again
 * - set R_MUSTRESEND for all outstanding requests on mount point
 * If this fails the mount point is DEAD!
 * nb: Must be called with the nfs_sndlock() set on the mount point.
 */
int
nfs_reconnect(struct nfsreq *rep)
{
	struct nfsreq *rp;
	struct nfsmount *nmp = rep->r_nmp;
	int error;
	time_t before_ts;

	nfs_disconnect(nmp);

	/*
	 * Force unmount: do not try to reconnect
	 */
	if (nmp->nm_iflag & NFSMNT_DISMNTFORCE)
		return EIO;

	before_ts = time_uptime;
	while ((error = nfs_connect(nmp, rep, &lwp0)) != 0) {
		if (error == EINTR || error == ERESTART)
			return (EINTR);

		if (rep->r_flags & R_SOFTTERM)
			return (EIO);

		/*
		 * A soft mount can fail here, but not too fast:
		 * we want to make sure we have at least honoured
		 * the NFS timeout.
		 */
		if ((nmp->nm_flag & NFSMNT_SOFT) &&
		    (time_uptime - before_ts > nmp->nm_timeo / NFS_HZ))
			return (EIO);

		kpause("nfscn2", false, hz, NULL);
	}

	/*
	 * Loop through outstanding request list and fix up all requests
	 * on old socket.
	 */
	TAILQ_FOREACH(rp, &nfs_reqq, r_chain) {
		if (rp->r_nmp == nmp) {
			if ((rp->r_flags & R_MUSTRESEND) == 0)
				rp->r_flags |= R_MUSTRESEND | R_REXMITTED;
			rp->r_rexmit = 0;
		}
	}
	return (0);
}
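
/*
 * Note on the soft-mount check above: nm_timeo is kept in NFS_HZ ticks
 * (NFS_HZ is the NFS timer rate, traditionally on the order of 10
 * ticks/second), so nm_timeo / NFS_HZ converts it to the whole seconds
 * that the time_uptime delta is measured in.
 */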

/*
 * NFS disconnect.  Clean up and unlink.
 */
void
nfs_disconnect(struct nfsmount *nmp)
{
	struct socket *so;
	int drain = 0;

	if (nmp->nm_so) {
		so = nmp->nm_so;
		nmp->nm_so = NULL;
		solock(so);
		soshutdown(so, SHUT_RDWR);
		sounlock(so);
		drain = (nmp->nm_iflag & NFSMNT_DISMNT) != 0;
		if (drain) {
			/*
			 * soshutdown() above should wake up the current
			 * listener.
			 * Now wake up those waiting for the receive lock, and
			 * wait for them to go away unhappy, to prevent *nmp
			 * from evaporating while they're sleeping.
			 */
			mutex_enter(&nmp->nm_lock);
			while (nmp->nm_waiters > 0) {
				cv_broadcast(&nmp->nm_rcvcv);
				cv_broadcast(&nmp->nm_sndcv);
				cv_wait(&nmp->nm_disconcv, &nmp->nm_lock);
			}
			mutex_exit(&nmp->nm_lock);
		}
		soclose(so);
	}
#ifdef DIAGNOSTIC
	if (drain && (nmp->nm_waiters > 0))
		panic("nfs_disconnect: waiters left after drain?");
#endif
}

void
nfs_safedisconnect(struct nfsmount *nmp)
{
	struct nfsreq dummyreq;

	memset(&dummyreq, 0, sizeof(dummyreq));
	dummyreq.r_nmp = nmp;
	nfs_rcvlock(nmp, &dummyreq); /* XXX ignored error return */
	nfs_disconnect(nmp);
	nfs_rcvunlock(nmp);
}

/*
 * This is the nfs send routine.  For connection based socket types, it
 * must be called with an nfs_sndlock() on the socket.
 * "rep == NULL" indicates that it has been called from a server.
 * For the client side:
 * - return EINTR if the RPC is terminated, 0 otherwise
 * - set R_MUSTRESEND if the send fails for any reason
 * - do any cleanup required by recoverable socket errors (???)
 * For the server side:
 * - return EINTR or ERESTART if interrupted by a signal
 * - return EPIPE if a connection is lost for connection based sockets (TCP...)
 * - do any cleanup required by recoverable socket errors (???)
 */
int
nfs_send(struct socket *so, struct mbuf *nam, struct mbuf *top,
    struct nfsreq *rep, struct lwp *l)
{
	struct sockaddr *sendnam;
	int error, soflags, flags;

	/* XXX nfs_doio()/nfs_request() calls with rep->r_lwp == NULL */
	if (l == NULL && rep->r_lwp == NULL)
		l = curlwp;

	if (rep) {
		if (rep->r_flags & R_SOFTTERM) {
			m_freem(top);
			return (EINTR);
		}
		if ((so = rep->r_nmp->nm_so) == NULL) {
			rep->r_flags |= R_MUSTRESEND;
			m_freem(top);
			return (0);
		}
		rep->r_flags &= ~R_MUSTRESEND;
		soflags = rep->r_nmp->nm_soflags;
	} else
		soflags = so->so_proto->pr_flags;
	if ((soflags & PR_CONNREQUIRED) || (so->so_state & SS_ISCONNECTED))
		sendnam = NULL;
	else
		sendnam = mtod(nam, struct sockaddr *);
	if (so->so_type == SOCK_SEQPACKET)
		flags = MSG_EOR;
	else
		flags = 0;

	error = (*so->so_send)(so, sendnam, NULL, top, NULL, flags, l);
	if (error) {
		if (rep) {
			if (error == ENOBUFS && so->so_type == SOCK_DGRAM) {
				/*
				 * We're too fast for the network/driver,
				 * and UDP isn't flowcontrolled.
				 * We need to resend.  This is not fatal,
				 * just try again.
				 *
				 * Could be smarter here by doing some sort
				 * of a backoff, but this is rare.
				 */
				rep->r_flags |= R_MUSTRESEND;
			} else {
				if (error != EPIPE)
					log(LOG_INFO,
					    "nfs send error %d for %s\n",
					    error,
					    rep->r_nmp->nm_mountp->
					    mnt_stat.f_mntfromname);
				/*
				 * Deal with errors for the client side.
				 */
				if (rep->r_flags & R_SOFTTERM)
					error = EINTR;
				else if (error != EMSGSIZE)
					rep->r_flags |= R_MUSTRESEND;
			}
		} else {
			/*
			 * See above.  This error can happen under normal
			 * circumstances and the log is too noisy.
			 * The error will still show up in nfsstat.
			 */
			if (error != ENOBUFS || so->so_type != SOCK_DGRAM)
				log(LOG_INFO, "nfsd send error %d\n", error);
		}

		/*
		 * Handle any recoverable (soft) socket errors here. (???)
		 */
		if (error != EINTR && error != ERESTART &&
		    error != EWOULDBLOCK && error != EPIPE &&
		    error != EMSGSIZE)
			error = 0;
	}
	return (error);
}

/*
 * Generate the rpc reply header
 * siz arg. is used to decide if adding a cluster is worthwhile
 */
int
nfs_rephead(int siz, struct nfsrv_descript *nd, struct nfssvc_sock *slp,
    int err, int cache, u_quad_t *frev, struct mbuf **mrq, struct mbuf **mbp,
    char **bposp)
{
	u_int32_t *tl;
	struct mbuf *mreq;
	char *bpos;
	struct mbuf *mb;

	mreq = m_gethdr(M_WAIT, MT_DATA);
	MCLAIM(mreq, &nfs_mowner);
	mb = mreq;
	/*
	 * If this is a big reply, use a cluster else
	 * try and leave leading space for the lower level headers.
	 */
	siz += RPC_REPLYSIZ;
	if (siz >= max_datalen) {
		m_clget(mreq, M_WAIT);
	} else
		mreq->m_data += max_hdr;
	tl = mtod(mreq, u_int32_t *);
	mreq->m_len = 6 * NFSX_UNSIGNED;
	bpos = ((char *)tl) + mreq->m_len;
	*tl++ = txdr_unsigned(nd->nd_retxid);
	*tl++ = rpc_reply;
	if (err == ERPCMISMATCH || (err & NFSERR_AUTHERR)) {
		*tl++ = rpc_msgdenied;
		if (err & NFSERR_AUTHERR) {
			*tl++ = rpc_autherr;
			*tl = txdr_unsigned(err & ~NFSERR_AUTHERR);
			mreq->m_len -= NFSX_UNSIGNED;
			bpos -= NFSX_UNSIGNED;
		} else {
			*tl++ = rpc_mismatch;
			*tl++ = txdr_unsigned(RPC_VER2);
			*tl = txdr_unsigned(RPC_VER2);
		}
	} else {
		*tl++ = rpc_msgaccepted;

		/*
		 * For Kerberos authentication, we must send the nickname
		 * verifier back, otherwise just RPCAUTH_NULL.
		 */
		if (nd->nd_flag & ND_KERBFULL) {
			struct nfsuid *nuidp;
			struct timeval ktvin, ktvout;

			memset(&ktvout, 0, sizeof ktvout);	/* XXX gcc */

			LIST_FOREACH(nuidp,
			    NUIDHASH(slp, kauth_cred_geteuid(nd->nd_cr)),
			    nu_hash) {
				if (kauth_cred_geteuid(nuidp->nu_cr) ==
				    kauth_cred_geteuid(nd->nd_cr) &&
				    (!nd->nd_nam2 || netaddr_match(
				    NU_NETFAM(nuidp), &nuidp->nu_haddr,
				    nd->nd_nam2)))
					break;
			}
			if (nuidp) {
				ktvin.tv_sec =
				    txdr_unsigned(nuidp->nu_timestamp.tv_sec
					- 1);
				ktvin.tv_usec =
				    txdr_unsigned(nuidp->nu_timestamp.tv_usec);

				/*
				 * Encrypt the timestamp in ecb mode using the
				 * session key.
				 */
#ifdef NFSKERB
				XXX
#else
				(void)ktvin.tv_sec;
#endif

				*tl++ = rpc_auth_kerb;
				*tl++ = txdr_unsigned(3 * NFSX_UNSIGNED);
				*tl = ktvout.tv_sec;
				nfsm_build(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
				*tl++ = ktvout.tv_usec;
				*tl++ = txdr_unsigned(
				    kauth_cred_geteuid(nuidp->nu_cr));
			} else {
				*tl++ = 0;
				*tl++ = 0;
			}
		} else {
			*tl++ = 0;
			*tl++ = 0;
		}
		switch (err) {
		case EPROGUNAVAIL:
			*tl = txdr_unsigned(RPC_PROGUNAVAIL);
			break;
		case EPROGMISMATCH:
			*tl = txdr_unsigned(RPC_PROGMISMATCH);
			nfsm_build(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
			*tl++ = txdr_unsigned(2);
			*tl = txdr_unsigned(3);
			break;
		case EPROCUNAVAIL:
			*tl = txdr_unsigned(RPC_PROCUNAVAIL);
			break;
		case EBADRPC:
			*tl = txdr_unsigned(RPC_GARBAGE);
			break;
		default:
			*tl = 0;
			if (err != NFSERR_RETVOID) {
				nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
				if (err)
					*tl = txdr_unsigned(nfsrv_errmap(nd, err));
				else
					*tl = 0;
			}
			break;
		}
	}

	if (mrq != NULL)
		*mrq = mreq;
	*mbp = mb;
	*bposp = bpos;
	if (err != 0 && err != NFSERR_RETVOID)
		nfsstats.srvrpc_errs++;
	return (0);
}

static void
nfs_timer_schedule(void)
{

	callout_schedule(&nfs_timer_ch, nfs_ticks);
}

void
nfs_timer_start(void)
{

	if (callout_pending(&nfs_timer_ch))
		return;

	nfs_timer_start_ev.ev_count++;
	nfs_timer_schedule();
}

void
nfs_timer_init(void)
{

	mutex_init(&nfs_timer_lock, MUTEX_DEFAULT, IPL_NONE);
	callout_init(&nfs_timer_ch, 0);
	callout_setfunc(&nfs_timer_ch, nfs_timer, NULL);
	evcnt_attach_dynamic(&nfs_timer_ev, EVCNT_TYPE_MISC, NULL,
	    "nfs", "timer");
	evcnt_attach_dynamic(&nfs_timer_start_ev, EVCNT_TYPE_MISC, NULL,
	    "nfs", "timer start");
	evcnt_attach_dynamic(&nfs_timer_stop_ev, EVCNT_TYPE_MISC, NULL,
	    "nfs", "timer stop");
}

void
nfs_timer_fini(void)
{

	callout_halt(&nfs_timer_ch, NULL);
	callout_destroy(&nfs_timer_ch);
	mutex_destroy(&nfs_timer_lock);
	evcnt_detach(&nfs_timer_ev);
	evcnt_detach(&nfs_timer_start_ev);
	evcnt_detach(&nfs_timer_stop_ev);
}

void
nfs_timer_srvinit(bool (*func)(void))
{

	nfs_timer_srvvec = func;
}

void
nfs_timer_srvfini(void)
{

	mutex_enter(&nfs_timer_lock);
	nfs_timer_srvvec = NULL;
	mutex_exit(&nfs_timer_lock);
}
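
/*
 * Lifecycle of the timer machinery above (a sketch, inferred from this
 * file): nfs_timer_init() runs once when the NFS subsystem attaches,
 * nfs_timer_start() arms the callout when there is work to do, and
 * nfs_timer() below re-arms itself via nfs_timer_schedule() only while
 * requests remain queued (or the server-side vector reports more work),
 * so the callout stops ticking on an idle system.  The evcnt counters
 * ("nfs timer", "nfs timer start", "nfs timer stop") make this
 * observable with vmstat -e.
 */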

/*
 * Nfs timer routine
 * Scan the nfsreq list and retransmit any requests that have timed out.
 * To avoid retransmission attempts on STREAM sockets (in the future) make
 * sure to set the r_retry field to 0 (implies nm_retry == 0).
 */
void
nfs_timer(void *arg)
{
	struct nfsreq *rep;
	struct mbuf *m;
	struct socket *so;
	struct nfsmount *nmp;
	int timeo;
	int error;
	bool more = false;

	nfs_timer_ev.ev_count++;

	mutex_enter(softnet_lock);	/* XXX PR 40491 */
	TAILQ_FOREACH(rep, &nfs_reqq, r_chain) {
		more = true;
		nmp = rep->r_nmp;
		if (rep->r_mrep || (rep->r_flags & R_SOFTTERM))
			continue;
		if (nfs_sigintr(nmp, rep, rep->r_lwp)) {
			rep->r_flags |= R_SOFTTERM;
			continue;
		}
		if (rep->r_rtt >= 0) {
			rep->r_rtt++;
			if (nmp->nm_flag & NFSMNT_DUMBTIMR)
				timeo = nmp->nm_timeo;
			else
				timeo = NFS_RTO(nmp, nfs_proct[rep->r_procnum]);
			if (nmp->nm_timeouts > 0)
				timeo *= nfs_backoff[nmp->nm_timeouts - 1];
			if (timeo > NFS_MAXTIMEO)
				timeo = NFS_MAXTIMEO;
			if (rep->r_rtt <= timeo)
				continue;
			if (nmp->nm_timeouts <
			    (sizeof(nfs_backoff) / sizeof(nfs_backoff[0])))
				nmp->nm_timeouts++;
		}
		/*
		 * Check for server not responding
		 */
		if ((rep->r_flags & R_TPRINTFMSG) == 0 &&
		    rep->r_rexmit > nmp->nm_deadthresh) {
			nfs_msg(rep->r_lwp,
			    nmp->nm_mountp->mnt_stat.f_mntfromname,
			    "not responding");
			rep->r_flags |= R_TPRINTFMSG;
		}
		if (rep->r_rexmit >= rep->r_retry) {	/* too many */
			nfsstats.rpctimeouts++;
			rep->r_flags |= R_SOFTTERM;
			continue;
		}
		if (nmp->nm_sotype != SOCK_DGRAM) {
			if (++rep->r_rexmit > NFS_MAXREXMIT)
				rep->r_rexmit = NFS_MAXREXMIT;
			continue;
		}
		if ((so = nmp->nm_so) == NULL)
			continue;

		/*
		 * If there is enough space and the window allows..
		 *	Resend it
		 * Set r_rtt to -1 in case we fail to send it now.
		 */
		/* solock(so);		XXX PR 40491 */
		rep->r_rtt = -1;
		if (sbspace(&so->so_snd) >= rep->r_mreq->m_pkthdr.len &&
		    ((nmp->nm_flag & NFSMNT_DUMBTIMR) ||
		     (rep->r_flags & R_SENT) ||
		     nmp->nm_sent < nmp->nm_cwnd) &&
		    (m = m_copym(rep->r_mreq, 0, M_COPYALL, M_DONTWAIT))) {
			if (so->so_state & SS_ISCONNECTED)
				error = (*so->so_proto->pr_usrreqs->pr_send)(so,
				    m, NULL, NULL, NULL);
			else
				error = (*so->so_proto->pr_usrreqs->pr_send)(so,
				    m, mtod(nmp->nm_nam, struct sockaddr *),
				    NULL, NULL);
			if (error) {
				if (NFSIGNORE_SOERROR(nmp->nm_soflags, error)) {
#ifdef DEBUG
					if (ratecheck(&nfs_timer_last_err_time,
					    &nfs_err_interval))
						printf("%s: ignoring error "
						    "%d\n", __func__, error);
#endif
					so->so_error = 0;
				}
			} else {
				/*
				 * Iff first send, start timing
				 * else turn timing off, backoff timer
				 * and divide congestion window by 2.
				 */
				if (rep->r_flags & R_SENT) {
					rep->r_flags &= ~R_TIMING;
					if (++rep->r_rexmit > NFS_MAXREXMIT)
						rep->r_rexmit = NFS_MAXREXMIT;
					nmp->nm_cwnd >>= 1;
					if (nmp->nm_cwnd < NFS_CWNDSCALE)
						nmp->nm_cwnd = NFS_CWNDSCALE;
					nfsstats.rpcretries++;
				} else {
					rep->r_flags |= R_SENT;
					nmp->nm_sent += NFS_CWNDSCALE;
				}
				rep->r_rtt = 0;
			}
		}
		/* sounlock(so);	XXX PR 40491 */
	}
	mutex_exit(softnet_lock);	/* XXX PR 40491 */

	mutex_enter(&nfs_timer_lock);
	if (nfs_timer_srvvec != NULL) {
		more |= (*nfs_timer_srvvec)();
	}
	mutex_exit(&nfs_timer_lock);

	if (more) {
		nfs_timer_schedule();
	} else {
		nfs_timer_stop_ev.ev_count++;
	}
}
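
/*
 * Worked example of the retransmit backoff above (illustrative): suppose
 * NFS_RTO() yields a base timeout of 4 ticks for a read.  After the first
 * timeout nm_timeouts is 1, so the next deadline is 4 * nfs_backoff[0]
 * == 8 ticks; after the second, 4 * nfs_backoff[1] == 16 ticks, and so
 * on up the { 2, 4, ..., 256 } table, always clamped at NFS_MAXTIMEO.
 * The reply path (outside this excerpt) winds nm_timeouts back once the
 * server responds again, collapsing the backoff.
 */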

/*
 * Test for a termination condition pending on the process.
 * This is used for NFSMNT_INT mounts.
 */
int
nfs_sigintr(struct nfsmount *nmp, struct nfsreq *rep, struct lwp *l)
{
	sigset_t ss;

	if (rep && (rep->r_flags & R_SOFTTERM))
		return (EINTR);
	if (!(nmp->nm_flag & NFSMNT_INT))
		return (0);
	if (l) {
		sigpending1(l, &ss);
#if 0
		sigminusset(&l->l_proc->p_sigctx.ps_sigignore, &ss);
#endif
		if (sigismember(&ss, SIGINT) || sigismember(&ss, SIGTERM) ||
		    sigismember(&ss, SIGKILL) || sigismember(&ss, SIGHUP) ||
		    sigismember(&ss, SIGQUIT))
			return (EINTR);
	}
	return (0);
}

int
nfs_rcvlock(struct nfsmount *nmp, struct nfsreq *rep)
{
	int *flagp = &nmp->nm_iflag;
	int slptimeo = 0;
	bool catch_p;
	int error = 0;

	KASSERT(nmp == rep->r_nmp);

	if (nmp->nm_flag & NFSMNT_SOFT)
		slptimeo = nmp->nm_retry * nmp->nm_timeo;

	if (nmp->nm_iflag & NFSMNT_DISMNTFORCE)
		slptimeo = hz;

	catch_p = (nmp->nm_flag & NFSMNT_INT) != 0;
	mutex_enter(&nmp->nm_lock);
	while (/* CONSTCOND */ true) {
		if (*flagp & NFSMNT_DISMNT) {
			cv_signal(&nmp->nm_disconcv);
			error = EIO;
			break;
		}
		/*
		 * If our reply was received while we were sleeping,
		 * then just return without taking the lock to avoid a
		 * situation where a single iod could 'capture' the
		 * receive lock.
		 */
		if (rep->r_mrep != NULL) {
			cv_signal(&nmp->nm_rcvcv);
			error = EALREADY;
			break;
		}
		if (nfs_sigintr(rep->r_nmp, rep, rep->r_lwp)) {
			cv_signal(&nmp->nm_rcvcv);
			error = EINTR;
			break;
		}
		if ((*flagp & NFSMNT_RCVLOCK) == 0) {
			*flagp |= NFSMNT_RCVLOCK;
			break;
		}
		if (catch_p) {
			error = cv_timedwait_sig(&nmp->nm_rcvcv, &nmp->nm_lock,
			    slptimeo);
		} else {
			error = cv_timedwait(&nmp->nm_rcvcv, &nmp->nm_lock,
			    slptimeo);
		}
		if (error) {
			if ((error == EWOULDBLOCK) &&
			    (nmp->nm_flag & NFSMNT_SOFT)) {
				error = EIO;
				break;
			}
			error = 0;
		}
		if (catch_p) {
			catch_p = false;
			slptimeo = 2 * hz;
		}
	}
	mutex_exit(&nmp->nm_lock);
	return error;
}

/*
 * Unlock the stream socket for others.
 */
void
nfs_rcvunlock(struct nfsmount *nmp)
{

	mutex_enter(&nmp->nm_lock);
	if ((nmp->nm_iflag & NFSMNT_RCVLOCK) == 0)
		panic("nfs rcvunlock");
	nmp->nm_iflag &= ~NFSMNT_RCVLOCK;
	cv_signal(&nmp->nm_rcvcv);
	mutex_exit(&nmp->nm_lock);
}
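
/*
 * Typical use of the receive lock (a sketch based on callers such as
 * nfs_safedisconnect() above):
 *
 *	error = nfs_rcvlock(nmp, rep);
 *	if (error == 0) {
 *		... soreceive() and match the reply to its request ...
 *		nfs_rcvunlock(nmp);
 *	} else if (error == EALREADY) {
 *		... another thread already received our reply ...
 *	}
 *
 * EALREADY is the interesting case: it lets one nfsiod sleep in
 * nfs_rcvlock() while another thread services the socket, without any
 * single thread capturing the lock indefinitely.
 */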

/*
 * Parse an RPC request
 * - verify it
 * - allocate and fill in the cred.
 */
int
nfs_getreq(struct nfsrv_descript *nd, struct nfsd *nfsd, int has_header)
{
	int len, i;
	u_int32_t *tl;
	int32_t t1;
	struct uio uio;
	struct iovec iov;
	char *dpos, *cp2, *cp;
	u_int32_t nfsvers, auth_type;
	uid_t nickuid;
	int error = 0, ticklen;
	struct mbuf *mrep, *md;
	struct nfsuid *nuidp;
	struct timeval tvin, tvout;

	memset(&tvout, 0, sizeof tvout);	/* XXX gcc */

	KASSERT(nd->nd_cr == NULL);
	mrep = nd->nd_mrep;
	md = nd->nd_md;
	dpos = nd->nd_dpos;
	if (has_header) {
		nfsm_dissect(tl, u_int32_t *, 10 * NFSX_UNSIGNED);
		nd->nd_retxid = fxdr_unsigned(u_int32_t, *tl++);
		if (*tl++ != rpc_call) {
			m_freem(mrep);
			return (EBADRPC);
		}
	} else
		nfsm_dissect(tl, u_int32_t *, 8 * NFSX_UNSIGNED);
	nd->nd_repstat = 0;
	nd->nd_flag = 0;
	if (*tl++ != rpc_vers) {
		nd->nd_repstat = ERPCMISMATCH;
		nd->nd_procnum = NFSPROC_NOOP;
		return (0);
	}
	if (*tl != nfs_prog) {
		nd->nd_repstat = EPROGUNAVAIL;
		nd->nd_procnum = NFSPROC_NOOP;
		return (0);
	}
	tl++;
	nfsvers = fxdr_unsigned(u_int32_t, *tl++);
	if (nfsvers < NFS_VER2 || nfsvers > NFS_VER3) {
		nd->nd_repstat = EPROGMISMATCH;
		nd->nd_procnum = NFSPROC_NOOP;
		return (0);
	}
	if (nfsvers == NFS_VER3)
		nd->nd_flag = ND_NFSV3;
	nd->nd_procnum = fxdr_unsigned(u_int32_t, *tl++);
	if (nd->nd_procnum == NFSPROC_NULL)
		return (0);
	if (nd->nd_procnum > NFSPROC_COMMIT ||
	    (!nd->nd_flag && nd->nd_procnum > NFSV2PROC_STATFS)) {
		nd->nd_repstat = EPROCUNAVAIL;
		nd->nd_procnum = NFSPROC_NOOP;
		return (0);
	}
	if ((nd->nd_flag & ND_NFSV3) == 0)
		nd->nd_procnum = nfsv3_procid[nd->nd_procnum];
	auth_type = *tl++;
	len = fxdr_unsigned(int, *tl++);
	if (len < 0 || len > RPCAUTH_MAXSIZ) {
		m_freem(mrep);
		return (EBADRPC);
	}

	nd->nd_flag &= ~ND_KERBAUTH;
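
	/*
	 * For reference, the RFC 1057 call header walked above consists of
	 * 32-bit XDR words:
	 *
	 *	xid, msg type (CALL), rpc version (2),
	 *	program (nfs_prog), version (2 or 3), procedure,
	 *	cred flavor (auth_type), cred length (len), ...
	 *
	 * which is why has_header asks for 10 words up front (the first
	 * two being xid and msg type) and only 8 otherwise.
	 */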
	/*
	 * Handle auth_unix or auth_kerb.
	 */
	if (auth_type == rpc_auth_unix) {
		uid_t uid;
		gid_t gid;

		nd->nd_cr = kauth_cred_alloc();
		len = fxdr_unsigned(int, *++tl);
		if (len < 0 || len > NFS_MAXNAMLEN) {
			m_freem(mrep);
			error = EBADRPC;
			goto errout;
		}
		nfsm_adv(nfsm_rndup(len));
		nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);

		uid = fxdr_unsigned(uid_t, *tl++);
		gid = fxdr_unsigned(gid_t, *tl++);
		kauth_cred_setuid(nd->nd_cr, uid);
		kauth_cred_seteuid(nd->nd_cr, uid);
		kauth_cred_setsvuid(nd->nd_cr, uid);
		kauth_cred_setgid(nd->nd_cr, gid);
		kauth_cred_setegid(nd->nd_cr, gid);
		kauth_cred_setsvgid(nd->nd_cr, gid);

		len = fxdr_unsigned(int, *tl);
		if (len < 0 || len > RPCAUTH_UNIXGIDS) {
			m_freem(mrep);
			error = EBADRPC;
			goto errout;
		}
		nfsm_dissect(tl, u_int32_t *, (len + 2) * NFSX_UNSIGNED);

		if (len > 0) {
			size_t grbuf_size = min(len, NGROUPS) * sizeof(gid_t);
			gid_t *grbuf = kmem_alloc(grbuf_size, KM_SLEEP);

			for (i = 0; i < len; i++) {
				if (i < NGROUPS) /* XXX elad */
					grbuf[i] = fxdr_unsigned(gid_t, *tl++);
				else
					tl++;
			}
			kauth_cred_setgroups(nd->nd_cr, grbuf,
			    min(len, NGROUPS), -1, UIO_SYSSPACE);
			kmem_free(grbuf, grbuf_size);
		}

		len = fxdr_unsigned(int, *++tl);
		if (len < 0 || len > RPCAUTH_MAXSIZ) {
			m_freem(mrep);
			error = EBADRPC;
			goto errout;
		}
		if (len > 0)
			nfsm_adv(nfsm_rndup(len));
	} else if (auth_type == rpc_auth_kerb) {
		switch (fxdr_unsigned(int, *tl++)) {
		case RPCAKN_FULLNAME:
			ticklen = fxdr_unsigned(int, *tl);
			*((u_int32_t *)nfsd->nfsd_authstr) = *tl;
			uio.uio_resid = nfsm_rndup(ticklen) + NFSX_UNSIGNED;
			nfsd->nfsd_authlen = uio.uio_resid + NFSX_UNSIGNED;
			if (uio.uio_resid > (len - 2 * NFSX_UNSIGNED)) {
				m_freem(mrep);
				error = EBADRPC;
				goto errout;
			}
			uio.uio_offset = 0;
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			UIO_SETUP_SYSSPACE(&uio);
			iov.iov_base = (void *)&nfsd->nfsd_authstr[4];
			iov.iov_len = RPCAUTH_MAXSIZ - 4;
			nfsm_mtouio(&uio, uio.uio_resid);
			nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
			if (*tl++ != rpc_auth_kerb ||
			    fxdr_unsigned(int, *tl) != 4 * NFSX_UNSIGNED) {
				printf("Bad kerb verifier\n");
				nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADVERF);
				nd->nd_procnum = NFSPROC_NOOP;
				return (0);
			}
			nfsm_dissect(cp, void *, 4 * NFSX_UNSIGNED);
			tl = (u_int32_t *)cp;
			if (fxdr_unsigned(int, *tl) != RPCAKN_FULLNAME) {
				printf("Not fullname kerb verifier\n");
				nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADVERF);
				nd->nd_procnum = NFSPROC_NOOP;
				return (0);
			}
			cp += NFSX_UNSIGNED;
			memcpy(nfsd->nfsd_verfstr, cp, 3 * NFSX_UNSIGNED);
			nfsd->nfsd_verflen = 3 * NFSX_UNSIGNED;
			nd->nd_flag |= ND_KERBFULL;
			nfsd->nfsd_flag |= NFSD_NEEDAUTH;
			break;
		case RPCAKN_NICKNAME:
			if (len != 2 * NFSX_UNSIGNED) {
				printf("Kerb nickname short\n");
				nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADCRED);
				nd->nd_procnum = NFSPROC_NOOP;
				return (0);
			}
			nickuid = fxdr_unsigned(uid_t, *tl);
			nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
			if (*tl++ != rpc_auth_kerb ||
			    fxdr_unsigned(int, *tl) != 3 * NFSX_UNSIGNED) {
				printf("Kerb nick verifier bad\n");
				nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADVERF);
				nd->nd_procnum = NFSPROC_NOOP;
				return (0);
			}
			nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
			tvin.tv_sec = *tl++;
			tvin.tv_usec = *tl;

			LIST_FOREACH(nuidp, NUIDHASH(nfsd->nfsd_slp, nickuid),
			    nu_hash) {
				if (kauth_cred_geteuid(nuidp->nu_cr) == nickuid &&
				    (!nd->nd_nam2 ||
				     netaddr_match(NU_NETFAM(nuidp),
				     &nuidp->nu_haddr, nd->nd_nam2)))
					break;
			}
			if (!nuidp) {
				nd->nd_repstat =
				    (NFSERR_AUTHERR|AUTH_REJECTCRED);
				nd->nd_procnum = NFSPROC_NOOP;
				return (0);
			}

			/*
			 * Now, decrypt the timestamp using the session key
			 * and validate it.
			 */
#ifdef NFSKERB
			XXX
#else
			(void)tvin.tv_sec;
#endif

			tvout.tv_sec = fxdr_unsigned(long, tvout.tv_sec);
			tvout.tv_usec = fxdr_unsigned(long, tvout.tv_usec);
			if (nuidp->nu_expire < time_second ||
			    nuidp->nu_timestamp.tv_sec > tvout.tv_sec ||
			    (nuidp->nu_timestamp.tv_sec == tvout.tv_sec &&
			     nuidp->nu_timestamp.tv_usec > tvout.tv_usec)) {
				nuidp->nu_expire = 0;
				nd->nd_repstat =
				    (NFSERR_AUTHERR|AUTH_REJECTVERF);
				nd->nd_procnum = NFSPROC_NOOP;
				return (0);
			}
			kauth_cred_hold(nuidp->nu_cr);
			nd->nd_cr = nuidp->nu_cr;
			nd->nd_flag |= ND_KERBNICK;
		}
	} else {
		nd->nd_repstat = (NFSERR_AUTHERR | AUTH_REJECTCRED);
		nd->nd_procnum = NFSPROC_NOOP;
		return (0);
	}

	nd->nd_md = md;
	nd->nd_dpos = dpos;
	KASSERT((nd->nd_cr == NULL && (nfsd->nfsd_flag & NFSD_NEEDAUTH) != 0)
	    || (nd->nd_cr != NULL && (nfsd->nfsd_flag & NFSD_NEEDAUTH) == 0));
	return (0);
nfsmout:
errout:
	KASSERT(error != 0);
	if (nd->nd_cr != NULL) {
		kauth_cred_free(nd->nd_cr);
		nd->nd_cr = NULL;
	}
	return (error);
}

int
nfs_msg(struct lwp *l, const char *server, const char *msg)
{
	tpr_t tpr;

#if 0 /* XXX nfs_timer can't block on proc_lock */
	if (l)
		tpr = tprintf_open(l->l_proc);
	else
#endif
		tpr = NULL;
	tprintf(tpr, "nfs server %s: %s\n", server, msg);
	tprintf_close(tpr);
	return (0);
}

static struct pool nfs_srvdesc_pool;

void
nfsdreq_init(void)
{

	pool_init(&nfs_srvdesc_pool, sizeof(struct nfsrv_descript),
	    0, 0, 0, "nfsrvdescpl", &pool_allocator_nointr, IPL_NONE);
}

void
nfsdreq_fini(void)
{

	pool_destroy(&nfs_srvdesc_pool);
}

struct nfsrv_descript *
nfsdreq_alloc(void)
{
	struct nfsrv_descript *nd;

	nd = pool_get(&nfs_srvdesc_pool, PR_WAITOK);
	nd->nd_cr = NULL;
	return nd;
}

void
nfsdreq_free(struct nfsrv_descript *nd)
{
	kauth_cred_t cr;

	cr = nd->nd_cr;
	if (cr != NULL) {
		kauth_cred_free(cr);
	}
	pool_put(&nfs_srvdesc_pool, nd);
}
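
/*
 * Typical descriptor lifecycle (a sketch; the real caller is the nfsd
 * request loop elsewhere in the server code):
 *
 *	struct nfsrv_descript *nd = nfsdreq_alloc();
 *	nd->nd_mrep = mrep;	(plus nd_md, nd_dpos, ...)
 *	error = nfs_getreq(nd, nfsd, 1);
 *	... dispatch nd->nd_procnum and reply via nfs_rephead() ...
 *	nfsdreq_free(nd);	(releases nd_cr if one was set)
 *
 * nfsdreq_alloc() clearing nd_cr and nfsdreq_free() dropping it is what
 * lets nfs_getreq() hand ownership of the kauth credential over to the
 * descriptor.
 */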