/*	$NetBSD: nfs_socket.c,v 1.198 2016/06/17 14:28:29 christos Exp $	*/

/*
 * Copyright (c) 1989, 1991, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_socket.c	8.5 (Berkeley) 3/30/95
 */

/*
 * Socket operations for use by nfs
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nfs_socket.c,v 1.198 2016/06/17 14:28:29 christos Exp $");

#ifdef _KERNEL_OPT
#include "opt_nfs.h"
#include "opt_mbuftrace.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/evcnt.h>
#include <sys/callout.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>
#include <sys/vnode.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/syslog.h>
#include <sys/tprintf.h>
#include <sys/namei.h>
#include <sys/signal.h>
#include <sys/signalvar.h>
#include <sys/kauth.h>
#include <sys/time.h>

#include <netinet/in.h>
#include <netinet/tcp.h>

#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/xdr_subs.h>
#include <nfs/nfsm_subs.h>
#include <nfs/nfsmount.h>
#include <nfs/nfsnode.h>
#include <nfs/nfsrtt.h>
#include <nfs/nfs_var.h>

#ifdef MBUFTRACE
struct mowner nfs_mowner = MOWNER_INIT("nfs", "");
#endif

/*
 * Estimate rto for an nfs rpc sent via an unreliable datagram.
 * Use the mean and mean deviation of rtt for the appropriate type of rpc
 * for the frequent rpcs and a default for the others.
 * The justification for doing "other" this way is that these rpcs
 * happen so infrequently that a timer estimate would probably be stale.
 * Also, since many of these rpcs are
 * non-idempotent, a conservative timeout is desired.
 * getattr, lookup - A+2D
 * read, write     - A+4D
 * other           - nm_timeo
 */
#define	NFS_RTO(n, t) \
	((t) == 0 ? (n)->nm_timeo : \
	 ((t) < 3 ? \
	  (((((n)->nm_srtt[t-1] + 3) >> 2) + (n)->nm_sdrtt[t-1] + 1) >> 1) : \
	  ((((n)->nm_srtt[t-1] + 7) >> 3) + (n)->nm_sdrtt[t-1] + 1)))
#define	NFS_SRTT(r)	(r)->r_nmp->nm_srtt[nfs_proct[(r)->r_procnum] - 1]
#define	NFS_SDRTT(r)	(r)->r_nmp->nm_sdrtt[nfs_proct[(r)->r_procnum] - 1]
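
/*
 * Worked example (illustrative only, assuming the scaling implied by
 * the A+2D/A+4D table above: nm_srtt[] holds the smoothed rtt scaled
 * by 8 and nm_sdrtt[] the mean deviation scaled by 4).  For
 * nm_srtt[0] = 56 (A = 7 ticks) and nm_sdrtt[0] = 12 (D = 3 ticks):
 *	getattr (t == 1): (((56 + 3) >> 2) + 12 + 1) >> 1 = 13 = A + 2D
 *	read    (t == 3): ((56 + 7) >> 3) + 12 + 1        = 20 ~= A + 4D
 * so the shift-and-add forms above are just integer versions of the
 * formulas in the comment.
 */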

/*
 * Defines which timer to use for the procnum.
 * 0 - default
 * 1 - getattr
 * 2 - lookup
 * 3 - read
 * 4 - write
 */
const int nfs_proct[NFS_NPROCS] = {
	[NFSPROC_NULL] = 0,
	[NFSPROC_GETATTR] = 1,
	[NFSPROC_SETATTR] = 0,
	[NFSPROC_LOOKUP] = 2,
	[NFSPROC_ACCESS] = 1,
	[NFSPROC_READLINK] = 3,
	[NFSPROC_READ] = 3,
	[NFSPROC_WRITE] = 4,
	[NFSPROC_CREATE] = 0,
	[NFSPROC_MKDIR] = 0,
	[NFSPROC_SYMLINK] = 0,
	[NFSPROC_MKNOD] = 0,
	[NFSPROC_REMOVE] = 0,
	[NFSPROC_RMDIR] = 0,
	[NFSPROC_RENAME] = 0,
	[NFSPROC_LINK] = 0,
	[NFSPROC_READDIR] = 3,
	[NFSPROC_READDIRPLUS] = 3,
	[NFSPROC_FSSTAT] = 0,
	[NFSPROC_FSINFO] = 0,
	[NFSPROC_PATHCONF] = 0,
	[NFSPROC_COMMIT] = 0,
	[NFSPROC_NOOP] = 0,
};

#ifdef DEBUG
/*
 * Avoid spamming the console with debugging messages.  We only print
 * the nfs timer and reply error debugs every 10 seconds.
 */
const struct timeval nfs_err_interval = { 10, 0 };
struct timeval nfs_reply_last_err_time;
struct timeval nfs_timer_last_err_time;
#endif

/*
 * There is a congestion window for outstanding rpcs maintained per mount
 * point. The cwnd size is adjusted in roughly the way that:
 * Van Jacobson, Congestion Avoidance and Control, In "Proceedings of
 * SIGCOMM '88". ACM, August 1988.
 * describes for TCP. The cwnd size is chopped in half on a retransmit timeout
 * and incremented by 1/cwnd when each rpc reply is received and a full cwnd
 * of rpcs is in progress.
 * (The sent count and cwnd are scaled for integer arith.)
 * Variants of "slow start" were tried and were found to be too much of a
 * performance hit (ave. rtt 3 times larger),
 * I suspect due to the large rtt that nfs rpcs have.
 */
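
/*
 * A sketch of the scaled arithmetic described above (illustrative
 * only; the timeout half of it appears in nfs_timer() below, and the
 * reply-side growth is done in nfs_reply()).  nm_sent counts
 * NFS_CWNDSCALE per in-flight rpc, so "1/cwnd" becomes an integer
 * add of roughly NFS_CWNDSCALE * NFS_CWNDSCALE / nm_cwnd.
 * ("rexmit_timeout" is a hypothetical condition name.)
 */
#if 0	/* illustrative sketch, not compiled */
	if (rexmit_timeout) {
		nmp->nm_cwnd >>= 1;			/* halve the window */
		if (nmp->nm_cwnd < NFS_CWNDSCALE)
			nmp->nm_cwnd = NFS_CWNDSCALE;	/* floor: one rpc */
	} else if (nmp->nm_sent >= nmp->nm_cwnd)	/* window is full */
		nmp->nm_cwnd +=
		    (NFS_CWNDSCALE * NFS_CWNDSCALE) / nmp->nm_cwnd;
#endif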

int	nfsrtton = 0;
struct	nfsrtt nfsrtt;
static const int nfs_backoff[8] = { 2, 4, 8, 16, 32, 64, 128, 256, };
struct nfsreqhead nfs_reqq;
static callout_t nfs_timer_ch;
static struct evcnt nfs_timer_ev;
static struct evcnt nfs_timer_start_ev;
static struct evcnt nfs_timer_stop_ev;
static kmutex_t nfs_timer_lock;
static bool (*nfs_timer_srvvec)(void);

/*
 * Initialize sockets and congestion for a new NFS connection.
 * We do not free the sockaddr if error.
 */
int
nfs_connect(struct nfsmount *nmp, struct nfsreq *rep, struct lwp *l)
{
	struct socket *so;
	int error, rcvreserve, sndreserve;
	struct sockaddr *saddr;
	struct sockaddr_in sin;
	struct sockaddr_in6 sin6;
	int val;

	nmp->nm_so = NULL;
	saddr = mtod(nmp->nm_nam, struct sockaddr *);
	error = socreate(saddr->sa_family, &nmp->nm_so,
	    nmp->nm_sotype, nmp->nm_soproto, l, NULL);
	if (error)
		goto bad;
	so = nmp->nm_so;
#ifdef MBUFTRACE
	so->so_mowner = &nfs_mowner;
	so->so_rcv.sb_mowner = &nfs_mowner;
	so->so_snd.sb_mowner = &nfs_mowner;
#endif
	nmp->nm_soflags = so->so_proto->pr_flags;

	/*
	 * Some servers require that the client port be a reserved port
	 * number.
	 */
	if (saddr->sa_family == AF_INET && (nmp->nm_flag & NFSMNT_RESVPORT)) {
		val = IP_PORTRANGE_LOW;

		if ((error = so_setsockopt(NULL, so, IPPROTO_IP, IP_PORTRANGE,
		    &val, sizeof(val))))
			goto bad;
		sin.sin_len = sizeof(struct sockaddr_in);
		sin.sin_family = AF_INET;
		sin.sin_addr.s_addr = INADDR_ANY;
		sin.sin_port = 0;
		error = sobind(so, (struct sockaddr *)&sin, &lwp0);
		if (error)
			goto bad;
	}
	if (saddr->sa_family == AF_INET6 && (nmp->nm_flag & NFSMNT_RESVPORT)) {
		val = IPV6_PORTRANGE_LOW;

		if ((error = so_setsockopt(NULL, so, IPPROTO_IPV6,
		    IPV6_PORTRANGE, &val, sizeof(val))))
			goto bad;
		memset(&sin6, 0, sizeof(sin6));
		sin6.sin6_len = sizeof(struct sockaddr_in6);
		sin6.sin6_family = AF_INET6;
		error = sobind(so, (struct sockaddr *)&sin6, &lwp0);
		if (error)
			goto bad;
	}

	/*
	 * Protocols that do not require connections may be optionally left
	 * unconnected for servers that reply from a port other than NFS_PORT.
	 */
	solock(so);
	if (nmp->nm_flag & NFSMNT_NOCONN) {
		if (nmp->nm_soflags & PR_CONNREQUIRED) {
			sounlock(so);
			error = ENOTCONN;
			goto bad;
		}
	} else {
		error = soconnect(so, mtod(nmp->nm_nam, struct sockaddr *), l);
		if (error) {
			sounlock(so);
			goto bad;
		}

		/*
		 * Wait for the connection to complete. Cribbed from the
		 * connect system call but with the wait timing out so
		 * that interruptible mounts don't hang here for a long time.
		 */
		while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) {
			(void)sowait(so, false, 2 * hz);
			if ((so->so_state & SS_ISCONNECTING) &&
			    so->so_error == 0 && rep &&
			    (error = nfs_sigintr(nmp, rep, rep->r_lwp)) != 0) {
				so->so_state &= ~SS_ISCONNECTING;
				sounlock(so);
				goto bad;
			}
		}
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			sounlock(so);
			goto bad;
		}
	}
	if (nmp->nm_flag & (NFSMNT_SOFT | NFSMNT_INT)) {
		so->so_rcv.sb_timeo = (5 * hz);
		so->so_snd.sb_timeo = (5 * hz);
	} else {
		/*
		 * enable receive timeout to detect server crash and
		 * reconnect.  otherwise, we can be stuck in soreceive
		 * forever.
		 */
		so->so_rcv.sb_timeo = (5 * hz);
		so->so_snd.sb_timeo = 0;
	}
	if (nmp->nm_sotype == SOCK_DGRAM) {
		sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR) * 3;
		rcvreserve = (max(nmp->nm_rsize, nmp->nm_readdirsize) +
		    NFS_MAXPKTHDR) * 2;
	} else if (nmp->nm_sotype == SOCK_SEQPACKET) {
		sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR) * 3;
		rcvreserve = (max(nmp->nm_rsize, nmp->nm_readdirsize) +
		    NFS_MAXPKTHDR) * 3;
	} else {
		sounlock(so);
		if (nmp->nm_sotype != SOCK_STREAM)
			panic("nfscon sotype");
		if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
			val = 1;
			so_setsockopt(NULL, so, SOL_SOCKET, SO_KEEPALIVE, &val,
			    sizeof(val));
		}
		if (so->so_proto->pr_protocol == IPPROTO_TCP) {
			val = 1;
			so_setsockopt(NULL, so, IPPROTO_TCP, TCP_NODELAY, &val,
			    sizeof(val));
		}
		sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR +
		    sizeof (u_int32_t)) * 3;
		rcvreserve = (nmp->nm_rsize + NFS_MAXPKTHDR +
		    sizeof (u_int32_t)) * 3;
		solock(so);
	}
	error = soreserve(so, sndreserve, rcvreserve);
	if (error) {
		sounlock(so);
		goto bad;
	}
	so->so_rcv.sb_flags |= SB_NOINTR;
	so->so_snd.sb_flags |= SB_NOINTR;
	sounlock(so);

	/* Initialize other non-zero congestion variables */
	nmp->nm_srtt[0] = nmp->nm_srtt[1] = nmp->nm_srtt[2] = nmp->nm_srtt[3] =
	    NFS_TIMEO << 3;
	nmp->nm_sdrtt[0] = nmp->nm_sdrtt[1] = nmp->nm_sdrtt[2] =
	    nmp->nm_sdrtt[3] = 0;
	nmp->nm_cwnd = NFS_MAXCWND / 2;	    /* Initial send window */
	nmp->nm_sent = 0;
	nmp->nm_timeouts = 0;
	return (0);

bad:
	nfs_disconnect(nmp);
	return (error);
}
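
/*
 * Sizing sketch for the soreserve() call in nfs_connect() (numbers
 * purely illustrative): on a UDP mount with an 8192 byte nm_wsize,
 *	sndreserve = (8192 + NFS_MAXPKTHDR) * 3	-> space for 3 requests
 *	rcvreserve = (max(nm_rsize, nm_readdirsize) + NFS_MAXPKTHDR) * 2
 *						-> space for 2 replies
 * The TCP case adds sizeof(u_int32_t) per message for the RPC record
 * mark that prefixes each message on a stream transport.
 */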

/*
 * Reconnect routine:
 * Called when a connection is broken on a reliable protocol.
 * - clean up the old socket
 * - nfs_connect() again
 * - set R_MUSTRESEND for all outstanding requests on mount point
 * If this fails the mount point is DEAD!
 * nb: Must be called with the nfs_sndlock() set on the mount point.
 */
int
nfs_reconnect(struct nfsreq *rep)
{
	struct nfsreq *rp;
	struct nfsmount *nmp = rep->r_nmp;
	int error, s;
	time_t before_ts;

	nfs_disconnect(nmp);

	/*
	 * Force unmount: do not try to reconnect
	 */
	if (nmp->nm_iflag & NFSMNT_DISMNTFORCE)
		return EIO;

	before_ts = time_uptime;
	while ((error = nfs_connect(nmp, rep, &lwp0)) != 0) {
		if (error == EINTR || error == ERESTART)
			return (EINTR);

		if (rep->r_flags & R_SOFTTERM)
			return (EIO);

		/*
		 * Soft mount can fail here, but not too fast:
		 * we want to make sure we at least honoured
		 * NFS timeout.
		 */
		if ((nmp->nm_flag & NFSMNT_SOFT) &&
		    (time_uptime - before_ts > nmp->nm_timeo / NFS_HZ))
			return (EIO);

		kpause("nfscn2", false, hz, NULL);
	}

	/*
	 * Loop through outstanding request list and fix up all requests
	 * on old socket.
	 */
	s = splsoftnet();
	TAILQ_FOREACH(rp, &nfs_reqq, r_chain) {
		if (rp->r_nmp == nmp) {
			if ((rp->r_flags & R_MUSTRESEND) == 0)
				rp->r_flags |= R_MUSTRESEND | R_REXMITTED;
			rp->r_rexmit = 0;
		}
	}
	splx(s);
	return (0);
}

/*
 * NFS disconnect. Clean up and unlink.
 */
void
nfs_disconnect(struct nfsmount *nmp)
{
	struct socket *so;
	int drain = 0;

	if (nmp->nm_so) {
		so = nmp->nm_so;
		nmp->nm_so = NULL;
		solock(so);
		soshutdown(so, SHUT_RDWR);
		sounlock(so);
		drain = (nmp->nm_iflag & NFSMNT_DISMNT) != 0;
		if (drain) {
			/*
			 * soshutdown() above should wake up the current
			 * listener.
			 * Now wake up those waiting for the receive lock, and
			 * wait for them to go away unhappy, to prevent *nmp
			 * from evaporating while they're sleeping.
			 */
			mutex_enter(&nmp->nm_lock);
			while (nmp->nm_waiters > 0) {
				cv_broadcast(&nmp->nm_rcvcv);
				cv_broadcast(&nmp->nm_sndcv);
				cv_wait(&nmp->nm_disconcv, &nmp->nm_lock);
			}
			mutex_exit(&nmp->nm_lock);
		}
		soclose(so);
	}
#ifdef DIAGNOSTIC
	if (drain && (nmp->nm_waiters > 0))
		panic("nfs_disconnect: waiters left after drain?");
#endif
}

void
nfs_safedisconnect(struct nfsmount *nmp)
{
	struct nfsreq dummyreq;

	memset(&dummyreq, 0, sizeof(dummyreq));
	dummyreq.r_nmp = nmp;
	nfs_rcvlock(nmp, &dummyreq); /* XXX ignored error return */
	nfs_disconnect(nmp);
	nfs_rcvunlock(nmp);
}

/*
 * This is the nfs send routine. For connection based socket types, it
 * must be called with an nfs_sndlock() on the socket.
 * "rep == NULL" indicates that it has been called from a server.
 * For the client side:
 * - return EINTR if the RPC is terminated, 0 otherwise
 * - set R_MUSTRESEND if the send fails for any reason
 * - do any cleanup required by recoverable socket errors (? ? ?)
 * For the server side:
 * - return EINTR or ERESTART if interrupted by a signal
 * - return EPIPE if a connection is lost for connection based sockets (TCP...)
 * - do any cleanup required by recoverable socket errors (? ? ?)
 */
int
nfs_send(struct socket *so, struct mbuf *nam, struct mbuf *top,
    struct nfsreq *rep, struct lwp *l)
{
	struct sockaddr *sendnam;
	int error, soflags, flags;

	/* XXX nfs_doio()/nfs_request() calls with rep->r_lwp == NULL */
	if (l == NULL && rep->r_lwp == NULL)
		l = curlwp;

	if (rep) {
		if (rep->r_flags & R_SOFTTERM) {
			m_freem(top);
			return (EINTR);
		}
		if ((so = rep->r_nmp->nm_so) == NULL) {
			rep->r_flags |= R_MUSTRESEND;
			m_freem(top);
			return (0);
		}
		rep->r_flags &= ~R_MUSTRESEND;
		soflags = rep->r_nmp->nm_soflags;
	} else
		soflags = so->so_proto->pr_flags;
	if ((soflags & PR_CONNREQUIRED) || (so->so_state & SS_ISCONNECTED))
		sendnam = NULL;
	else
		sendnam = mtod(nam, struct sockaddr *);
	if (so->so_type == SOCK_SEQPACKET)
		flags = MSG_EOR;
	else
		flags = 0;

	error = (*so->so_send)(so, sendnam, NULL, top, NULL, flags, l);
	if (error) {
		if (rep) {
			if (error == ENOBUFS && so->so_type == SOCK_DGRAM) {
				/*
				 * We're too fast for the network/driver,
				 * and UDP isn't flowcontrolled.
				 * We need to resend. This is not fatal,
				 * just try again.
				 *
				 * Could be smarter here by doing some sort
				 * of a backoff, but this is rare.
				 */
				rep->r_flags |= R_MUSTRESEND;
			} else {
				if (error != EPIPE)
					log(LOG_INFO,
					    "nfs send error %d for %s\n",
					    error,
					    rep->r_nmp->nm_mountp->
					    mnt_stat.f_mntfromname);
				/*
				 * Deal with errors for the client side.
				 */
				if (rep->r_flags & R_SOFTTERM)
					error = EINTR;
				else if (error != EMSGSIZE)
					rep->r_flags |= R_MUSTRESEND;
			}
		} else {
			/*
			 * See above. This error can happen under normal
			 * circumstances and the log is too noisy.
			 * The error will still show up in nfsstat.
			 */
			if (error != ENOBUFS || so->so_type != SOCK_DGRAM)
				log(LOG_INFO, "nfsd send error %d\n", error);
		}

		/*
		 * Handle any recoverable (soft) socket errors here. (? ? ?)
		 */
		if (error != EINTR && error != ERESTART &&
		    error != EWOULDBLOCK && error != EPIPE &&
		    error != EMSGSIZE)
			error = 0;
	}
	return (error);
}
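
/*
 * A minimal caller-side sketch of the client contract documented
 * above (illustrative only; the surrounding context is hypothetical,
 * the real callers are nfs_request() and nfs_timer()):
 */
#if 0	/* illustrative sketch, not compiled */
	error = nfs_send(so, nmp->nm_nam, m_copym(rep->r_mreq, 0, M_COPYALL,
	    M_WAIT), rep, l);
	if (error == EINTR)
		return error;	/* rpc terminated; give up */
	if (rep->r_flags & R_MUSTRESEND) {
		/* recoverable failure: nfs_timer() will retransmit it */
	}
#endif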

/*
 * Generate the rpc reply header
 * siz arg. is used to decide if adding a cluster is worthwhile
 */
int
nfs_rephead(int siz, struct nfsrv_descript *nd, struct nfssvc_sock *slp,
    int err, int cache, u_quad_t *frev, struct mbuf **mrq, struct mbuf **mbp,
    char **bposp)
{
	u_int32_t *tl;
	struct mbuf *mreq;
	char *bpos;
	struct mbuf *mb;

	mreq = m_gethdr(M_WAIT, MT_DATA);
	MCLAIM(mreq, &nfs_mowner);
	mb = mreq;
	/*
	 * If this is a big reply, use a cluster else
	 * try and leave leading space for the lower level headers.
	 */
	siz += RPC_REPLYSIZ;
	if (siz >= max_datalen) {
		m_clget(mreq, M_WAIT);
	} else
		mreq->m_data += max_hdr;
	tl = mtod(mreq, u_int32_t *);
	mreq->m_len = 6 * NFSX_UNSIGNED;
	bpos = ((char *)tl) + mreq->m_len;
	*tl++ = txdr_unsigned(nd->nd_retxid);
	*tl++ = rpc_reply;
	if (err == ERPCMISMATCH || (err & NFSERR_AUTHERR)) {
		*tl++ = rpc_msgdenied;
		if (err & NFSERR_AUTHERR) {
			*tl++ = rpc_autherr;
			*tl = txdr_unsigned(err & ~NFSERR_AUTHERR);
			mreq->m_len -= NFSX_UNSIGNED;
			bpos -= NFSX_UNSIGNED;
		} else {
			*tl++ = rpc_mismatch;
			*tl++ = txdr_unsigned(RPC_VER2);
			*tl = txdr_unsigned(RPC_VER2);
		}
	} else {
		*tl++ = rpc_msgaccepted;

		/*
		 * For Kerberos authentication, we must send the nickname
		 * verifier back, otherwise just RPCAUTH_NULL.
		 */
		if (nd->nd_flag & ND_KERBFULL) {
			struct nfsuid *nuidp;
			struct timeval ktvin, ktvout;

			memset(&ktvout, 0, sizeof ktvout);	/* XXX gcc */

			LIST_FOREACH(nuidp,
			    NUIDHASH(slp, kauth_cred_geteuid(nd->nd_cr)),
			    nu_hash) {
				if (kauth_cred_geteuid(nuidp->nu_cr) ==
				    kauth_cred_geteuid(nd->nd_cr) &&
				    (!nd->nd_nam2 || netaddr_match(
				    NU_NETFAM(nuidp), &nuidp->nu_haddr,
				    nd->nd_nam2)))
					break;
			}
			if (nuidp) {
				ktvin.tv_sec =
				    txdr_unsigned(nuidp->nu_timestamp.tv_sec
					- 1);
				ktvin.tv_usec =
				    txdr_unsigned(nuidp->nu_timestamp.tv_usec);

				/*
				 * Encrypt the timestamp in ecb mode using the
				 * session key.
				 */
#ifdef NFSKERB
				XXX
#else
				(void)ktvin.tv_sec;
#endif

				*tl++ = rpc_auth_kerb;
				*tl++ = txdr_unsigned(3 * NFSX_UNSIGNED);
				*tl = ktvout.tv_sec;
				nfsm_build(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
				*tl++ = ktvout.tv_usec;
				*tl++ = txdr_unsigned(
				    kauth_cred_geteuid(nuidp->nu_cr));
			} else {
				*tl++ = 0;
				*tl++ = 0;
			}
		} else {
			*tl++ = 0;
			*tl++ = 0;
		}
		switch (err) {
		case EPROGUNAVAIL:
			*tl = txdr_unsigned(RPC_PROGUNAVAIL);
			break;
		case EPROGMISMATCH:
			*tl = txdr_unsigned(RPC_PROGMISMATCH);
			nfsm_build(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
			*tl++ = txdr_unsigned(2);
			*tl = txdr_unsigned(3);
			break;
		case EPROCUNAVAIL:
			*tl = txdr_unsigned(RPC_PROCUNAVAIL);
			break;
		case EBADRPC:
			*tl = txdr_unsigned(RPC_GARBAGE);
			break;
		default:
			*tl = 0;
			if (err != NFSERR_RETVOID) {
				nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
				if (err)
					*tl = txdr_unsigned(nfsrv_errmap(nd,
					    err));
				else
					*tl = 0;
			}
			break;
		}
	}

	if (mrq != NULL)
		*mrq = mreq;
	*mbp = mb;
	*bposp = bpos;
	if (err != 0 && err != NFSERR_RETVOID)
		nfsstats.srvrpc_errs++;
	return (0);
}
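
/*
 * Shape of the header built above, in RFC 1831 terms (the common
 * accepted-reply case with a RPCAUTH_NULL verifier):
 *	xid | REPLY | MSG_ACCEPTED | verf flavor | verf length | status
 * which accounts for the 6 * NFSX_UNSIGNED bytes (six XDR words)
 * initially reserved in mreq->m_len; the denied and auth-error
 * branches shrink or extend the header as they go.
 */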

static void
nfs_timer_schedule(void)
{

	callout_schedule(&nfs_timer_ch, nfs_ticks);
}

void
nfs_timer_start(void)
{

	if (callout_pending(&nfs_timer_ch))
		return;

	nfs_timer_start_ev.ev_count++;
	nfs_timer_schedule();
}

void
nfs_timer_init(void)
{

	mutex_init(&nfs_timer_lock, MUTEX_DEFAULT, IPL_NONE);
	callout_init(&nfs_timer_ch, 0);
	callout_setfunc(&nfs_timer_ch, nfs_timer, NULL);
	evcnt_attach_dynamic(&nfs_timer_ev, EVCNT_TYPE_MISC, NULL,
	    "nfs", "timer");
	evcnt_attach_dynamic(&nfs_timer_start_ev, EVCNT_TYPE_MISC, NULL,
	    "nfs", "timer start");
	evcnt_attach_dynamic(&nfs_timer_stop_ev, EVCNT_TYPE_MISC, NULL,
	    "nfs", "timer stop");
}

void
nfs_timer_fini(void)
{

	callout_halt(&nfs_timer_ch, NULL);
	callout_destroy(&nfs_timer_ch);
	mutex_destroy(&nfs_timer_lock);
	evcnt_detach(&nfs_timer_ev);
	evcnt_detach(&nfs_timer_start_ev);
	evcnt_detach(&nfs_timer_stop_ev);
}

void
nfs_timer_srvinit(bool (*func)(void))
{

	nfs_timer_srvvec = func;
}

void
nfs_timer_srvfini(void)
{

	mutex_enter(&nfs_timer_lock);
	nfs_timer_srvvec = NULL;
	mutex_exit(&nfs_timer_lock);
}
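
/*
 * Retransmit backoff sketch (illustrative numbers): once a request
 * starts timing out, nfs_timer() below multiplies the rto by
 * successive nfs_backoff[] entries, so an initial timeout of 4 ticks
 * grows as
 *	4, 4*2, 4*4, 4*8, ... 4*256
 * i.e. binary exponential backoff, hard-capped at NFS_MAXTIMEO.
 */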

/*
 * Nfs timer routine
 * Scan the nfsreq list and retransmit any requests that have timed out.
 * To avoid retransmission attempts on STREAM sockets (in the future) make
 * sure to set the r_retry field to 0 (implies nm_retry == 0).
 */
void
nfs_timer(void *arg)
{
	struct nfsreq *rep;
	struct mbuf *m;
	struct socket *so;
	struct nfsmount *nmp;
	int timeo;
	int error;
	bool more = false;

	nfs_timer_ev.ev_count++;

	mutex_enter(softnet_lock);	/* XXX PR 40491 */
	TAILQ_FOREACH(rep, &nfs_reqq, r_chain) {
		more = true;
		nmp = rep->r_nmp;
		if (rep->r_mrep || (rep->r_flags & R_SOFTTERM))
			continue;
		if (nfs_sigintr(nmp, rep, rep->r_lwp)) {
			rep->r_flags |= R_SOFTTERM;
			continue;
		}
		if (rep->r_rtt >= 0) {
			rep->r_rtt++;
			if (nmp->nm_flag & NFSMNT_DUMBTIMR)
				timeo = nmp->nm_timeo;
			else
				timeo = NFS_RTO(nmp,
				    nfs_proct[rep->r_procnum]);
			if (nmp->nm_timeouts > 0)
				timeo *= nfs_backoff[nmp->nm_timeouts - 1];
			if (timeo > NFS_MAXTIMEO)
				timeo = NFS_MAXTIMEO;
			if (rep->r_rtt <= timeo)
				continue;
			if (nmp->nm_timeouts <
			    (sizeof(nfs_backoff) / sizeof(nfs_backoff[0])))
				nmp->nm_timeouts++;
		}
		/*
		 * Check for server not responding
		 */
		if ((rep->r_flags & R_TPRINTFMSG) == 0 &&
		    rep->r_rexmit > nmp->nm_deadthresh) {
			nfs_msg(rep->r_lwp,
			    nmp->nm_mountp->mnt_stat.f_mntfromname,
			    "not responding");
			rep->r_flags |= R_TPRINTFMSG;
		}
		if (rep->r_rexmit >= rep->r_retry) {	/* too many */
			nfsstats.rpctimeouts++;
			rep->r_flags |= R_SOFTTERM;
			continue;
		}
		if (nmp->nm_sotype != SOCK_DGRAM) {
			if (++rep->r_rexmit > NFS_MAXREXMIT)
				rep->r_rexmit = NFS_MAXREXMIT;
			continue;
		}
		if ((so = nmp->nm_so) == NULL)
			continue;

		/*
		 * If there is enough space and the window allows..
		 *	Resend it
		 * Set r_rtt to -1 in case we fail to send it now.
		 */
		/* solock(so);		XXX PR 40491 */
		rep->r_rtt = -1;
		if (sbspace(&so->so_snd) >= rep->r_mreq->m_pkthdr.len &&
		    ((nmp->nm_flag & NFSMNT_DUMBTIMR) ||
		     (rep->r_flags & R_SENT) ||
		     nmp->nm_sent < nmp->nm_cwnd) &&
		    (m = m_copym(rep->r_mreq, 0, M_COPYALL, M_DONTWAIT))) {
			if (so->so_state & SS_ISCONNECTED)
				error = (*so->so_proto->pr_usrreqs->pr_send)(
				    so, m, NULL, NULL, NULL);
			else
				error = (*so->so_proto->pr_usrreqs->pr_send)(
				    so, m, mtod(nmp->nm_nam, struct sockaddr *),
				    NULL, NULL);
			if (error) {
				if (NFSIGNORE_SOERROR(nmp->nm_soflags, error)) {
#ifdef DEBUG
					if (ratecheck(&nfs_timer_last_err_time,
					    &nfs_err_interval))
						printf("%s: ignoring error "
						    "%d\n", __func__, error);
#endif
					so->so_error = 0;
				}
			} else {
				/*
				 * Iff first send, start timing
				 * else turn timing off, backoff timer
				 * and divide congestion window by 2.
				 */
				if (rep->r_flags & R_SENT) {
					rep->r_flags &= ~R_TIMING;
					if (++rep->r_rexmit > NFS_MAXREXMIT)
						rep->r_rexmit = NFS_MAXREXMIT;
					nmp->nm_cwnd >>= 1;
					if (nmp->nm_cwnd < NFS_CWNDSCALE)
						nmp->nm_cwnd = NFS_CWNDSCALE;
					nfsstats.rpcretries++;
				} else {
					rep->r_flags |= R_SENT;
					nmp->nm_sent += NFS_CWNDSCALE;
				}
				rep->r_rtt = 0;
			}
		}
		/* sounlock(so);	XXX PR 40491 */
	}
	mutex_exit(softnet_lock);	/* XXX PR 40491 */

	mutex_enter(&nfs_timer_lock);
	if (nfs_timer_srvvec != NULL) {
		more |= (*nfs_timer_srvvec)();
	}
	mutex_exit(&nfs_timer_lock);

	if (more) {
		nfs_timer_schedule();
	} else {
		nfs_timer_stop_ev.ev_count++;
	}
}

/*
 * Test for a termination condition pending on the process.
 * This is used for NFSMNT_INT mounts.
 */
int
nfs_sigintr(struct nfsmount *nmp, struct nfsreq *rep, struct lwp *l)
{
	sigset_t ss;

	if (rep && (rep->r_flags & R_SOFTTERM))
		return (EINTR);
	if (!(nmp->nm_flag & NFSMNT_INT))
		return (0);
	if (l) {
		sigpending1(l, &ss);
#if 0
		sigminusset(&l->l_proc->p_sigctx.ps_sigignore, &ss);
#endif
		if (sigismember(&ss, SIGINT) || sigismember(&ss, SIGTERM) ||
		    sigismember(&ss, SIGKILL) || sigismember(&ss, SIGHUP) ||
		    sigismember(&ss, SIGQUIT))
			return (EINTR);
	}
	return (0);
}

int
nfs_rcvlock(struct nfsmount *nmp, struct nfsreq *rep)
{
	int *flagp = &nmp->nm_iflag;
	int slptimeo = 0;
	bool catch_p;
	int error = 0;

	KASSERT(nmp == rep->r_nmp);

	if (nmp->nm_flag & NFSMNT_SOFT)
		slptimeo = nmp->nm_retry * nmp->nm_timeo;

	if (nmp->nm_iflag & NFSMNT_DISMNTFORCE)
		slptimeo = hz;

	catch_p = (nmp->nm_flag & NFSMNT_INT) != 0;
	mutex_enter(&nmp->nm_lock);
	while (/* CONSTCOND */ true) {
		if (*flagp & NFSMNT_DISMNT) {
			cv_signal(&nmp->nm_disconcv);
			error = EIO;
			break;
		}
		/*
		 * If our reply was received while we were sleeping,
		 * then just return without taking the lock to avoid a
		 * situation where a single iod could 'capture' the
		 * receive lock.
		 */
		if (rep->r_mrep != NULL) {
			cv_signal(&nmp->nm_rcvcv);
			error = EALREADY;
			break;
		}
		if (nfs_sigintr(rep->r_nmp, rep, rep->r_lwp)) {
			cv_signal(&nmp->nm_rcvcv);
			error = EINTR;
			break;
		}
		if ((*flagp & NFSMNT_RCVLOCK) == 0) {
			*flagp |= NFSMNT_RCVLOCK;
			break;
		}
		if (catch_p) {
			error = cv_timedwait_sig(&nmp->nm_rcvcv, &nmp->nm_lock,
			    slptimeo);
		} else {
			error = cv_timedwait(&nmp->nm_rcvcv, &nmp->nm_lock,
			    slptimeo);
		}
		if (error) {
			if ((error == EWOULDBLOCK) &&
			    (nmp->nm_flag & NFSMNT_SOFT)) {
				error = EIO;
				break;
			}
			error = 0;
		}
		if (catch_p) {
			catch_p = false;
			slptimeo = 2 * hz;
		}
	}
	mutex_exit(&nmp->nm_lock);
	return error;
}

/*
 * Unlock the stream socket for others.
 */
void
nfs_rcvunlock(struct nfsmount *nmp)
{

	mutex_enter(&nmp->nm_lock);
	if ((nmp->nm_iflag & NFSMNT_RCVLOCK) == 0)
		panic("nfs rcvunlock");
	nmp->nm_iflag &= ~NFSMNT_RCVLOCK;
	cv_signal(&nmp->nm_rcvcv);
	mutex_exit(&nmp->nm_lock);
}
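
/*
 * Layout of the call header that nfs_getreq() dissects below, in
 * RFC 1831 terms (the 10 * NFSX_UNSIGNED case, when the record still
 * carries its xid and direction words):
 *	xid | CALL | rpcvers(2) | prog | vers | proc |
 *	cred flavor | cred length | <first words of the cred body>
 * Anything malformed is answered with EBADRPC or an RPC-level error
 * status rather than being parsed further.
 */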

/*
 * Parse an RPC request
 * - verify it
 * - allocate and fill in the cred.
 */
int
nfs_getreq(struct nfsrv_descript *nd, struct nfsd *nfsd, int has_header)
{
	int len, i;
	u_int32_t *tl;
	int32_t t1;
	struct uio uio;
	struct iovec iov;
	char *dpos, *cp2, *cp;
	u_int32_t nfsvers, auth_type;
	uid_t nickuid;
	int error = 0, ticklen;
	struct mbuf *mrep, *md;
	struct nfsuid *nuidp;
	struct timeval tvin, tvout;

	memset(&tvout, 0, sizeof tvout);	/* XXX gcc */

	KASSERT(nd->nd_cr == NULL);
	mrep = nd->nd_mrep;
	md = nd->nd_md;
	dpos = nd->nd_dpos;
	if (has_header) {
		nfsm_dissect(tl, u_int32_t *, 10 * NFSX_UNSIGNED);
		nd->nd_retxid = fxdr_unsigned(u_int32_t, *tl++);
		if (*tl++ != rpc_call) {
			m_freem(mrep);
			return (EBADRPC);
		}
	} else
		nfsm_dissect(tl, u_int32_t *, 8 * NFSX_UNSIGNED);
	nd->nd_repstat = 0;
	nd->nd_flag = 0;
	if (*tl++ != rpc_vers) {
		nd->nd_repstat = ERPCMISMATCH;
		nd->nd_procnum = NFSPROC_NOOP;
		return (0);
	}
	if (*tl != nfs_prog) {
		nd->nd_repstat = EPROGUNAVAIL;
		nd->nd_procnum = NFSPROC_NOOP;
		return (0);
	}
	tl++;
	nfsvers = fxdr_unsigned(u_int32_t, *tl++);
	if (nfsvers < NFS_VER2 || nfsvers > NFS_VER3) {
		nd->nd_repstat = EPROGMISMATCH;
		nd->nd_procnum = NFSPROC_NOOP;
		return (0);
	}
	if (nfsvers == NFS_VER3)
		nd->nd_flag = ND_NFSV3;
	nd->nd_procnum = fxdr_unsigned(u_int32_t, *tl++);
	if (nd->nd_procnum == NFSPROC_NULL)
		return (0);
	if (nd->nd_procnum > NFSPROC_COMMIT ||
	    (!nd->nd_flag && nd->nd_procnum > NFSV2PROC_STATFS)) {
		nd->nd_repstat = EPROCUNAVAIL;
		nd->nd_procnum = NFSPROC_NOOP;
		return (0);
	}
	if ((nd->nd_flag & ND_NFSV3) == 0)
		nd->nd_procnum = nfsv3_procid[nd->nd_procnum];
	auth_type = *tl++;
	len = fxdr_unsigned(int, *tl++);
	if (len < 0 || len > RPCAUTH_MAXSIZ) {
		m_freem(mrep);
		return (EBADRPC);
	}

	nd->nd_flag &= ~ND_KERBAUTH;
	/*
	 * Handle auth_unix or auth_kerb.
	 */
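
	/*
	 * The auth_unix credential body handled first looks like (the
	 * classic ONC RPC AUTH_UNIX/AUTH_SYS flavor):
	 *	stamp | machinename<> | uid | gid | gids<0..16>
	 * The machinename is skipped (bounded by NFS_MAXNAMLEN), uid
	 * and gid are copied into the kauth credential, and the gid
	 * list is clamped to NGROUPS.
	 */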

	if (auth_type == rpc_auth_unix) {
		uid_t uid;
		gid_t gid;

		nd->nd_cr = kauth_cred_alloc();
		len = fxdr_unsigned(int, *++tl);
		if (len < 0 || len > NFS_MAXNAMLEN) {
			m_freem(mrep);
			error = EBADRPC;
			goto errout;
		}
		nfsm_adv(nfsm_rndup(len));
		nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);

		uid = fxdr_unsigned(uid_t, *tl++);
		gid = fxdr_unsigned(gid_t, *tl++);
		kauth_cred_setuid(nd->nd_cr, uid);
		kauth_cred_seteuid(nd->nd_cr, uid);
		kauth_cred_setsvuid(nd->nd_cr, uid);
		kauth_cred_setgid(nd->nd_cr, gid);
		kauth_cred_setegid(nd->nd_cr, gid);
		kauth_cred_setsvgid(nd->nd_cr, gid);

		len = fxdr_unsigned(int, *tl);
		if (len < 0 || len > RPCAUTH_UNIXGIDS) {
			m_freem(mrep);
			error = EBADRPC;
			goto errout;
		}
		nfsm_dissect(tl, u_int32_t *, (len + 2) * NFSX_UNSIGNED);

		if (len > 0) {
			size_t grbuf_size = min(len, NGROUPS) * sizeof(gid_t);
			gid_t *grbuf = kmem_alloc(grbuf_size, KM_SLEEP);

			for (i = 0; i < len; i++) {
				if (i < NGROUPS) /* XXX elad */
					grbuf[i] = fxdr_unsigned(gid_t, *tl++);
				else
					tl++;
			}
			kauth_cred_setgroups(nd->nd_cr, grbuf,
			    min(len, NGROUPS), -1, UIO_SYSSPACE);
			kmem_free(grbuf, grbuf_size);
		}

		len = fxdr_unsigned(int, *++tl);
		if (len < 0 || len > RPCAUTH_MAXSIZ) {
			m_freem(mrep);
			error = EBADRPC;
			goto errout;
		}
		if (len > 0)
			nfsm_adv(nfsm_rndup(len));
	} else if (auth_type == rpc_auth_kerb) {
		switch (fxdr_unsigned(int, *tl++)) {
		case RPCAKN_FULLNAME:
			ticklen = fxdr_unsigned(int, *tl);
			*((u_int32_t *)nfsd->nfsd_authstr) = *tl;
			uio.uio_resid = nfsm_rndup(ticklen) + NFSX_UNSIGNED;
			nfsd->nfsd_authlen = uio.uio_resid + NFSX_UNSIGNED;
			if (uio.uio_resid > (len - 2 * NFSX_UNSIGNED)) {
				m_freem(mrep);
				error = EBADRPC;
				goto errout;
			}
			uio.uio_offset = 0;
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			UIO_SETUP_SYSSPACE(&uio);
			iov.iov_base = (void *)&nfsd->nfsd_authstr[4];
			iov.iov_len = RPCAUTH_MAXSIZ - 4;
			nfsm_mtouio(&uio, uio.uio_resid);
			nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
			if (*tl++ != rpc_auth_kerb ||
			    fxdr_unsigned(int, *tl) != 4 * NFSX_UNSIGNED) {
				printf("Bad kerb verifier\n");
				nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADVERF);
				nd->nd_procnum = NFSPROC_NOOP;
				return (0);
			}
			nfsm_dissect(cp, void *, 4 * NFSX_UNSIGNED);
			tl = (u_int32_t *)cp;
			if (fxdr_unsigned(int, *tl) != RPCAKN_FULLNAME) {
				printf("Not fullname kerb verifier\n");
				nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADVERF);
				nd->nd_procnum = NFSPROC_NOOP;
				return (0);
			}
			cp += NFSX_UNSIGNED;
			memcpy(nfsd->nfsd_verfstr, cp, 3 * NFSX_UNSIGNED);
			nfsd->nfsd_verflen = 3 * NFSX_UNSIGNED;
			nd->nd_flag |= ND_KERBFULL;
			nfsd->nfsd_flag |= NFSD_NEEDAUTH;
			break;
		case RPCAKN_NICKNAME:
			if (len != 2 * NFSX_UNSIGNED) {
				printf("Kerb nickname short\n");
				nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADCRED);
				nd->nd_procnum = NFSPROC_NOOP;
				return (0);
			}
			nickuid = fxdr_unsigned(uid_t, *tl);
			nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
			if (*tl++ != rpc_auth_kerb ||
			    fxdr_unsigned(int, *tl) != 3 * NFSX_UNSIGNED) {
				printf("Kerb nick verifier bad\n");
				nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADVERF);
				nd->nd_procnum = NFSPROC_NOOP;
				return (0);
			}
			nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
			tvin.tv_sec = *tl++;
			tvin.tv_usec = *tl;

			LIST_FOREACH(nuidp, NUIDHASH(nfsd->nfsd_slp, nickuid),
			    nu_hash) {
				if (kauth_cred_geteuid(nuidp->nu_cr) ==
				    nickuid &&
				    (!nd->nd_nam2 ||
				     netaddr_match(NU_NETFAM(nuidp),
				     &nuidp->nu_haddr, nd->nd_nam2)))
					break;
			}
			if (!nuidp) {
				nd->nd_repstat =
				    (NFSERR_AUTHERR|AUTH_REJECTCRED);
				nd->nd_procnum = NFSPROC_NOOP;
				return (0);
			}

			/*
			 * Now, decrypt the timestamp using the session key
			 * and validate it.
			 */
#ifdef NFSKERB
			XXX
#else
			(void)tvin.tv_sec;
#endif

			tvout.tv_sec = fxdr_unsigned(long, tvout.tv_sec);
			tvout.tv_usec = fxdr_unsigned(long, tvout.tv_usec);
			if (nuidp->nu_expire < time_second ||
			    nuidp->nu_timestamp.tv_sec > tvout.tv_sec ||
			    (nuidp->nu_timestamp.tv_sec == tvout.tv_sec &&
			     nuidp->nu_timestamp.tv_usec > tvout.tv_usec)) {
				nuidp->nu_expire = 0;
				nd->nd_repstat =
				    (NFSERR_AUTHERR|AUTH_REJECTVERF);
				nd->nd_procnum = NFSPROC_NOOP;
				return (0);
			}
			kauth_cred_hold(nuidp->nu_cr);
			nd->nd_cr = nuidp->nu_cr;
			nd->nd_flag |= ND_KERBNICK;
		}
	} else {
		nd->nd_repstat = (NFSERR_AUTHERR | AUTH_REJECTCRED);
		nd->nd_procnum = NFSPROC_NOOP;
		return (0);
	}

	nd->nd_md = md;
	nd->nd_dpos = dpos;
	KASSERT((nd->nd_cr == NULL && (nfsd->nfsd_flag & NFSD_NEEDAUTH) != 0)
	    || (nd->nd_cr != NULL && (nfsd->nfsd_flag & NFSD_NEEDAUTH) == 0));
	return (0);
nfsmout:
errout:
	KASSERT(error != 0);
	if (nd->nd_cr != NULL) {
		kauth_cred_free(nd->nd_cr);
		nd->nd_cr = NULL;
	}
	return (error);
}

int
nfs_msg(struct lwp *l, const char *server, const char *msg)
{
	tpr_t tpr;

#if 0 /* XXX nfs_timer can't block on proc_lock */
	if (l)
		tpr = tprintf_open(l->l_proc);
	else
#endif
		tpr = NULL;
	tprintf(tpr, "nfs server %s: %s\n", server, msg);
	tprintf_close(tpr);
	return (0);
}

static struct pool nfs_srvdesc_pool;

void
nfsdreq_init(void)
{

	pool_init(&nfs_srvdesc_pool, sizeof(struct nfsrv_descript),
	    0, 0, 0, "nfsrvdescpl", &pool_allocator_nointr, IPL_NONE);
}

void
nfsdreq_fini(void)
{

	pool_destroy(&nfs_srvdesc_pool);
}

struct nfsrv_descript *
nfsdreq_alloc(void)
{
	struct nfsrv_descript *nd;

	nd = pool_get(&nfs_srvdesc_pool, PR_WAITOK);
	nd->nd_cr = NULL;
	return nd;
}

void
nfsdreq_free(struct nfsrv_descript *nd)
{
	kauth_cred_t cr;

	cr = nd->nd_cr;
	if (cr != NULL) {
		kauth_cred_free(cr);
	}
	pool_put(&nfs_srvdesc_pool, nd);
}