/*	$NetBSD: nfs_socket.c,v 1.194 2015/04/03 20:01:07 rtr Exp $	*/

/*
 * Copyright (c) 1989, 1991, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_socket.c	8.5 (Berkeley) 3/30/95
 */

/*
 * Socket operations for use by nfs
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nfs_socket.c,v 1.194 2015/04/03 20:01:07 rtr Exp $");

#ifdef _KERNEL_OPT
#include "opt_nfs.h"
#include "opt_mbuftrace.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/evcnt.h>
#include <sys/callout.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>
#include <sys/vnode.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/syslog.h>
#include <sys/tprintf.h>
#include <sys/namei.h>
#include <sys/signal.h>
#include <sys/signalvar.h>
#include <sys/kauth.h>

#include <netinet/in.h>
#include <netinet/tcp.h>

#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/xdr_subs.h>
#include <nfs/nfsm_subs.h>
#include <nfs/nfsmount.h>
#include <nfs/nfsnode.h>
#include <nfs/nfsrtt.h>
#include <nfs/nfs_var.h>

#ifdef MBUFTRACE
struct mowner nfs_mowner = MOWNER_INIT("nfs", "");
#endif

/*
 * Estimate rto for an nfs rpc sent via an unreliable datagram.
 * Use the mean and mean deviation of rtt for the appropriate type of rpc
 * for the frequent rpcs and a default for the others.
 * The justification for doing "other" this way is that these rpcs
 * happen so infrequently that the timer estimate would probably be stale.
 * Also, since many of these rpcs are
 * non-idempotent, a conservative timeout is desired.
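 * For the table below to read as written, nm_srtt must hold the smoothed
 * rtt A scaled by 8 and nm_sdrtt the mean deviation D scaled by 4 (see
 * the NFS_TIMEO << 3 initialization in nfs_connect()); the shifts in
 * NFS_RTO then recover approximately: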
 * getattr, lookup - A+2D
 * read, write - A+4D
 * other - nm_timeo
 */
#define	NFS_RTO(n, t) \
	((t) == 0 ? (n)->nm_timeo : \
	 ((t) < 3 ? \
	  (((((n)->nm_srtt[t-1] + 3) >> 2) + (n)->nm_sdrtt[t-1] + 1) >> 1) : \
	  ((((n)->nm_srtt[t-1] + 7) >> 3) + (n)->nm_sdrtt[t-1] + 1)))
#define	NFS_SRTT(r)	(r)->r_nmp->nm_srtt[nfs_proct[(r)->r_procnum] - 1]
#define	NFS_SDRTT(r)	(r)->r_nmp->nm_sdrtt[nfs_proct[(r)->r_procnum] - 1]

/*
 * Defines which timer to use for the procnum.
 * 0 - default
 * 1 - getattr
 * 2 - lookup
 * 3 - read
 * 4 - write
 */
const int nfs_proct[NFS_NPROCS] = {
	[NFSPROC_NULL] = 0,
	[NFSPROC_GETATTR] = 1,
	[NFSPROC_SETATTR] = 0,
	[NFSPROC_LOOKUP] = 2,
	[NFSPROC_ACCESS] = 1,
	[NFSPROC_READLINK] = 3,
	[NFSPROC_READ] = 3,
	[NFSPROC_WRITE] = 4,
	[NFSPROC_CREATE] = 0,
	[NFSPROC_MKDIR] = 0,
	[NFSPROC_SYMLINK] = 0,
	[NFSPROC_MKNOD] = 0,
	[NFSPROC_REMOVE] = 0,
	[NFSPROC_RMDIR] = 0,
	[NFSPROC_RENAME] = 0,
	[NFSPROC_LINK] = 0,
	[NFSPROC_READDIR] = 3,
	[NFSPROC_READDIRPLUS] = 3,
	[NFSPROC_FSSTAT] = 0,
	[NFSPROC_FSINFO] = 0,
	[NFSPROC_PATHCONF] = 0,
	[NFSPROC_COMMIT] = 0,
	[NFSPROC_NOOP] = 0,
};

#ifdef DEBUG
/*
 * Avoid spamming the console with debugging messages.  We only print
 * the nfs timer and reply error debugs every 10 seconds.
 */
const struct timeval nfs_err_interval = { 10, 0 };
struct timeval nfs_reply_last_err_time;
struct timeval nfs_timer_last_err_time;
#endif

/*
 * There is a congestion window for outstanding rpcs maintained per mount
 * point.  The cwnd size is adjusted in roughly the way that:
 * Van Jacobson, "Congestion Avoidance and Control", in Proceedings of
 * SIGCOMM '88, ACM, August 1988,
 * describes for TCP.  The cwnd size is chopped in half on a retransmit
 * timeout and incremented by 1/cwnd when each rpc reply is received and
 * a full cwnd of rpcs is in progress.
 * (The sent count and cwnd are scaled for integer arithmetic.)
 * Variants of "slow start" were tried and were found to be too much of a
 * performance hit (average rtt 3 times larger),
 * I suspect due to the large rtt that nfs rpcs have.
 */
int nfsrtton = 0;
struct nfsrtt nfsrtt;
static const int nfs_backoff[8] = { 2, 4, 8, 16, 32, 64, 128, 256, };
struct nfsreqhead nfs_reqq;
static callout_t nfs_timer_ch;
static struct evcnt nfs_timer_ev;
static struct evcnt nfs_timer_start_ev;
static struct evcnt nfs_timer_stop_ev;
static kmutex_t nfs_timer_lock;
static bool (*nfs_timer_srvvec)(void);

/*
 * Initialize sockets and congestion for a new NFS connection.
 * We do not free the sockaddr if error.
 */
int
nfs_connect(struct nfsmount *nmp, struct nfsreq *rep, struct lwp *l)
{
	struct socket *so;
	int error, rcvreserve, sndreserve;
	struct sockaddr *saddr;
	struct sockaddr_in sin;
	struct sockaddr_in6 sin6;
	int val;

	nmp->nm_so = NULL;
	saddr = mtod(nmp->nm_nam, struct sockaddr *);
	error = socreate(saddr->sa_family, &nmp->nm_so,
	    nmp->nm_sotype, nmp->nm_soproto, l, NULL);
	if (error)
		goto bad;
	so = nmp->nm_so;
#ifdef MBUFTRACE
	so->so_mowner = &nfs_mowner;
	so->so_rcv.sb_mowner = &nfs_mowner;
	so->so_snd.sb_mowner = &nfs_mowner;
#endif
	nmp->nm_soflags = so->so_proto->pr_flags;

	/*
	 * Some servers require that the client port be a reserved port
	 * number.
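	 * IP_PORTRANGE_LOW makes the kernel pick the local port from the
	 * privileged (< IPPORT_RESERVED) range when we bind with
	 * sin_port = 0 below, which is what such servers check for.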
	 */
	if (saddr->sa_family == AF_INET && (nmp->nm_flag & NFSMNT_RESVPORT)) {
		val = IP_PORTRANGE_LOW;

		if ((error = so_setsockopt(NULL, so, IPPROTO_IP, IP_PORTRANGE,
		    &val, sizeof(val))))
			goto bad;
		sin.sin_len = sizeof(struct sockaddr_in);
		sin.sin_family = AF_INET;
		sin.sin_addr.s_addr = INADDR_ANY;
		sin.sin_port = 0;
		error = sobind(so, (struct sockaddr *)&sin, &lwp0);
		if (error)
			goto bad;
	}
	if (saddr->sa_family == AF_INET6 && (nmp->nm_flag & NFSMNT_RESVPORT)) {
		val = IPV6_PORTRANGE_LOW;

		if ((error = so_setsockopt(NULL, so, IPPROTO_IPV6,
		    IPV6_PORTRANGE, &val, sizeof(val))))
			goto bad;
		memset(&sin6, 0, sizeof(sin6));
		sin6.sin6_len = sizeof(struct sockaddr_in6);
		sin6.sin6_family = AF_INET6;
		error = sobind(so, (struct sockaddr *)&sin6, &lwp0);
		if (error)
			goto bad;
	}

	/*
	 * Protocols that do not require connections may be optionally left
	 * unconnected for servers that reply from a port other than NFS_PORT.
	 */
	solock(so);
	if (nmp->nm_flag & NFSMNT_NOCONN) {
		if (nmp->nm_soflags & PR_CONNREQUIRED) {
			sounlock(so);
			error = ENOTCONN;
			goto bad;
		}
	} else {
		error = soconnect(so, nmp->nm_nam, l);
		if (error) {
			sounlock(so);
			goto bad;
		}

		/*
		 * Wait for the connection to complete.  Cribbed from the
		 * connect system call but with the wait timing out so
		 * that interruptible mounts don't hang here for a long time.
		 */
		while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) {
			(void)sowait(so, false, 2 * hz);
			if ((so->so_state & SS_ISCONNECTING) &&
			    so->so_error == 0 && rep &&
			    (error = nfs_sigintr(nmp, rep, rep->r_lwp)) != 0) {
				so->so_state &= ~SS_ISCONNECTING;
				sounlock(so);
				goto bad;
			}
		}
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			sounlock(so);
			goto bad;
		}
	}
	if (nmp->nm_flag & (NFSMNT_SOFT | NFSMNT_INT)) {
		so->so_rcv.sb_timeo = (5 * hz);
		so->so_snd.sb_timeo = (5 * hz);
	} else {
		/*
		 * enable receive timeout to detect server crash and
		 * reconnect.  otherwise, we can be stuck in soreceive
		 * forever.
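		 * the send side stays blocking (sb_timeo of 0) since, with
		 * SB_NOINTR set below, hard non-interruptible mounts are
		 * expected to wait on the server indefinitely.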
		 */
		so->so_rcv.sb_timeo = (5 * hz);
		so->so_snd.sb_timeo = 0;
	}
	if (nmp->nm_sotype == SOCK_DGRAM) {
		sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR) * 3;
		rcvreserve = (max(nmp->nm_rsize, nmp->nm_readdirsize) +
		    NFS_MAXPKTHDR) * 2;
	} else if (nmp->nm_sotype == SOCK_SEQPACKET) {
		sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR) * 3;
		rcvreserve = (max(nmp->nm_rsize, nmp->nm_readdirsize) +
		    NFS_MAXPKTHDR) * 3;
	} else {
		sounlock(so);
		if (nmp->nm_sotype != SOCK_STREAM)
			panic("nfscon sotype");
		if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
			val = 1;
			so_setsockopt(NULL, so, SOL_SOCKET, SO_KEEPALIVE, &val,
			    sizeof(val));
		}
		if (so->so_proto->pr_protocol == IPPROTO_TCP) {
			val = 1;
			so_setsockopt(NULL, so, IPPROTO_TCP, TCP_NODELAY, &val,
			    sizeof(val));
		}
		sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR +
		    sizeof (u_int32_t)) * 3;
		rcvreserve = (nmp->nm_rsize + NFS_MAXPKTHDR +
		    sizeof (u_int32_t)) * 3;
		solock(so);
	}
	error = soreserve(so, sndreserve, rcvreserve);
	if (error) {
		sounlock(so);
		goto bad;
	}
	so->so_rcv.sb_flags |= SB_NOINTR;
	so->so_snd.sb_flags |= SB_NOINTR;
	sounlock(so);

	/* Initialize other non-zero congestion variables */
	nmp->nm_srtt[0] = nmp->nm_srtt[1] = nmp->nm_srtt[2] = nmp->nm_srtt[3] =
	    NFS_TIMEO << 3;
	nmp->nm_sdrtt[0] = nmp->nm_sdrtt[1] = nmp->nm_sdrtt[2] =
	    nmp->nm_sdrtt[3] = 0;
	nmp->nm_cwnd = NFS_MAXCWND / 2;	    /* Initial send window */
	nmp->nm_sent = 0;
	nmp->nm_timeouts = 0;
	return (0);

bad:
	nfs_disconnect(nmp);
	return (error);
}

/*
 * Reconnect routine:
 * Called when a connection is broken on a reliable protocol.
 * - clean up the old socket
 * - nfs_connect() again
 * - set R_MUSTRESEND for all outstanding requests on mount point
 * If this fails the mount point is DEAD!
 * nb: Must be called with the nfs_sndlock() set on the mount point.
 */
int
nfs_reconnect(struct nfsreq *rep)
{
	struct nfsreq *rp;
	struct nfsmount *nmp = rep->r_nmp;
	int error;

	nfs_disconnect(nmp);
	while ((error = nfs_connect(nmp, rep, &lwp0)) != 0) {
		if (error == EINTR || error == ERESTART)
			return (EINTR);
		kpause("nfscn2", false, hz, NULL);
	}

	/*
	 * Loop through outstanding request list and fix up all requests
	 * on old socket.
	 */
	TAILQ_FOREACH(rp, &nfs_reqq, r_chain) {
		if (rp->r_nmp == nmp) {
			if ((rp->r_flags & R_MUSTRESEND) == 0)
				rp->r_flags |= R_MUSTRESEND | R_REXMITTED;
			rp->r_rexmit = 0;
		}
	}
	return (0);
}

/*
 * NFS disconnect.  Clean up and unlink.
 */
void
nfs_disconnect(struct nfsmount *nmp)
{
	struct socket *so;
	int drain = 0;

	if (nmp->nm_so) {
		so = nmp->nm_so;
		nmp->nm_so = NULL;
		solock(so);
		soshutdown(so, SHUT_RDWR);
		sounlock(so);
		drain = (nmp->nm_iflag & NFSMNT_DISMNT) != 0;
		if (drain) {
			/*
			 * soshutdown() above should wake up the current
			 * listener.
			 * Now wake up those waiting for the receive lock, and
			 * wait for them to go away unhappy, to prevent *nmp
			 * from evaporating while they're sleeping.
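			 * (A waiter that sees NFSMNT_DISMNT signals
			 * nm_disconcv and bails out with EIO; see
			 * nfs_rcvlock() below.)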
			 */
			mutex_enter(&nmp->nm_lock);
			while (nmp->nm_waiters > 0) {
				cv_broadcast(&nmp->nm_rcvcv);
				cv_broadcast(&nmp->nm_sndcv);
				cv_wait(&nmp->nm_disconcv, &nmp->nm_lock);
			}
			mutex_exit(&nmp->nm_lock);
		}
		soclose(so);
	}
#ifdef DIAGNOSTIC
	if (drain && (nmp->nm_waiters > 0))
		panic("nfs_disconnect: waiters left after drain?");
#endif
}

void
nfs_safedisconnect(struct nfsmount *nmp)
{
	struct nfsreq dummyreq;

	memset(&dummyreq, 0, sizeof(dummyreq));
	dummyreq.r_nmp = nmp;
	nfs_rcvlock(nmp, &dummyreq); /* XXX ignored error return */
	nfs_disconnect(nmp);
	nfs_rcvunlock(nmp);
}

/*
 * This is the nfs send routine.  For connection based socket types, it
 * must be called with an nfs_sndlock() on the socket.
 * "rep == NULL" indicates that it has been called from a server.
 * For the client side:
 * - return EINTR if the RPC is terminated, 0 otherwise
 * - set R_MUSTRESEND if the send fails for any reason
 * - do any cleanup required by recoverable socket errors (? ? ?)
 * For the server side:
 * - return EINTR or ERESTART if interrupted by a signal
 * - return EPIPE if a connection is lost for connection based sockets (TCP...)
 * - do any cleanup required by recoverable socket errors (? ? ?)
 */
int
nfs_send(struct socket *so, struct mbuf *nam, struct mbuf *top,
    struct nfsreq *rep, struct lwp *l)
{
	struct mbuf *sendnam;
	int error, soflags, flags;

	/* XXX nfs_doio()/nfs_request() calls with rep->r_lwp == NULL */
	if (l == NULL && rep->r_lwp == NULL)
		l = curlwp;

	if (rep) {
		if (rep->r_flags & R_SOFTTERM) {
			m_freem(top);
			return (EINTR);
		}
		if ((so = rep->r_nmp->nm_so) == NULL) {
			rep->r_flags |= R_MUSTRESEND;
			m_freem(top);
			return (0);
		}
		rep->r_flags &= ~R_MUSTRESEND;
		soflags = rep->r_nmp->nm_soflags;
	} else
		soflags = so->so_proto->pr_flags;
	if ((soflags & PR_CONNREQUIRED) || (so->so_state & SS_ISCONNECTED))
		sendnam = NULL;
	else
		sendnam = nam;
	if (so->so_type == SOCK_SEQPACKET)
		flags = MSG_EOR;
	else
		flags = 0;

	error = (*so->so_send)(so, sendnam, NULL, top, NULL, flags, l);
	if (error) {
		if (rep) {
			if (error == ENOBUFS && so->so_type == SOCK_DGRAM) {
				/*
				 * We're too fast for the network/driver,
				 * and UDP isn't flowcontrolled.
				 * We need to resend.  This is not fatal,
				 * just try again.
				 *
				 * Could be smarter here by doing some sort
				 * of a backoff, but this is rare.
				 */
				rep->r_flags |= R_MUSTRESEND;
			} else {
				if (error != EPIPE)
					log(LOG_INFO,
					    "nfs send error %d for %s\n",
					    error,
					    rep->r_nmp->nm_mountp->
					    mnt_stat.f_mntfromname);
				/*
				 * Deal with errors for the client side.
				 */
				if (rep->r_flags & R_SOFTTERM)
					error = EINTR;
				else if (error != EMSGSIZE)
					rep->r_flags |= R_MUSTRESEND;
			}
		} else {
			/*
			 * See above.  This error can happen under normal
			 * circumstances and the log is too noisy.
			 * The error will still show up in nfsstat.
			 */
			if (error != ENOBUFS || so->so_type != SOCK_DGRAM)
				log(LOG_INFO, "nfsd send error %d\n", error);
		}

		/*
		 * Handle any recoverable (soft) socket errors here. (? ? ?)
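		 * Anything other than an interrupt, a transient would-block,
		 * a lost connection, or an oversized message is swallowed;
		 * the request will be retransmitted by the timer.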
		 */
		if (error != EINTR && error != ERESTART &&
		    error != EWOULDBLOCK && error != EPIPE &&
		    error != EMSGSIZE)
			error = 0;
	}
	return (error);
}

/*
 * Generate the rpc reply header
 * siz arg. is used to decide if adding a cluster is worthwhile
 */
int
nfs_rephead(int siz, struct nfsrv_descript *nd, struct nfssvc_sock *slp,
    int err, int cache, u_quad_t *frev, struct mbuf **mrq, struct mbuf **mbp,
    char **bposp)
{
	u_int32_t *tl;
	struct mbuf *mreq;
	char *bpos;
	struct mbuf *mb;

	mreq = m_gethdr(M_WAIT, MT_DATA);
	MCLAIM(mreq, &nfs_mowner);
	mb = mreq;
	/*
	 * If this is a big reply, use a cluster else
	 * try and leave leading space for the lower level headers.
	 */
	siz += RPC_REPLYSIZ;
	if (siz >= max_datalen) {
		m_clget(mreq, M_WAIT);
	} else
		mreq->m_data += max_hdr;
	tl = mtod(mreq, u_int32_t *);
	mreq->m_len = 6 * NFSX_UNSIGNED;
	bpos = ((char *)tl) + mreq->m_len;
	*tl++ = txdr_unsigned(nd->nd_retxid);
	*tl++ = rpc_reply;
	if (err == ERPCMISMATCH || (err & NFSERR_AUTHERR)) {
		*tl++ = rpc_msgdenied;
		if (err & NFSERR_AUTHERR) {
			*tl++ = rpc_autherr;
			*tl = txdr_unsigned(err & ~NFSERR_AUTHERR);
			mreq->m_len -= NFSX_UNSIGNED;
			bpos -= NFSX_UNSIGNED;
		} else {
			*tl++ = rpc_mismatch;
			*tl++ = txdr_unsigned(RPC_VER2);
			*tl = txdr_unsigned(RPC_VER2);
		}
	} else {
		*tl++ = rpc_msgaccepted;

		/*
		 * For Kerberos authentication, we must send the nickname
		 * verifier back, otherwise just RPCAUTH_NULL.
		 */
		if (nd->nd_flag & ND_KERBFULL) {
			struct nfsuid *nuidp;
			struct timeval ktvin, ktvout;

			memset(&ktvout, 0, sizeof ktvout);	/* XXX gcc */

			LIST_FOREACH(nuidp,
			    NUIDHASH(slp, kauth_cred_geteuid(nd->nd_cr)),
			    nu_hash) {
				if (kauth_cred_geteuid(nuidp->nu_cr) ==
				    kauth_cred_geteuid(nd->nd_cr) &&
				    (!nd->nd_nam2 || netaddr_match(
				    NU_NETFAM(nuidp), &nuidp->nu_haddr,
				    nd->nd_nam2)))
					break;
			}
			if (nuidp) {
				ktvin.tv_sec =
				    txdr_unsigned(nuidp->nu_timestamp.tv_sec
					- 1);
				ktvin.tv_usec =
				    txdr_unsigned(nuidp->nu_timestamp.tv_usec);

				/*
				 * Encrypt the timestamp in ecb mode using the
				 * session key.
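				 * (The NFSKERB code to do so is only a
				 * placeholder below, so as it stands ktvout
				 * remains all zeroes.)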
				 */
#ifdef NFSKERB
				XXX
#else
				(void)ktvin.tv_sec;
#endif

				*tl++ = rpc_auth_kerb;
				*tl++ = txdr_unsigned(3 * NFSX_UNSIGNED);
				*tl = ktvout.tv_sec;
				nfsm_build(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
				*tl++ = ktvout.tv_usec;
				*tl++ = txdr_unsigned(
				    kauth_cred_geteuid(nuidp->nu_cr));
			} else {
				*tl++ = 0;
				*tl++ = 0;
			}
		} else {
			*tl++ = 0;
			*tl++ = 0;
		}
		switch (err) {
		case EPROGUNAVAIL:
			*tl = txdr_unsigned(RPC_PROGUNAVAIL);
			break;
		case EPROGMISMATCH:
			*tl = txdr_unsigned(RPC_PROGMISMATCH);
			nfsm_build(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
			*tl++ = txdr_unsigned(2);
			*tl = txdr_unsigned(3);
			break;
		case EPROCUNAVAIL:
			*tl = txdr_unsigned(RPC_PROCUNAVAIL);
			break;
		case EBADRPC:
			*tl = txdr_unsigned(RPC_GARBAGE);
			break;
		default:
			*tl = 0;
			if (err != NFSERR_RETVOID) {
				nfsm_build(tl, u_int32_t *, NFSX_UNSIGNED);
				if (err)
					*tl = txdr_unsigned(nfsrv_errmap(nd,
					    err));
				else
					*tl = 0;
			}
			break;
		}
	}

	if (mrq != NULL)
		*mrq = mreq;
	*mbp = mb;
	*bposp = bpos;
	if (err != 0 && err != NFSERR_RETVOID)
		nfsstats.srvrpc_errs++;
	return (0);
}

static void
nfs_timer_schedule(void)
{

	callout_schedule(&nfs_timer_ch, nfs_ticks);
}

void
nfs_timer_start(void)
{

	if (callout_pending(&nfs_timer_ch))
		return;

	nfs_timer_start_ev.ev_count++;
	nfs_timer_schedule();
}

void
nfs_timer_init(void)
{

	mutex_init(&nfs_timer_lock, MUTEX_DEFAULT, IPL_NONE);
	callout_init(&nfs_timer_ch, 0);
	callout_setfunc(&nfs_timer_ch, nfs_timer, NULL);
	evcnt_attach_dynamic(&nfs_timer_ev, EVCNT_TYPE_MISC, NULL,
	    "nfs", "timer");
	evcnt_attach_dynamic(&nfs_timer_start_ev, EVCNT_TYPE_MISC, NULL,
	    "nfs", "timer start");
	evcnt_attach_dynamic(&nfs_timer_stop_ev, EVCNT_TYPE_MISC, NULL,
	    "nfs", "timer stop");
}

void
nfs_timer_fini(void)
{

	callout_halt(&nfs_timer_ch, NULL);
	callout_destroy(&nfs_timer_ch);
	mutex_destroy(&nfs_timer_lock);
	evcnt_detach(&nfs_timer_ev);
	evcnt_detach(&nfs_timer_start_ev);
	evcnt_detach(&nfs_timer_stop_ev);
}

void
nfs_timer_srvinit(bool (*func)(void))
{

	nfs_timer_srvvec = func;
}

void
nfs_timer_srvfini(void)
{

	mutex_enter(&nfs_timer_lock);
	nfs_timer_srvvec = NULL;
	mutex_exit(&nfs_timer_lock);
}

/*
 * NFS timer routine.
 * Scan the nfsreq list and retransmit any requests that have timed out.
 * To avoid retransmission attempts on STREAM sockets (in the future) make
 * sure to set the r_retry field to 0 (implies nm_retry == 0).
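 * The callout fires every nfs_ticks while requests or server work are
 * outstanding, so each increment of r_rtt below corresponds to one pass
 * of this scan.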
 */
void
nfs_timer(void *arg)
{
	struct nfsreq *rep;
	struct mbuf *m;
	struct socket *so;
	struct nfsmount *nmp;
	int timeo;
	int error;
	bool more = false;

	nfs_timer_ev.ev_count++;

	mutex_enter(softnet_lock);	/* XXX PR 40491 */
	TAILQ_FOREACH(rep, &nfs_reqq, r_chain) {
		more = true;
		nmp = rep->r_nmp;
		if (rep->r_mrep || (rep->r_flags & R_SOFTTERM))
			continue;
		if (nfs_sigintr(nmp, rep, rep->r_lwp)) {
			rep->r_flags |= R_SOFTTERM;
			continue;
		}
		if (rep->r_rtt >= 0) {
			rep->r_rtt++;
			if (nmp->nm_flag & NFSMNT_DUMBTIMR)
				timeo = nmp->nm_timeo;
			else
				timeo = NFS_RTO(nmp, nfs_proct[rep->r_procnum]);
			if (nmp->nm_timeouts > 0)
				timeo *= nfs_backoff[nmp->nm_timeouts - 1];
			if (timeo > NFS_MAXTIMEO)
				timeo = NFS_MAXTIMEO;
			if (rep->r_rtt <= timeo)
				continue;
			if (nmp->nm_timeouts <
			    (sizeof(nfs_backoff) / sizeof(nfs_backoff[0])))
				nmp->nm_timeouts++;
		}
		/*
		 * Check for server not responding
		 */
		if ((rep->r_flags & R_TPRINTFMSG) == 0 &&
		    rep->r_rexmit > nmp->nm_deadthresh) {
			nfs_msg(rep->r_lwp,
			    nmp->nm_mountp->mnt_stat.f_mntfromname,
			    "not responding");
			rep->r_flags |= R_TPRINTFMSG;
		}
		if (rep->r_rexmit >= rep->r_retry) {	/* too many */
			nfsstats.rpctimeouts++;
			rep->r_flags |= R_SOFTTERM;
			continue;
		}
		if (nmp->nm_sotype != SOCK_DGRAM) {
			if (++rep->r_rexmit > NFS_MAXREXMIT)
				rep->r_rexmit = NFS_MAXREXMIT;
			continue;
		}
		if ((so = nmp->nm_so) == NULL)
			continue;

		/*
		 * If there is enough space and the window allows..
		 *	Resend it
		 * Set r_rtt to -1 in case we fail to send it now.
		 */
		/* solock(so);		XXX PR 40491 */
		rep->r_rtt = -1;
		if (sbspace(&so->so_snd) >= rep->r_mreq->m_pkthdr.len &&
		    ((nmp->nm_flag & NFSMNT_DUMBTIMR) ||
		     (rep->r_flags & R_SENT) ||
		     nmp->nm_sent < nmp->nm_cwnd) &&
		    (m = m_copym(rep->r_mreq, 0, M_COPYALL, M_DONTWAIT))) {
			if (so->so_state & SS_ISCONNECTED)
				error = (*so->so_proto->pr_usrreqs->pr_send)(so,
				    m, NULL, NULL, NULL);
			else
				error = (*so->so_proto->pr_usrreqs->pr_send)(so,
				    m, nmp->nm_nam, NULL, NULL);
			if (error) {
				if (NFSIGNORE_SOERROR(nmp->nm_soflags, error)) {
#ifdef DEBUG
					if (ratecheck(&nfs_timer_last_err_time,
					    &nfs_err_interval))
						printf("%s: ignoring error "
						    "%d\n", __func__, error);
#endif
					so->so_error = 0;
				}
			} else {
				/*
				 * Iff first send, start timing
				 * else turn timing off, backoff timer
				 * and divide congestion window by 2.
				 */
				if (rep->r_flags & R_SENT) {
					rep->r_flags &= ~R_TIMING;
					if (++rep->r_rexmit > NFS_MAXREXMIT)
						rep->r_rexmit = NFS_MAXREXMIT;
					nmp->nm_cwnd >>= 1;
					if (nmp->nm_cwnd < NFS_CWNDSCALE)
						nmp->nm_cwnd = NFS_CWNDSCALE;
					nfsstats.rpcretries++;
				} else {
					rep->r_flags |= R_SENT;
					nmp->nm_sent += NFS_CWNDSCALE;
				}
				rep->r_rtt = 0;
			}
		}
		/* sounlock(so);	XXX PR 40491 */
	}
	mutex_exit(softnet_lock);	/* XXX PR 40491 */

	mutex_enter(&nfs_timer_lock);
	if (nfs_timer_srvvec != NULL) {
		more |= (*nfs_timer_srvvec)();
	}
	mutex_exit(&nfs_timer_lock);

	if (more) {
		nfs_timer_schedule();
	} else {
		nfs_timer_stop_ev.ev_count++;
	}
}

/*
 * Test for a termination condition pending on the process.
 * This is used for NFSMNT_INT mounts.
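 * R_SOFTTERM is honoured for all mounts; the signal check below applies
 * only when NFSMNT_INT is set.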
 */
int
nfs_sigintr(struct nfsmount *nmp, struct nfsreq *rep, struct lwp *l)
{
	sigset_t ss;

	if (rep && (rep->r_flags & R_SOFTTERM))
		return (EINTR);
	if (!(nmp->nm_flag & NFSMNT_INT))
		return (0);
	if (l) {
		sigpending1(l, &ss);
#if 0
		sigminusset(&l->l_proc->p_sigctx.ps_sigignore, &ss);
#endif
		if (sigismember(&ss, SIGINT) || sigismember(&ss, SIGTERM) ||
		    sigismember(&ss, SIGKILL) || sigismember(&ss, SIGHUP) ||
		    sigismember(&ss, SIGQUIT))
			return (EINTR);
	}
	return (0);
}

int
nfs_rcvlock(struct nfsmount *nmp, struct nfsreq *rep)
{
	int *flagp = &nmp->nm_iflag;
	int slptimeo = 0;
	bool catch_p;
	int error = 0;

	KASSERT(nmp == rep->r_nmp);

	catch_p = (nmp->nm_flag & NFSMNT_INT) != 0;
	mutex_enter(&nmp->nm_lock);
	while (/* CONSTCOND */ true) {
		if (*flagp & NFSMNT_DISMNT) {
			cv_signal(&nmp->nm_disconcv);
			error = EIO;
			break;
		}
		/*
		 * If our reply was received while we were sleeping,
		 * then just return without taking the lock to avoid a
		 * situation where a single iod could 'capture' the
		 * receive lock.
		 */
		if (rep->r_mrep != NULL) {
			cv_signal(&nmp->nm_rcvcv);
			error = EALREADY;
			break;
		}
		if (nfs_sigintr(rep->r_nmp, rep, rep->r_lwp)) {
			cv_signal(&nmp->nm_rcvcv);
			error = EINTR;
			break;
		}
		if ((*flagp & NFSMNT_RCVLOCK) == 0) {
			*flagp |= NFSMNT_RCVLOCK;
			break;
		}
		if (catch_p) {
			cv_timedwait_sig(&nmp->nm_rcvcv, &nmp->nm_lock,
			    slptimeo);
		} else {
			cv_timedwait(&nmp->nm_rcvcv, &nmp->nm_lock,
			    slptimeo);
		}
		if (catch_p) {
			catch_p = false;
			slptimeo = 2 * hz;
		}
	}
	mutex_exit(&nmp->nm_lock);
	return error;
}

/*
 * Unlock the stream socket for others.
 */
void
nfs_rcvunlock(struct nfsmount *nmp)
{

	mutex_enter(&nmp->nm_lock);
	if ((nmp->nm_iflag & NFSMNT_RCVLOCK) == 0)
		panic("nfs rcvunlock");
	nmp->nm_iflag &= ~NFSMNT_RCVLOCK;
	cv_signal(&nmp->nm_rcvcv);
	mutex_exit(&nmp->nm_lock);
}

/*
 * Parse an RPC request
 * - verify it
 * - allocate and fill in the cred.
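 * On entry nd_mrep/nd_md/nd_dpos describe the incoming mbuf chain; the
 * nfsm_dissect() calls below walk it one group of XDR words at a time.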
 */
int
nfs_getreq(struct nfsrv_descript *nd, struct nfsd *nfsd, int has_header)
{
	int len, i;
	u_int32_t *tl;
	int32_t t1;
	struct uio uio;
	struct iovec iov;
	char *dpos, *cp2, *cp;
	u_int32_t nfsvers, auth_type;
	uid_t nickuid;
	int error = 0, ticklen;
	struct mbuf *mrep, *md;
	struct nfsuid *nuidp;
	struct timeval tvin, tvout;

	memset(&tvout, 0, sizeof tvout);	/* XXX gcc */

	KASSERT(nd->nd_cr == NULL);
	mrep = nd->nd_mrep;
	md = nd->nd_md;
	dpos = nd->nd_dpos;
	if (has_header) {
		nfsm_dissect(tl, u_int32_t *, 10 * NFSX_UNSIGNED);
		nd->nd_retxid = fxdr_unsigned(u_int32_t, *tl++);
		if (*tl++ != rpc_call) {
			m_freem(mrep);
			return (EBADRPC);
		}
	} else
		nfsm_dissect(tl, u_int32_t *, 8 * NFSX_UNSIGNED);
	nd->nd_repstat = 0;
	nd->nd_flag = 0;
	if (*tl++ != rpc_vers) {
		nd->nd_repstat = ERPCMISMATCH;
		nd->nd_procnum = NFSPROC_NOOP;
		return (0);
	}
	if (*tl != nfs_prog) {
		nd->nd_repstat = EPROGUNAVAIL;
		nd->nd_procnum = NFSPROC_NOOP;
		return (0);
	}
	tl++;
	nfsvers = fxdr_unsigned(u_int32_t, *tl++);
	if (nfsvers < NFS_VER2 || nfsvers > NFS_VER3) {
		nd->nd_repstat = EPROGMISMATCH;
		nd->nd_procnum = NFSPROC_NOOP;
		return (0);
	}
	if (nfsvers == NFS_VER3)
		nd->nd_flag = ND_NFSV3;
	nd->nd_procnum = fxdr_unsigned(u_int32_t, *tl++);
	if (nd->nd_procnum == NFSPROC_NULL)
		return (0);
	if (nd->nd_procnum > NFSPROC_COMMIT ||
	    (!nd->nd_flag && nd->nd_procnum > NFSV2PROC_STATFS)) {
		nd->nd_repstat = EPROCUNAVAIL;
		nd->nd_procnum = NFSPROC_NOOP;
		return (0);
	}
	if ((nd->nd_flag & ND_NFSV3) == 0)
		nd->nd_procnum = nfsv3_procid[nd->nd_procnum];
	auth_type = *tl++;
	len = fxdr_unsigned(int, *tl++);
	if (len < 0 || len > RPCAUTH_MAXSIZ) {
		m_freem(mrep);
		return (EBADRPC);
	}

	nd->nd_flag &= ~ND_KERBAUTH;
	/*
	 * Handle auth_unix or auth_kerb.
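	 * An AUTH_UNIX credential carries a stamp, machine name, uid, gid
	 * and up to RPCAUTH_UNIXGIDS supplemental gids; any other flavor
	 * is rejected with AUTH_REJECTCRED below.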
	 */
	if (auth_type == rpc_auth_unix) {
		uid_t uid;
		gid_t gid;

		nd->nd_cr = kauth_cred_alloc();
		len = fxdr_unsigned(int, *++tl);
		if (len < 0 || len > NFS_MAXNAMLEN) {
			m_freem(mrep);
			error = EBADRPC;
			goto errout;
		}
		nfsm_adv(nfsm_rndup(len));
		nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);

		uid = fxdr_unsigned(uid_t, *tl++);
		gid = fxdr_unsigned(gid_t, *tl++);
		kauth_cred_setuid(nd->nd_cr, uid);
		kauth_cred_seteuid(nd->nd_cr, uid);
		kauth_cred_setsvuid(nd->nd_cr, uid);
		kauth_cred_setgid(nd->nd_cr, gid);
		kauth_cred_setegid(nd->nd_cr, gid);
		kauth_cred_setsvgid(nd->nd_cr, gid);

		len = fxdr_unsigned(int, *tl);
		if (len < 0 || len > RPCAUTH_UNIXGIDS) {
			m_freem(mrep);
			error = EBADRPC;
			goto errout;
		}
		nfsm_dissect(tl, u_int32_t *, (len + 2) * NFSX_UNSIGNED);

		if (len > 0) {
			size_t grbuf_size = min(len, NGROUPS) * sizeof(gid_t);
			gid_t *grbuf = kmem_alloc(grbuf_size, KM_SLEEP);

			for (i = 0; i < len; i++) {
				if (i < NGROUPS) /* XXX elad */
					grbuf[i] = fxdr_unsigned(gid_t, *tl++);
				else
					tl++;
			}
			kauth_cred_setgroups(nd->nd_cr, grbuf,
			    min(len, NGROUPS), -1, UIO_SYSSPACE);
			kmem_free(grbuf, grbuf_size);
		}

		len = fxdr_unsigned(int, *++tl);
		if (len < 0 || len > RPCAUTH_MAXSIZ) {
			m_freem(mrep);
			error = EBADRPC;
			goto errout;
		}
		if (len > 0)
			nfsm_adv(nfsm_rndup(len));
	} else if (auth_type == rpc_auth_kerb) {
		switch (fxdr_unsigned(int, *tl++)) {
		case RPCAKN_FULLNAME:
			ticklen = fxdr_unsigned(int, *tl);
			*((u_int32_t *)nfsd->nfsd_authstr) = *tl;
			uio.uio_resid = nfsm_rndup(ticklen) + NFSX_UNSIGNED;
			nfsd->nfsd_authlen = uio.uio_resid + NFSX_UNSIGNED;
			if (uio.uio_resid > (len - 2 * NFSX_UNSIGNED)) {
				m_freem(mrep);
				error = EBADRPC;
				goto errout;
			}
			uio.uio_offset = 0;
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			UIO_SETUP_SYSSPACE(&uio);
			iov.iov_base = (void *)&nfsd->nfsd_authstr[4];
			iov.iov_len = RPCAUTH_MAXSIZ - 4;
			nfsm_mtouio(&uio, uio.uio_resid);
			nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
			if (*tl++ != rpc_auth_kerb ||
			    fxdr_unsigned(int, *tl) != 4 * NFSX_UNSIGNED) {
				printf("Bad kerb verifier\n");
				nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADVERF);
				nd->nd_procnum = NFSPROC_NOOP;
				return (0);
			}
			nfsm_dissect(cp, void *, 4 * NFSX_UNSIGNED);
			tl = (u_int32_t *)cp;
			if (fxdr_unsigned(int, *tl) != RPCAKN_FULLNAME) {
				printf("Not fullname kerb verifier\n");
				nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADVERF);
				nd->nd_procnum = NFSPROC_NOOP;
				return (0);
			}
			cp += NFSX_UNSIGNED;
			memcpy(nfsd->nfsd_verfstr, cp, 3 * NFSX_UNSIGNED);
			nfsd->nfsd_verflen = 3 * NFSX_UNSIGNED;
			nd->nd_flag |= ND_KERBFULL;
			nfsd->nfsd_flag |= NFSD_NEEDAUTH;
			break;
		case RPCAKN_NICKNAME:
			if (len != 2 * NFSX_UNSIGNED) {
				printf("Kerb nickname short\n");
				nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADCRED);
				nd->nd_procnum = NFSPROC_NOOP;
				return (0);
			}
			nickuid = fxdr_unsigned(uid_t, *tl);
			nfsm_dissect(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
			if (*tl++ != rpc_auth_kerb ||
			    fxdr_unsigned(int, *tl) != 3 * NFSX_UNSIGNED) {
				printf("Kerb nick verifier bad\n");
				nd->nd_repstat = (NFSERR_AUTHERR|AUTH_BADVERF);
				nd->nd_procnum = NFSPROC_NOOP;
				return (0);
			}
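			/*
			 * The nickname verifier body is an encrypted
			 * timestamp; pull it out for validation below.
			 */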
			nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
			tvin.tv_sec = *tl++;
			tvin.tv_usec = *tl;

			LIST_FOREACH(nuidp, NUIDHASH(nfsd->nfsd_slp, nickuid),
			    nu_hash) {
				if (kauth_cred_geteuid(nuidp->nu_cr) ==
				    nickuid &&
				    (!nd->nd_nam2 ||
				     netaddr_match(NU_NETFAM(nuidp),
				     &nuidp->nu_haddr, nd->nd_nam2)))
					break;
			}
			if (!nuidp) {
				nd->nd_repstat =
				    (NFSERR_AUTHERR|AUTH_REJECTCRED);
				nd->nd_procnum = NFSPROC_NOOP;
				return (0);
			}

			/*
			 * Now, decrypt the timestamp using the session key
			 * and validate it.
			 */
#ifdef NFSKERB
			XXX
#else
			(void)tvin.tv_sec;
#endif

			tvout.tv_sec = fxdr_unsigned(long, tvout.tv_sec);
			tvout.tv_usec = fxdr_unsigned(long, tvout.tv_usec);
			if (nuidp->nu_expire < time_second ||
			    nuidp->nu_timestamp.tv_sec > tvout.tv_sec ||
			    (nuidp->nu_timestamp.tv_sec == tvout.tv_sec &&
			     nuidp->nu_timestamp.tv_usec > tvout.tv_usec)) {
				nuidp->nu_expire = 0;
				nd->nd_repstat =
				    (NFSERR_AUTHERR|AUTH_REJECTVERF);
				nd->nd_procnum = NFSPROC_NOOP;
				return (0);
			}
			kauth_cred_hold(nuidp->nu_cr);
			nd->nd_cr = nuidp->nu_cr;
			nd->nd_flag |= ND_KERBNICK;
		}
	} else {
		nd->nd_repstat = (NFSERR_AUTHERR | AUTH_REJECTCRED);
		nd->nd_procnum = NFSPROC_NOOP;
		return (0);
	}

	nd->nd_md = md;
	nd->nd_dpos = dpos;
	KASSERT((nd->nd_cr == NULL && (nfsd->nfsd_flag & NFSD_NEEDAUTH) != 0)
	    || (nd->nd_cr != NULL && (nfsd->nfsd_flag & NFSD_NEEDAUTH) == 0));
	return (0);
nfsmout:
errout:
	KASSERT(error != 0);
	if (nd->nd_cr != NULL) {
		kauth_cred_free(nd->nd_cr);
		nd->nd_cr = NULL;
	}
	return (error);
}

int
nfs_msg(struct lwp *l, const char *server, const char *msg)
{
	tpr_t tpr;

#if 0 /* XXX nfs_timer can't block on proc_lock */
	if (l)
		tpr = tprintf_open(l->l_proc);
	else
#endif
		tpr = NULL;
	tprintf(tpr, "nfs server %s: %s\n", server, msg);
	tprintf_close(tpr);
	return (0);
}

static struct pool nfs_srvdesc_pool;

void
nfsdreq_init(void)
{

	pool_init(&nfs_srvdesc_pool, sizeof(struct nfsrv_descript),
	    0, 0, 0, "nfsrvdescpl", &pool_allocator_nointr, IPL_NONE);
}

void
nfsdreq_fini(void)
{

	pool_destroy(&nfs_srvdesc_pool);
}

struct nfsrv_descript *
nfsdreq_alloc(void)
{
	struct nfsrv_descript *nd;

	nd = pool_get(&nfs_srvdesc_pool, PR_WAITOK);
	nd->nd_cr = NULL;
	return nd;
}

void
nfsdreq_free(struct nfsrv_descript *nd)
{
	kauth_cred_t cr;

	cr = nd->nd_cr;
	if (cr != NULL) {
		kauth_cred_free(cr);
	}
	pool_put(&nfs_srvdesc_pool, nd);
}