/*	$NetBSD: nfs_syscalls.c,v 1.135 2008/04/28 15:06:51 yamt Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_syscalls.c	8.5 (Berkeley) 3/30/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nfs_syscalls.c,v 1.135 2008/04/28 15:06:51 yamt Exp $");

#include "fs_nfs.h"
#include "opt_nfs.h"
#include "opt_nfsserver.h"
#include "opt_iso.h"
#include "opt_inet.h"
#include "opt_compat_netbsd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/buf.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/signalvar.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/namei.h>
#include <sys/syslog.h>
#include <sys/filedesc.h>
#include <sys/kthread.h>
#include <sys/kauth.h>
#include <sys/syscallargs.h>

#include <netinet/in.h>
#include <netinet/tcp.h>
#ifdef ISO
#include <netiso/iso.h>
#endif
#include <nfs/xdr_subs.h>
#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nfsm_subs.h>
#include <nfs/nfsrvcache.h>
#include <nfs/nfsmount.h>
#include <nfs/nfsnode.h>
#include <nfs/nfsrtt.h>
#include <nfs/nfs_var.h>

/* Global defs. */
extern int32_t (*nfsrv3_procs[NFS_NPROCS]) __P((struct nfsrv_descript *,
					struct nfssvc_sock *,
					struct lwp *, struct mbuf **));
extern int nfsrvw_procrastinate;

struct nfssvc_sock *nfs_udpsock;
#ifdef ISO
struct nfssvc_sock *nfs_cltpsock;
#endif
#ifdef INET6
struct nfssvc_sock *nfs_udp6sock;
#endif
int nuidhash_max = NFS_MAXUIDHASH;
#ifdef NFSSERVER
static int nfs_numnfsd = 0;
static struct nfsdrt nfsdrt;
#endif

#ifdef NFSSERVER
kmutex_t nfsd_lock;
struct nfssvc_sockhead nfssvc_sockhead;
kcondvar_t nfsd_initcv;
struct nfssvc_sockhead nfssvc_sockpending;
struct nfsdhead nfsd_head;
struct nfsdidlehead nfsd_idle_head;

int nfssvc_sockhead_flag;
int nfsd_head_flag;
#endif

#ifdef NFS
/*
 * locking order:
 *	nfs_iodlist_lock -> nid_lock -> nm_lock
 */
kmutex_t nfs_iodlist_lock;
struct nfs_iodlist nfs_iodlist_idle;
struct nfs_iodlist nfs_iodlist_all;
int nfs_niothreads = -1; /* == "0, and has never been set" */
#endif

#ifdef NFSSERVER
static struct nfssvc_sock *nfsrv_sockalloc __P((void));
static void nfsrv_sockfree __P((struct nfssvc_sock *));
static void nfsd_rt __P((int, struct nfsrv_descript *, int));
#endif

/*
 * NFS server system calls
 */


/*
 * Nfs server pseudo system call for the nfsd's
 * Based on the flag value it either:
 * - adds a socket to the selection list
 * - remains in the kernel as an nfsd
 * - remains in the kernel as an nfsiod
 */
int
sys_nfssvc(struct lwp *l, const struct sys_nfssvc_args *uap, register_t *retval)
{
	/* {
		syscallarg(int) flag;
		syscallarg(void *) argp;
	} */
	int error;
#ifdef NFSSERVER
	file_t *fp;
	struct mbuf *nam;
	struct nfsd_args nfsdarg;
	struct nfsd_srvargs nfsd_srvargs, *nsd = &nfsd_srvargs;
	struct nfsd *nfsd;
	struct nfssvc_sock *slp;
	struct nfsuid *nuidp;
#endif

	/*
	 * Must be super user
	 */
	error = kauth_authorize_network(l->l_cred, KAUTH_NETWORK_NFS,
	    KAUTH_REQ_NETWORK_NFS_SVC, NULL, NULL, NULL);
	if (error)
		return (error);

	/* Initialize NFS server / client shared data. */
	nfs_init();

#ifdef NFSSERVER
	mutex_enter(&nfsd_lock);
	while (nfssvc_sockhead_flag & SLP_INIT) {
		cv_wait(&nfsd_initcv, &nfsd_lock);
	}
	mutex_exit(&nfsd_lock);
#endif
	if (SCARG(uap, flag) & NFSSVC_BIOD) {
#if defined(NFS) && defined(COMPAT_14)
		error = kpause("nfsbiod", true, 0, NULL); /* dummy impl */
#else
		error = ENOSYS;
#endif
	} else if (SCARG(uap, flag) & NFSSVC_MNTD) {
		error = ENOSYS;
	} else if (SCARG(uap, flag) & NFSSVC_ADDSOCK) {
#ifndef NFSSERVER
		error = ENOSYS;
#else
		error = copyin(SCARG(uap, argp), (void *)&nfsdarg,
		    sizeof(nfsdarg));
		if (error)
			return (error);
		/* getsock() will use the descriptor for us */
		error = getsock(nfsdarg.sock, &fp);
		if (error)
			return (error);
		/*
		 * Get the client address for connected sockets.
		 */
		if (nfsdarg.name == NULL || nfsdarg.namelen == 0)
			nam = (struct mbuf *)0;
		else {
			error = sockargs(&nam, nfsdarg.name, nfsdarg.namelen,
			    MT_SONAME);
			if (error) {
				fd_putfile(nfsdarg.sock);
				return (error);
			}
		}
		error = nfssvc_addsock(fp, nam);
		fd_putfile(nfsdarg.sock);
#endif /* !NFSSERVER */
	} else if (SCARG(uap, flag) & NFSSVC_SETEXPORTSLIST) {
#ifndef NFSSERVER
		error = ENOSYS;
#else
		struct export_args *args;
		struct mountd_exports_list mel;

		error = copyin(SCARG(uap, argp), &mel, sizeof(mel));
		if (error != 0)
			return error;

		args = (struct export_args *)malloc(mel.mel_nexports *
		    sizeof(struct export_args), M_TEMP, M_WAITOK);
		error = copyin(mel.mel_exports, args, mel.mel_nexports *
		    sizeof(struct export_args));
		if (error != 0) {
			free(args, M_TEMP);
			return error;
		}
		mel.mel_exports = args;

		error = mountd_set_exports_list(&mel, l);

		free(args, M_TEMP);
#endif /* !NFSSERVER */
	} else {
#ifndef NFSSERVER
		error = ENOSYS;
#else
		error = copyin(SCARG(uap, argp), (void *)nsd, sizeof (*nsd));
		if (error)
			return (error);
		if ((SCARG(uap, flag) & NFSSVC_AUTHIN) &&
		    ((nfsd = nsd->nsd_nfsd)) != NULL &&
		    (nfsd->nfsd_slp->ns_flags & SLP_VALID)) {
			slp = nfsd->nfsd_slp;

			/*
			 * First check to see if another nfsd has already
			 * added this credential.
			 */
			LIST_FOREACH(nuidp, NUIDHASH(slp, nsd->nsd_cr.cr_uid),
			    nu_hash) {
				if (kauth_cred_geteuid(nuidp->nu_cr) ==
				    nsd->nsd_cr.cr_uid &&
				    (!nfsd->nfsd_nd->nd_nam2 ||
				    netaddr_match(NU_NETFAM(nuidp),
				    &nuidp->nu_haddr, nfsd->nfsd_nd->nd_nam2)))
					break;
			}
			if (nuidp) {
				kauth_cred_hold(nuidp->nu_cr);
				nfsd->nfsd_nd->nd_cr = nuidp->nu_cr;
				nfsd->nfsd_nd->nd_flag |= ND_KERBFULL;
			} else {
				/*
				 * Nope, so we will.
				 */
				if (slp->ns_numuids < nuidhash_max) {
					slp->ns_numuids++;
					nuidp = kmem_alloc(sizeof(*nuidp), KM_SLEEP);
				} else
					nuidp = (struct nfsuid *)0;
				if ((slp->ns_flags & SLP_VALID) == 0) {
					if (nuidp)
						kmem_free(nuidp, sizeof(*nuidp));
				} else {
					if (nuidp == (struct nfsuid *)0) {
						nuidp = TAILQ_FIRST(&slp->ns_uidlruhead);
						LIST_REMOVE(nuidp, nu_hash);
						TAILQ_REMOVE(&slp->ns_uidlruhead, nuidp,
						    nu_lru);
						if (nuidp->nu_flag & NU_NAM)
							m_freem(nuidp->nu_nam);
					}
					nuidp->nu_flag = 0;
					kauth_uucred_to_cred(nuidp->nu_cr,
					    &nsd->nsd_cr);
					nuidp->nu_timestamp = nsd->nsd_timestamp;
					nuidp->nu_expire = time_second + nsd->nsd_ttl;
					/*
					 * and save the session key in nu_key.
					 */
					memcpy(nuidp->nu_key, nsd->nsd_key,
					    sizeof(nsd->nsd_key));
					if (nfsd->nfsd_nd->nd_nam2) {
						struct sockaddr_in *saddr;

						saddr = mtod(nfsd->nfsd_nd->nd_nam2,
						    struct sockaddr_in *);
						switch (saddr->sin_family) {
						case AF_INET:
							nuidp->nu_flag |= NU_INETADDR;
							nuidp->nu_inetaddr =
							    saddr->sin_addr.s_addr;
							break;
						case AF_ISO:
						default:
							nuidp->nu_flag |= NU_NAM;
							nuidp->nu_nam = m_copym(
							    nfsd->nfsd_nd->nd_nam2, 0,
							    M_COPYALL, M_WAIT);
							break;
						};
					}
					TAILQ_INSERT_TAIL(&slp->ns_uidlruhead, nuidp,
					    nu_lru);
					LIST_INSERT_HEAD(NUIDHASH(slp, nsd->nsd_uid),
					    nuidp, nu_hash);
					kauth_cred_hold(nuidp->nu_cr);
					nfsd->nfsd_nd->nd_cr = nuidp->nu_cr;
					nfsd->nfsd_nd->nd_flag |= ND_KERBFULL;
				}
			}
		}
		if ((SCARG(uap, flag) & NFSSVC_AUTHINFAIL) &&
		    (nfsd = nsd->nsd_nfsd))
			nfsd->nfsd_flag |= NFSD_AUTHFAIL;
		error = nfssvc_nfsd(nsd, SCARG(uap, argp), l);
#endif /* !NFSSERVER */
	}
	if (error == EINTR || error == ERESTART)
		error = 0;
	return (error);
}

#ifdef NFSSERVER
MALLOC_DEFINE(M_NFSD, "NFS daemon", "Nfs server daemon structure");

static struct nfssvc_sock *
nfsrv_sockalloc()
{
	struct nfssvc_sock *slp;

	slp = kmem_alloc(sizeof(*slp), KM_SLEEP);
	memset(slp, 0, sizeof (struct nfssvc_sock));
	mutex_init(&slp->ns_lock, MUTEX_DRIVER, IPL_SOFTNET);
	mutex_init(&slp->ns_alock, MUTEX_DRIVER, IPL_SOFTNET);
	cv_init(&slp->ns_cv, "nfsdsock");
	TAILQ_INIT(&slp->ns_uidlruhead);
	LIST_INIT(&slp->ns_tq);
	SIMPLEQ_INIT(&slp->ns_sendq);
	mutex_enter(&nfsd_lock);
	TAILQ_INSERT_TAIL(&nfssvc_sockhead, slp, ns_chain);
	mutex_exit(&nfsd_lock);

	return slp;
}

static void
nfsrv_sockfree(struct nfssvc_sock *slp)
{

	KASSERT(slp->ns_so == NULL);
	KASSERT(slp->ns_fp == NULL);
	KASSERT((slp->ns_flags & SLP_VALID) == 0);
	mutex_destroy(&slp->ns_lock);
	mutex_destroy(&slp->ns_alock);
	cv_destroy(&slp->ns_cv);
	kmem_free(slp, sizeof(*slp));
}

/*
 * Adds a socket to the list for servicing by nfsds.
 */
int
nfssvc_addsock(fp, mynam)
	file_t *fp;
	struct mbuf *mynam;
{
	struct mbuf *m;
	int siz;
	struct nfssvc_sock *slp;
	struct socket *so;
	struct nfssvc_sock *tslp;
	int error;

	so = (struct socket *)fp->f_data;
	tslp = (struct nfssvc_sock *)0;
	/*
	 * Add it to the list, as required.
	 */
	if (so->so_proto->pr_protocol == IPPROTO_UDP) {
#ifdef INET6
		if (so->so_proto->pr_domain->dom_family == AF_INET6)
			tslp = nfs_udp6sock;
		else
#endif
		tslp = nfs_udpsock;
		if (tslp->ns_flags & SLP_VALID) {
			m_freem(mynam);
			return (EPERM);
		}
#ifdef ISO
	} else if (so->so_proto->pr_protocol == ISOPROTO_CLTP) {
		tslp = nfs_cltpsock;
		if (tslp->ns_flags & SLP_VALID) {
			m_freem(mynam);
			return (EPERM);
		}
#endif /* ISO */
	}
	if (so->so_type == SOCK_STREAM)
		siz = NFS_MAXPACKET + sizeof (u_long);
	else
		siz = NFS_MAXPACKET;
	solock(so);
	error = soreserve(so, siz, siz);
	sounlock(so);
	if (error) {
		m_freem(mynam);
		return (error);
	}

	/*
	 * Set protocol specific options { for now TCP only } and
	 * reserve some space. For datagram sockets, this can get called
	 * repeatedly for the same socket, but that isn't harmful.
	 */
	if (so->so_type == SOCK_STREAM) {
		m = m_get(M_WAIT, MT_SOOPTS);
		MCLAIM(m, &nfs_mowner);
		*mtod(m, int32_t *) = 1;
		m->m_len = sizeof(int32_t);
		sosetopt(so, SOL_SOCKET, SO_KEEPALIVE, m);
	}
	if ((so->so_proto->pr_domain->dom_family == AF_INET
#ifdef INET6
	    || so->so_proto->pr_domain->dom_family == AF_INET6
#endif
	    ) &&
	    so->so_proto->pr_protocol == IPPROTO_TCP) {
		m = m_get(M_WAIT, MT_SOOPTS);
		MCLAIM(m, &nfs_mowner);
		*mtod(m, int32_t *) = 1;
		m->m_len = sizeof(int32_t);
		sosetopt(so, IPPROTO_TCP, TCP_NODELAY, m);
	}
	solock(so);
	so->so_rcv.sb_flags &= ~SB_NOINTR;
	so->so_rcv.sb_timeo = 0;
	so->so_snd.sb_flags &= ~SB_NOINTR;
	so->so_snd.sb_timeo = 0;
	sounlock(so);
	if (tslp) {
		slp = tslp;
	} else {
		slp = nfsrv_sockalloc();
	}
	slp->ns_so = so;
	slp->ns_nam = mynam;
	mutex_enter(&fp->f_lock);
	fp->f_count++;
	mutex_exit(&fp->f_lock);
	slp->ns_fp = fp;
	slp->ns_flags = SLP_VALID;
	slp->ns_aflags = SLP_A_NEEDQ;
	slp->ns_gflags = 0;
	slp->ns_sflags = 0;
	solock(so);
	so->so_upcallarg = (void *)slp;
	so->so_upcall = nfsrv_soupcall;
	so->so_rcv.sb_flags |= SB_UPCALL;
	sounlock(so);
	nfsrv_wakenfsd(slp);
	return (0);
}

/*
 * Called by nfssvc() for nfsds. Just loops around servicing rpc requests
 * until it is killed by a signal.
 */
int
nfssvc_nfsd(nsd, argp, l)
	struct nfsd_srvargs *nsd;
	void *argp;
	struct lwp *l;
{
	struct timeval tv;
	struct mbuf *m;
	struct nfssvc_sock *slp;
	struct nfsd *nfsd = nsd->nsd_nfsd;
	struct nfsrv_descript *nd = NULL;
	struct mbuf *mreq;
	u_quad_t cur_usec;
	int error = 0, cacherep, siz, sotype, writes_todo;
	struct proc *p = l->l_proc;
	int s;
	bool doreinit;

#ifndef nolint
	cacherep = RC_DOIT;
	writes_todo = 0;
#endif
	uvm_lwp_hold(l);
	if (nfsd == NULL) {
		nsd->nsd_nfsd = nfsd = kmem_alloc(sizeof(*nfsd), KM_SLEEP);
		memset(nfsd, 0, sizeof (struct nfsd));
		cv_init(&nfsd->nfsd_cv, "nfsd");
		nfsd->nfsd_procp = p;
		mutex_enter(&nfsd_lock);
		while ((nfssvc_sockhead_flag & SLP_INIT) != 0) {
			KASSERT(nfs_numnfsd == 0);
			cv_wait(&nfsd_initcv, &nfsd_lock);
		}
		TAILQ_INSERT_TAIL(&nfsd_head, nfsd, nfsd_chain);
		nfs_numnfsd++;
		mutex_exit(&nfsd_lock);
	}
	/*
	 * Loop getting rpc requests until SIGKILL.
	 */
	for (;;) {
		bool dummy;

		if ((curcpu()->ci_schedstate.spc_flags & SPCF_SHOULDYIELD)
		    != 0) {
			preempt();
		}
		if (nfsd->nfsd_slp == NULL) {
			mutex_enter(&nfsd_lock);
			while (nfsd->nfsd_slp == NULL &&
			    (nfsd_head_flag & NFSD_CHECKSLP) == 0) {
				SLIST_INSERT_HEAD(&nfsd_idle_head, nfsd,
				    nfsd_idle);
				error = cv_wait_sig(&nfsd->nfsd_cv, &nfsd_lock);
				if (error) {
					slp = nfsd->nfsd_slp;
					nfsd->nfsd_slp = NULL;
					if (!slp)
						SLIST_REMOVE(&nfsd_idle_head,
						    nfsd, nfsd, nfsd_idle);
					mutex_exit(&nfsd_lock);
					if (slp) {
						nfsrv_wakenfsd(slp);
						nfsrv_slpderef(slp);
					}
					goto done;
				}
			}
			if (nfsd->nfsd_slp == NULL &&
			    (nfsd_head_flag & NFSD_CHECKSLP) != 0) {
				slp = TAILQ_FIRST(&nfssvc_sockpending);
				if (slp) {
					KASSERT((slp->ns_gflags & SLP_G_DOREC)
					    != 0);
					TAILQ_REMOVE(&nfssvc_sockpending, slp,
					    ns_pending);
					slp->ns_gflags &= ~SLP_G_DOREC;
					slp->ns_sref++;
					nfsd->nfsd_slp = slp;
				} else
					nfsd_head_flag &= ~NFSD_CHECKSLP;
			}
			KASSERT(nfsd->nfsd_slp == NULL ||
			    nfsd->nfsd_slp->ns_sref > 0);
			mutex_exit(&nfsd_lock);
			if ((slp = nfsd->nfsd_slp) == NULL)
				continue;
			if (slp->ns_flags & SLP_VALID) {
				bool more;

				if (nfsdsock_testbits(slp, SLP_A_NEEDQ)) {
					nfsrv_rcv(slp);
				}
				if (nfsdsock_testbits(slp, SLP_A_DISCONN)) {
					nfsrv_zapsock(slp);
				}
				error = nfsrv_dorec(slp, nfsd, &nd, &more);
				getmicrotime(&tv);
				cur_usec = (u_quad_t)tv.tv_sec * 1000000 +
				    (u_quad_t)tv.tv_usec;
				writes_todo = 0;
				if (error) {
					struct nfsrv_descript *nd2;

					mutex_enter(&nfsd_lock);
					nd2 = LIST_FIRST(&slp->ns_tq);
					if (nd2 != NULL &&
					    nd2->nd_time <= cur_usec) {
						error = 0;
						cacherep = RC_DOIT;
						writes_todo = 1;
					}
					mutex_exit(&nfsd_lock);
				}
				if (error == 0 && more) {
					nfsrv_wakenfsd(slp);
				}
			}
		} else {
			error = 0;
			slp = nfsd->nfsd_slp;
		}
		KASSERT(slp != NULL);
		KASSERT(nfsd->nfsd_slp == slp);
		if (error || (slp->ns_flags & SLP_VALID) == 0) {
			if (nd) {
				nfsdreq_free(nd);
				nd = NULL;
			}
			nfsd->nfsd_slp = NULL;
			nfsrv_slpderef(slp);
			continue;
		}
		sotype = slp->ns_so->so_type;
		if (nd) {
			getmicrotime(&nd->nd_starttime);
			if (nd->nd_nam2)
				nd->nd_nam = nd->nd_nam2;
			else
				nd->nd_nam = slp->ns_nam;

			/*
			 * Check to see if authorization is needed.
			 */
			if (nfsd->nfsd_flag & NFSD_NEEDAUTH) {
				nfsd->nfsd_flag &= ~NFSD_NEEDAUTH;
				nsd->nsd_haddr = mtod(nd->nd_nam,
				    struct sockaddr_in *)->sin_addr.s_addr;
				nsd->nsd_authlen = nfsd->nfsd_authlen;
				nsd->nsd_verflen = nfsd->nfsd_verflen;
				if (!copyout(nfsd->nfsd_authstr,
				    nsd->nsd_authstr, nfsd->nfsd_authlen) &&
				    !copyout(nfsd->nfsd_verfstr,
				    nsd->nsd_verfstr, nfsd->nfsd_verflen) &&
				    !copyout(nsd, argp, sizeof (*nsd))) {
					uvm_lwp_rele(l);
					return (ENEEDAUTH);
				}
				cacherep = RC_DROPIT;
			} else
				cacherep = nfsrv_getcache(nd, slp, &mreq);

			if (nfsd->nfsd_flag & NFSD_AUTHFAIL) {
				nfsd->nfsd_flag &= ~NFSD_AUTHFAIL;
				nd->nd_procnum = NFSPROC_NOOP;
				nd->nd_repstat =
				    (NFSERR_AUTHERR | AUTH_TOOWEAK);
				cacherep = RC_DOIT;
			}
		}

		/*
		 * Loop to get all the write rpc replies that have been
		 * gathered together.
		 */
		do {
			switch (cacherep) {
			case RC_DOIT:
				mreq = NULL;
				netexport_rdlock();
				if (writes_todo || nd == NULL ||
				    (!(nd->nd_flag & ND_NFSV3) &&
				    nd->nd_procnum == NFSPROC_WRITE &&
				    nfsrvw_procrastinate > 0))
					error = nfsrv_writegather(&nd, slp,
					    l, &mreq);
				else
					error =
					    (*(nfsrv3_procs[nd->nd_procnum]))
					    (nd, slp, l, &mreq);
				netexport_rdunlock();
				if (mreq == NULL) {
					if (nd != NULL) {
						if (nd->nd_nam2)
							m_free(nd->nd_nam2);
						if (nd->nd_mrep)
							m_freem(nd->nd_mrep);
					}
					break;
				}
				if (error) {
					nfsstats.srv_errs++;
					nfsrv_updatecache(nd, false, mreq);
					if (nd->nd_nam2)
						m_freem(nd->nd_nam2);
					break;
				}
				nfsstats.srvrpccnt[nd->nd_procnum]++;
				nfsrv_updatecache(nd, true, mreq);
				nd->nd_mrep = (struct mbuf *)0;
				/* FALLTHROUGH */
			case RC_REPLY:
				m = mreq;
				siz = 0;
				while (m) {
					siz += m->m_len;
					m = m->m_next;
				}
				if (siz <= 0 || siz > NFS_MAXPACKET) {
					printf("mbuf siz=%d\n",siz);
					panic("Bad nfs svc reply");
				}
				m = mreq;
				m->m_pkthdr.len = siz;
				m->m_pkthdr.rcvif = (struct ifnet *)0;
				/*
				 * For stream protocols, prepend a Sun RPC
				 * Record Mark.
				 */
				if (sotype == SOCK_STREAM) {
					M_PREPEND(m, NFSX_UNSIGNED, M_WAIT);
					*mtod(m, u_int32_t *) =
					    htonl(0x80000000 | siz);
				}
				nd->nd_mreq = m;
				if (nfsrtton) {
					nfsd_rt(slp->ns_so->so_type, nd,
					    cacherep);
				}
				error = nfsdsock_sendreply(slp, nd);
				nd = NULL;
				if (error == EPIPE)
					nfsrv_zapsock(slp);
				if (error == EINTR || error == ERESTART) {
					nfsd->nfsd_slp = NULL;
					nfsrv_slpderef(slp);
					goto done;
				}
				break;
			case RC_DROPIT:
				if (nfsrtton)
					nfsd_rt(sotype, nd, cacherep);
				m_freem(nd->nd_mrep);
				m_freem(nd->nd_nam2);
				break;
			}
			if (nd) {
				nfsdreq_free(nd);
				nd = NULL;
			}

			/*
			 * Check to see if there are outstanding writes that
			 * need to be serviced.
			 */
			getmicrotime(&tv);
			cur_usec = (u_quad_t)tv.tv_sec * 1000000 +
			    (u_quad_t)tv.tv_usec;
			s = splsoftclock();
			if (LIST_FIRST(&slp->ns_tq) &&
			    LIST_FIRST(&slp->ns_tq)->nd_time <= cur_usec) {
				cacherep = RC_DOIT;
				writes_todo = 1;
			} else
				writes_todo = 0;
			splx(s);
		} while (writes_todo);
		if (nfsrv_dorec(slp, nfsd, &nd, &dummy)) {
			nfsd->nfsd_slp = NULL;
			nfsrv_slpderef(slp);
		}
	}
done:
	mutex_enter(&nfsd_lock);
	TAILQ_REMOVE(&nfsd_head, nfsd, nfsd_chain);
	doreinit = --nfs_numnfsd == 0;
	if (doreinit)
		nfssvc_sockhead_flag |= SLP_INIT;
	mutex_exit(&nfsd_lock);
	cv_destroy(&nfsd->nfsd_cv);
	kmem_free(nfsd, sizeof(*nfsd));
	nsd->nsd_nfsd = NULL;
	if (doreinit)
		nfsrv_init(true);	/* Reinitialize everything */
	uvm_lwp_rele(l);
	return (error);
}

/*
 * Shut down a socket associated with an nfssvc_sock structure.
 * Should be called with the send lock set, if required.
 * The trick here is to increment the sref at the start, so that the nfsds
 * will stop using it and clear ns_flag at the end so that it will not be
 * reassigned during cleanup.
 *
 * called at splsoftnet.
 */
void
nfsrv_zapsock(slp)
	struct nfssvc_sock *slp;
{
	struct nfsuid *nuidp, *nnuidp;
	struct nfsrv_descript *nwp;
	struct socket *so;
	struct mbuf *m;

	if (nfsdsock_drain(slp)) {
		return;
	}
	mutex_enter(&nfsd_lock);
	if (slp->ns_gflags & SLP_G_DOREC) {
		TAILQ_REMOVE(&nfssvc_sockpending, slp, ns_pending);
		slp->ns_gflags &= ~SLP_G_DOREC;
	}
	mutex_exit(&nfsd_lock);

	so = slp->ns_so;
	KASSERT(so != NULL);
	solock(so);
	so->so_upcall = NULL;
	so->so_upcallarg = NULL;
	so->so_rcv.sb_flags &= ~SB_UPCALL;
	soshutdown(so, SHUT_RDWR);
	sounlock(so);

	if (slp->ns_nam)
		m_free(slp->ns_nam);
	m_freem(slp->ns_raw);
	m = slp->ns_rec;
	while (m != NULL) {
		struct mbuf *n;

		n = m->m_nextpkt;
		m_freem(m);
		m = n;
	}
	for (nuidp = TAILQ_FIRST(&slp->ns_uidlruhead); nuidp != 0;
	    nuidp = nnuidp) {
		nnuidp = TAILQ_NEXT(nuidp, nu_lru);
		LIST_REMOVE(nuidp, nu_hash);
		TAILQ_REMOVE(&slp->ns_uidlruhead, nuidp, nu_lru);
		if (nuidp->nu_flag & NU_NAM)
			m_freem(nuidp->nu_nam);
		kmem_free(nuidp, sizeof(*nuidp));
	}
	mutex_enter(&nfsd_lock);
	while ((nwp = LIST_FIRST(&slp->ns_tq)) != NULL) {
		LIST_REMOVE(nwp, nd_tq);
		mutex_exit(&nfsd_lock);
		nfsdreq_free(nwp);
		mutex_enter(&nfsd_lock);
	}
	mutex_exit(&nfsd_lock);
}

/*
 * Dereference a server socket structure. If it has no more references and
 * is no longer valid, you can throw it away.
 */
void
nfsrv_slpderef(slp)
	struct nfssvc_sock *slp;
{
	uint32_t ref;

	mutex_enter(&nfsd_lock);
	KASSERT(slp->ns_sref > 0);
	ref = --slp->ns_sref;
	mutex_exit(&nfsd_lock);
	if (ref == 0 && (slp->ns_flags & SLP_VALID) == 0) {
		file_t *fp;

		mutex_enter(&nfsd_lock);
		KASSERT((slp->ns_gflags & SLP_G_DOREC) == 0);
		TAILQ_REMOVE(&nfssvc_sockhead, slp, ns_chain);
		mutex_exit(&nfsd_lock);

		fp = slp->ns_fp;
		if (fp != NULL) {
			slp->ns_fp = NULL;
			KASSERT(fp != NULL);
			KASSERT(fp->f_data == slp->ns_so);
			KASSERT(fp->f_count > 0);
			closef(fp);
			slp->ns_so = NULL;
		}

		nfsrv_sockfree(slp);
	}
}

/*
 * Initialize the data structures for the server.
 * Handshake with any new nfsds starting up to avoid any chance of
 * corruption.
 */
void
nfsrv_init(terminating)
	int terminating;
{
	struct nfssvc_sock *slp;

	if (!terminating) {
		mutex_init(&nfsd_lock, MUTEX_DRIVER, IPL_SOFTNET);
		cv_init(&nfsd_initcv, "nfsdinit");
	}

	mutex_enter(&nfsd_lock);
	if (!terminating && (nfssvc_sockhead_flag & SLP_INIT) != 0)
		panic("nfsd init");
	nfssvc_sockhead_flag |= SLP_INIT;

	if (terminating) {
		KASSERT(SLIST_EMPTY(&nfsd_idle_head));
		KASSERT(TAILQ_EMPTY(&nfsd_head));
		while ((slp = TAILQ_FIRST(&nfssvc_sockhead)) != NULL) {
			mutex_exit(&nfsd_lock);
			KASSERT(slp->ns_sref == 0);
			slp->ns_sref++;
			nfsrv_zapsock(slp);
			nfsrv_slpderef(slp);
			mutex_enter(&nfsd_lock);
		}
		KASSERT(TAILQ_EMPTY(&nfssvc_sockpending));
		mutex_exit(&nfsd_lock);
		nfsrv_cleancache();	/* And clear out server cache */
	} else {
		mutex_exit(&nfsd_lock);
		nfs_pub.np_valid = 0;
	}

	TAILQ_INIT(&nfssvc_sockhead);
	TAILQ_INIT(&nfssvc_sockpending);

	TAILQ_INIT(&nfsd_head);
	SLIST_INIT(&nfsd_idle_head);
	nfsd_head_flag &= ~NFSD_CHECKSLP;

	nfs_udpsock = nfsrv_sockalloc();

#ifdef INET6
	nfs_udp6sock = nfsrv_sockalloc();
#endif

#ifdef ISO
	nfs_cltpsock = nfsrv_sockalloc();
#endif

	mutex_enter(&nfsd_lock);
	nfssvc_sockhead_flag &= ~SLP_INIT;
	cv_broadcast(&nfsd_initcv);
	mutex_exit(&nfsd_lock);
}

/*
 * Add entries to the server monitor log.
 */
static void
nfsd_rt(sotype, nd, cacherep)
	int sotype;
	struct nfsrv_descript *nd;
	int cacherep;
{
	struct timeval tv;
	struct drt *rt;

	rt = &nfsdrt.drt[nfsdrt.pos];
	if (cacherep == RC_DOIT)
		rt->flag = 0;
	else if (cacherep == RC_REPLY)
		rt->flag = DRT_CACHEREPLY;
	else
		rt->flag = DRT_CACHEDROP;
	if (sotype == SOCK_STREAM)
		rt->flag |= DRT_TCP;
	if (nd->nd_flag & ND_NFSV3)
		rt->flag |= DRT_NFSV3;
	rt->proc = nd->nd_procnum;
	if (mtod(nd->nd_nam, struct sockaddr *)->sa_family == AF_INET)
		rt->ipadr = mtod(nd->nd_nam, struct sockaddr_in *)->sin_addr.s_addr;
	else
		rt->ipadr = INADDR_ANY;
	getmicrotime(&tv);
	rt->resptime = ((tv.tv_sec - nd->nd_starttime.tv_sec) * 1000000) +
		(tv.tv_usec - nd->nd_starttime.tv_usec);
	rt->tstamp = tv;
	nfsdrt.pos = (nfsdrt.pos + 1) % NFSRTTLOGSIZ;
}
#endif /* NFSSERVER */

#ifdef NFS

int nfs_defect = 0;
/*
 * Asynchronous I/O threads for client nfs.
 * They do read-ahead and write-behind operations on the block I/O cache.
 * Never returns unless it fails or gets killed.
 */

static void
nfssvc_iod(void *arg)
{
	struct buf *bp;
	struct nfs_iod *myiod;
	struct nfsmount *nmp;

	myiod = kmem_alloc(sizeof(*myiod), KM_SLEEP);
	mutex_init(&myiod->nid_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&myiod->nid_cv, "nfsiod");
	myiod->nid_exiting = false;
	myiod->nid_mount = NULL;
	mutex_enter(&nfs_iodlist_lock);
	LIST_INSERT_HEAD(&nfs_iodlist_all, myiod, nid_all);
	mutex_exit(&nfs_iodlist_lock);

	for (;;) {
		mutex_enter(&nfs_iodlist_lock);
		LIST_INSERT_HEAD(&nfs_iodlist_idle, myiod, nid_idle);
		mutex_exit(&nfs_iodlist_lock);

		mutex_enter(&myiod->nid_lock);
		while (/*CONSTCOND*/ true) {
			nmp = myiod->nid_mount;
			if (nmp) {
				myiod->nid_mount = NULL;
				break;
			}
			if (__predict_false(myiod->nid_exiting)) {
				/*
				 * drop nid_lock to preserve locking order.
				 */
				mutex_exit(&myiod->nid_lock);
				mutex_enter(&nfs_iodlist_lock);
				mutex_enter(&myiod->nid_lock);
				/*
				 * recheck nid_mount because nfs_asyncio can
				 * pick us in the meantime as we are still on
				 * nfs_iodlist_idle.
				 */
				if (myiod->nid_mount != NULL) {
					mutex_exit(&nfs_iodlist_lock);
					continue;
				}
				LIST_REMOVE(myiod, nid_idle);
				mutex_exit(&nfs_iodlist_lock);
				goto quit;
			}
			cv_wait(&myiod->nid_cv, &myiod->nid_lock);
		}
		mutex_exit(&myiod->nid_lock);

		mutex_enter(&nmp->nm_lock);
		while ((bp = TAILQ_FIRST(&nmp->nm_bufq)) != NULL) {
			/* Take one off the front of the list */
			TAILQ_REMOVE(&nmp->nm_bufq, bp, b_freelist);
			nmp->nm_bufqlen--;
			if (nmp->nm_bufqlen < 2 * nmp->nm_bufqiods) {
				cv_broadcast(&nmp->nm_aiocv);
			}
			mutex_exit(&nmp->nm_lock);
			KERNEL_LOCK(1, curlwp);
			(void)nfs_doio(bp);
			KERNEL_UNLOCK_LAST(curlwp);
			mutex_enter(&nmp->nm_lock);
			/*
			 * If there are more than one iod on this mount,
			 * then defect so that the iods can be shared out
			 * fairly between the mounts
			 */
			if (nfs_defect && nmp->nm_bufqiods > 1) {
				break;
			}
		}
		KASSERT(nmp->nm_bufqiods > 0);
		nmp->nm_bufqiods--;
		mutex_exit(&nmp->nm_lock);
	}
quit:
	KASSERT(myiod->nid_mount == NULL);
	mutex_exit(&myiod->nid_lock);

	cv_destroy(&myiod->nid_cv);
	mutex_destroy(&myiod->nid_lock);
	kmem_free(myiod, sizeof(*myiod));

	kthread_exit(0);
}

void
nfs_iodinit()
{

	mutex_init(&nfs_iodlist_lock, MUTEX_DEFAULT, IPL_NONE);
	LIST_INIT(&nfs_iodlist_all);
	LIST_INIT(&nfs_iodlist_idle);
}

int
nfs_set_niothreads(int newval)
{
	struct nfs_iod *nid;
	int error = 0;
	int hold_count;	/* declared unconditionally; used below regardless of MULTIPROCESSOR */

	KERNEL_UNLOCK_ALL(curlwp, &hold_count);

	mutex_enter(&nfs_iodlist_lock);
	/* clamp to sane range */
	nfs_niothreads = max(0, min(newval, NFS_MAXASYNCDAEMON));

	while (nfs_numasync != nfs_niothreads && error == 0) {
		while (nfs_numasync < nfs_niothreads) {

			/*
			 * kthread_create can wait for pagedaemon and
			 * pagedaemon can wait for nfsiod which needs to acquire
			 * nfs_iodlist_lock.
			 */

			mutex_exit(&nfs_iodlist_lock);
			error = kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL,
			    nfssvc_iod, NULL, NULL, "nfsio");
			mutex_enter(&nfs_iodlist_lock);
			if (error) {
				/* give up */
				nfs_niothreads = nfs_numasync;
				break;
			}
			nfs_numasync++;
		}
		while (nfs_numasync > nfs_niothreads) {
			nid = LIST_FIRST(&nfs_iodlist_all);
			if (nid == NULL) {
				/* iod has not started yet. */
				kpause("nfsiorm", false, hz, &nfs_iodlist_lock);
				continue;
			}
			LIST_REMOVE(nid, nid_all);
			mutex_enter(&nid->nid_lock);
			KASSERT(!nid->nid_exiting);
			nid->nid_exiting = true;
			cv_signal(&nid->nid_cv);
			mutex_exit(&nid->nid_lock);
			nfs_numasync--;
		}
	}
	mutex_exit(&nfs_iodlist_lock);

	KERNEL_LOCK(hold_count, curlwp);
	return error;
}

/*
 * Get an authorization string for the uid by having the mount_nfs sitting
 * on this mount point porpoise out of the kernel and do it.
 */
int
nfs_getauth(nmp, rep, cred, auth_str, auth_len, verf_str, verf_len, key)
	struct nfsmount *nmp;
	struct nfsreq *rep;
	kauth_cred_t cred;
	char **auth_str;
	int *auth_len;
	char *verf_str;
	int *verf_len;
	NFSKERBKEY_T key;	/* return session key */
{
	int error = 0;

	while ((nmp->nm_iflag & NFSMNT_WAITAUTH) == 0) {
		nmp->nm_iflag |= NFSMNT_WANTAUTH;
		(void) tsleep((void *)&nmp->nm_authtype, PSOCK,
		    "nfsauth1", 2 * hz);
		error = nfs_sigintr(nmp, rep, rep->r_lwp);
		if (error) {
			nmp->nm_iflag &= ~NFSMNT_WANTAUTH;
			return (error);
		}
	}
	nmp->nm_iflag &= ~(NFSMNT_WAITAUTH | NFSMNT_WANTAUTH);
	nmp->nm_authstr = *auth_str = (char *)malloc(RPCAUTH_MAXSIZ, M_TEMP, M_WAITOK);
	nmp->nm_authlen = RPCAUTH_MAXSIZ;
	nmp->nm_verfstr = verf_str;
	nmp->nm_verflen = *verf_len;
	nmp->nm_authuid = kauth_cred_geteuid(cred);
	wakeup((void *)&nmp->nm_authstr);

	/*
	 * And wait for mount_nfs to do its stuff.
	 */
	while ((nmp->nm_iflag & NFSMNT_HASAUTH) == 0 && error == 0) {
		(void) tsleep((void *)&nmp->nm_authlen, PSOCK,
		    "nfsauth2", 2 * hz);
		error = nfs_sigintr(nmp, rep, rep->r_lwp);
	}
	if (nmp->nm_iflag & NFSMNT_AUTHERR) {
		nmp->nm_iflag &= ~NFSMNT_AUTHERR;
		error = EAUTH;
	}
	if (error)
		free((void *)*auth_str, M_TEMP);
	else {
		*auth_len = nmp->nm_authlen;
		*verf_len = nmp->nm_verflen;
		memcpy(key, nmp->nm_key, sizeof (NFSKERBKEY_T));
	}
	nmp->nm_iflag &= ~NFSMNT_HASAUTH;
	nmp->nm_iflag |= NFSMNT_WAITAUTH;
	if (nmp->nm_iflag & NFSMNT_WANTAUTH) {
		nmp->nm_iflag &= ~NFSMNT_WANTAUTH;
		wakeup((void *)&nmp->nm_authtype);
	}
	return (error);
}

/*
 * Get a nickname authenticator and verifier.
 */
int
nfs_getnickauth(struct nfsmount *nmp, kauth_cred_t cred, char **auth_str,
    int *auth_len, char *verf_str, int verf_len)
{
	struct timeval ktvin, ktvout, tv;
	struct nfsuid *nuidp;
	u_int32_t *nickp, *verfp;

	memset(&ktvout, 0, sizeof ktvout);	/* XXX gcc */

#ifdef DIAGNOSTIC
	if (verf_len < (4 * NFSX_UNSIGNED))
		panic("nfs_getnickauth verf too small");
#endif
	LIST_FOREACH(nuidp, NMUIDHASH(nmp, kauth_cred_geteuid(cred)), nu_hash) {
		if (kauth_cred_geteuid(nuidp->nu_cr) == kauth_cred_geteuid(cred))
			break;
	}
	if (!nuidp || nuidp->nu_expire < time_second)
		return (EACCES);

	/*
	 * Move to the end of the lru list (end of lru == most recently used).
	 */
	TAILQ_REMOVE(&nmp->nm_uidlruhead, nuidp, nu_lru);
	TAILQ_INSERT_TAIL(&nmp->nm_uidlruhead, nuidp, nu_lru);

	nickp = (u_int32_t *)malloc(2 * NFSX_UNSIGNED, M_TEMP, M_WAITOK);
	*nickp++ = txdr_unsigned(RPCAKN_NICKNAME);
	*nickp = txdr_unsigned(nuidp->nu_nickname);
	*auth_str = (char *)nickp;
	*auth_len = 2 * NFSX_UNSIGNED;

	/*
	 * Now we must encrypt the verifier and package it up.
	 */
	verfp = (u_int32_t *)verf_str;
	*verfp++ = txdr_unsigned(RPCAKN_NICKNAME);
	getmicrotime(&tv);
	if (tv.tv_sec > nuidp->nu_timestamp.tv_sec ||
	    (tv.tv_sec == nuidp->nu_timestamp.tv_sec &&
	    tv.tv_usec > nuidp->nu_timestamp.tv_usec))
		nuidp->nu_timestamp = tv;
	else
		nuidp->nu_timestamp.tv_usec++;
	ktvin.tv_sec = txdr_unsigned(nuidp->nu_timestamp.tv_sec);
	ktvin.tv_usec = txdr_unsigned(nuidp->nu_timestamp.tv_usec);

	/*
	 * Now encrypt the timestamp verifier in ecb mode using the session
	 * key.
	 */
#ifdef NFSKERB
	XXX
#endif

	*verfp++ = ktvout.tv_sec;
	*verfp++ = ktvout.tv_usec;
	*verfp = 0;
	return (0);
}

/*
 * Save the current nickname in a hash list entry on the mount point.
 */
int
nfs_savenickauth(nmp, cred, len, key, mdp, dposp, mrep)
	struct nfsmount *nmp;
	kauth_cred_t cred;
	int len;
	NFSKERBKEY_T key;
	struct mbuf **mdp;
	char **dposp;
	struct mbuf *mrep;
{
	struct nfsuid *nuidp;
	u_int32_t *tl;
	int32_t t1;
	struct mbuf *md = *mdp;
	struct timeval ktvin, ktvout;
	u_int32_t nick;
	char *dpos = *dposp, *cp2;
	int deltasec, error = 0;

	memset(&ktvout, 0, sizeof ktvout);	/* XXX gcc */

	if (len == (3 * NFSX_UNSIGNED)) {
		nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
		ktvin.tv_sec = *tl++;
		ktvin.tv_usec = *tl++;
		nick = fxdr_unsigned(u_int32_t, *tl);

		/*
		 * Decrypt the timestamp in ecb mode.
		 */
#ifdef NFSKERB
		XXX
#endif
		ktvout.tv_sec = fxdr_unsigned(long, ktvout.tv_sec);
		ktvout.tv_usec = fxdr_unsigned(long, ktvout.tv_usec);
		deltasec = time_second - ktvout.tv_sec;
		if (deltasec < 0)
			deltasec = -deltasec;
		/*
		 * If ok, add it to the hash list for the mount point.
		 */
		if (deltasec <= NFS_KERBCLOCKSKEW) {
			if (nmp->nm_numuids < nuidhash_max) {
				nmp->nm_numuids++;
				nuidp = kmem_alloc(sizeof(*nuidp), KM_SLEEP);
			} else {
				nuidp = TAILQ_FIRST(&nmp->nm_uidlruhead);
				LIST_REMOVE(nuidp, nu_hash);
				TAILQ_REMOVE(&nmp->nm_uidlruhead, nuidp,
				    nu_lru);
			}
			nuidp->nu_flag = 0;
			kauth_cred_seteuid(nuidp->nu_cr, kauth_cred_geteuid(cred));
			nuidp->nu_expire = time_second + NFS_KERBTTL;
			nuidp->nu_timestamp = ktvout;
			nuidp->nu_nickname = nick;
			memcpy(nuidp->nu_key, key, sizeof (NFSKERBKEY_T));
			TAILQ_INSERT_TAIL(&nmp->nm_uidlruhead, nuidp,
			    nu_lru);
			LIST_INSERT_HEAD(NMUIDHASH(nmp, kauth_cred_geteuid(cred)),
			    nuidp, nu_hash);
		}
	} else
		nfsm_adv(nfsm_rndup(len));
nfsmout:
	*mdp = md;
	*dposp = dpos;
	return (error);
}
#endif /* NFS */