/*	$NetBSD: nfs_syscalls.c,v 1.134 2008/04/24 11:38:39 ad Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_syscalls.c	8.5 (Berkeley) 3/30/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nfs_syscalls.c,v 1.134 2008/04/24 11:38:39 ad Exp $");

#include "fs_nfs.h"
#include "opt_nfs.h"
#include "opt_nfsserver.h"
#include "opt_iso.h"
#include "opt_inet.h"
#include "opt_compat_netbsd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/buf.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/signalvar.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/namei.h>
#include <sys/syslog.h>
#include <sys/filedesc.h>
#include <sys/kthread.h>
#include <sys/kauth.h>
#include <sys/syscallargs.h>

#include <netinet/in.h>
#include <netinet/tcp.h>
#ifdef ISO
#include <netiso/iso.h>
#endif
#include <nfs/xdr_subs.h>
#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nfsm_subs.h>
#include <nfs/nfsrvcache.h>
#include <nfs/nfsmount.h>
#include <nfs/nfsnode.h>
#include <nfs/nfsrtt.h>
#include <nfs/nfs_var.h>

/* Global defs. */
extern int32_t (*nfsrv3_procs[NFS_NPROCS]) __P((struct nfsrv_descript *,
						struct nfssvc_sock *,
						struct lwp *, struct mbuf **));
extern int nfsrvw_procrastinate;

struct nfssvc_sock *nfs_udpsock;
#ifdef ISO
struct nfssvc_sock *nfs_cltpsock;
#endif
#ifdef INET6
struct nfssvc_sock *nfs_udp6sock;
#endif
int nuidhash_max = NFS_MAXUIDHASH;
#ifdef NFSSERVER
static int nfs_numnfsd = 0;
static struct nfsdrt nfsdrt;
#endif

#ifdef NFSSERVER
kmutex_t nfsd_lock;
struct nfssvc_sockhead nfssvc_sockhead;
kcondvar_t nfsd_initcv;
struct nfssvc_sockhead nfssvc_sockpending;
struct nfsdhead nfsd_head;
struct nfsdidlehead nfsd_idle_head;

int nfssvc_sockhead_flag;
int nfsd_head_flag;
#endif

#ifdef NFS
/*
 * locking order:
 *	nfs_iodlist_lock -> nid_lock -> nm_lock
 */
kmutex_t nfs_iodlist_lock;
struct nfs_iodlist nfs_iodlist_idle;
struct nfs_iodlist nfs_iodlist_all;
int nfs_niothreads = -1;	/* == "0, and has never been set" */
#endif

#ifdef NFSSERVER
static struct nfssvc_sock *nfsrv_sockalloc __P((void));
static void nfsrv_sockfree __P((struct nfssvc_sock *));
static void nfsd_rt __P((int, struct nfsrv_descript *, int));
#endif

/*
 * NFS server system calls
 */


/*
 * NFS server pseudo system call for the nfsds.
 * Based on the flag value it either:
 * - adds a socket to the selection list
 * - remains in the kernel as an nfsd
 * - remains in the kernel as an nfsiod
 */
int
sys_nfssvc(struct lwp *l, const struct sys_nfssvc_args *uap, register_t *retval)
{
	/* {
		syscallarg(int) flag;
		syscallarg(void *) argp;
	} */
	int error;
#ifdef NFSSERVER
	file_t *fp;
	struct mbuf *nam;
	struct nfsd_args nfsdarg;
	struct nfsd_srvargs nfsd_srvargs, *nsd = &nfsd_srvargs;
	struct nfsd *nfsd;
	struct nfssvc_sock *slp;
	struct nfsuid *nuidp;
#endif

	/*
	 * Must be super user
	 */
	error = kauth_authorize_network(l->l_cred, KAUTH_NETWORK_NFS,
	    KAUTH_REQ_NETWORK_NFS_SVC, NULL, NULL, NULL);
	if (error)
		return (error);

	/* Initialize NFS server / client shared data. */
	nfs_init();

#ifdef NFSSERVER
	mutex_enter(&nfsd_lock);
	while (nfssvc_sockhead_flag & SLP_INIT) {
		cv_wait(&nfsd_initcv, &nfsd_lock);
	}
	mutex_exit(&nfsd_lock);
#endif
	if (SCARG(uap, flag) & NFSSVC_BIOD) {
#if defined(NFS) && defined(COMPAT_14)
		error = kpause("nfsbiod", true, 0, NULL);	/* dummy impl */
#else
		error = ENOSYS;
#endif
	} else if (SCARG(uap, flag) & NFSSVC_MNTD) {
		error = ENOSYS;
	} else if (SCARG(uap, flag) & NFSSVC_ADDSOCK) {
#ifndef NFSSERVER
		error = ENOSYS;
#else
		error = copyin(SCARG(uap, argp), (void *)&nfsdarg,
		    sizeof(nfsdarg));
		if (error)
			return (error);
		/* getsock() will use the descriptor for us */
		error = getsock(nfsdarg.sock, &fp);
		if (error)
			return (error);
		/*
		 * Get the client address for connected sockets.
		 */
		if (nfsdarg.name == NULL || nfsdarg.namelen == 0)
			nam = (struct mbuf *)0;
		else {
			error = sockargs(&nam, nfsdarg.name, nfsdarg.namelen,
			    MT_SONAME);
			if (error) {
				fd_putfile(nfsdarg.sock);
				return (error);
			}
		}
		error = nfssvc_addsock(fp, nam);
		fd_putfile(nfsdarg.sock);
#endif /* !NFSSERVER */
	} else if (SCARG(uap, flag) & NFSSVC_SETEXPORTSLIST) {
#ifndef NFSSERVER
		error = ENOSYS;
#else
		struct export_args *args;
		struct mountd_exports_list mel;

		error = copyin(SCARG(uap, argp), &mel, sizeof(mel));
		if (error != 0)
			return error;

		args = (struct export_args *)malloc(mel.mel_nexports *
		    sizeof(struct export_args), M_TEMP, M_WAITOK);
		error = copyin(mel.mel_exports, args, mel.mel_nexports *
		    sizeof(struct export_args));
		if (error != 0) {
			free(args, M_TEMP);
			return error;
		}
		mel.mel_exports = args;

		error = mountd_set_exports_list(&mel, l);

		free(args, M_TEMP);
#endif /* !NFSSERVER */
	} else {
#ifndef NFSSERVER
		error = ENOSYS;
#else
		error = copyin(SCARG(uap, argp), (void *)nsd, sizeof (*nsd));
		if (error)
			return (error);
		if ((SCARG(uap, flag) & NFSSVC_AUTHIN) &&
		    (nfsd = nsd->nsd_nfsd) != NULL &&
		    (nfsd->nfsd_slp->ns_flags & SLP_VALID)) {
			slp = nfsd->nfsd_slp;

			/*
			 * First check to see if another nfsd has already
			 * added this credential.
			 */
			LIST_FOREACH(nuidp, NUIDHASH(slp, nsd->nsd_cr.cr_uid),
			    nu_hash) {
				if (kauth_cred_geteuid(nuidp->nu_cr) ==
				    nsd->nsd_cr.cr_uid &&
				    (!nfsd->nfsd_nd->nd_nam2 ||
				     netaddr_match(NU_NETFAM(nuidp),
				     &nuidp->nu_haddr, nfsd->nfsd_nd->nd_nam2)))
					break;
			}
			if (nuidp) {
				kauth_cred_hold(nuidp->nu_cr);
				nfsd->nfsd_nd->nd_cr = nuidp->nu_cr;
				nfsd->nfsd_nd->nd_flag |= ND_KERBFULL;
			} else {
				/*
				 * Nope, so we will.
				 */
				if (slp->ns_numuids < nuidhash_max) {
					slp->ns_numuids++;
					nuidp = kmem_alloc(sizeof(*nuidp),
					    KM_SLEEP);
				} else
					nuidp = (struct nfsuid *)0;
				if ((slp->ns_flags & SLP_VALID) == 0) {
					if (nuidp)
						kmem_free(nuidp,
						    sizeof(*nuidp));
				} else {
					if (nuidp == (struct nfsuid *)0) {
						nuidp = TAILQ_FIRST(
						    &slp->ns_uidlruhead);
						LIST_REMOVE(nuidp, nu_hash);
						TAILQ_REMOVE(
						    &slp->ns_uidlruhead,
						    nuidp, nu_lru);
						if (nuidp->nu_flag & NU_NAM)
							m_freem(nuidp->nu_nam);
					}
					nuidp->nu_flag = 0;
					kauth_uucred_to_cred(nuidp->nu_cr,
					    &nsd->nsd_cr);
					nuidp->nu_timestamp =
					    nsd->nsd_timestamp;
					nuidp->nu_expire =
					    time_second + nsd->nsd_ttl;
					/*
					 * and save the session key in nu_key.
					 */
					memcpy(nuidp->nu_key, nsd->nsd_key,
					    sizeof(nsd->nsd_key));
					if (nfsd->nfsd_nd->nd_nam2) {
						struct sockaddr_in *saddr;

						saddr = mtod(
						    nfsd->nfsd_nd->nd_nam2,
						    struct sockaddr_in *);
						switch (saddr->sin_family) {
						case AF_INET:
							nuidp->nu_flag |=
							    NU_INETADDR;
							nuidp->nu_inetaddr =
							    saddr->sin_addr.s_addr;
							break;
						case AF_ISO:
						default:
							nuidp->nu_flag |=
							    NU_NAM;
							nuidp->nu_nam =
							    m_copym(
							    nfsd->nfsd_nd->nd_nam2,
							    0, M_COPYALL,
							    M_WAIT);
							break;
						}
					}
					TAILQ_INSERT_TAIL(&slp->ns_uidlruhead,
					    nuidp, nu_lru);
					LIST_INSERT_HEAD(NUIDHASH(slp,
					    nsd->nsd_uid), nuidp, nu_hash);
					kauth_cred_hold(nuidp->nu_cr);
					nfsd->nfsd_nd->nd_cr = nuidp->nu_cr;
					nfsd->nfsd_nd->nd_flag |= ND_KERBFULL;
				}
			}
		}
		if ((SCARG(uap, flag) & NFSSVC_AUTHINFAIL) &&
		    (nfsd = nsd->nsd_nfsd))
			nfsd->nfsd_flag |= NFSD_AUTHFAIL;
		error = nfssvc_nfsd(nsd, SCARG(uap, argp), l);
#endif /* !NFSSERVER */
	}
	if (error == EINTR || error == ERESTART)
		error = 0;
	return (error);
}

#ifdef NFSSERVER
MALLOC_DEFINE(M_NFSD, "NFS daemon", "Nfs server daemon structure");

static struct nfssvc_sock *
nfsrv_sockalloc()
{
	struct nfssvc_sock *slp;

	slp = kmem_alloc(sizeof(*slp), KM_SLEEP);
	memset(slp, 0, sizeof(struct nfssvc_sock));
	/* XXX could be IPL_SOFTNET */
	mutex_init(&slp->ns_lock, MUTEX_DRIVER, IPL_VM);
	mutex_init(&slp->ns_alock, MUTEX_DRIVER, IPL_VM);
	cv_init(&slp->ns_cv, "nfsdsock");
	TAILQ_INIT(&slp->ns_uidlruhead);
	LIST_INIT(&slp->ns_tq);
	SIMPLEQ_INIT(&slp->ns_sendq);
	mutex_enter(&nfsd_lock);
	TAILQ_INSERT_TAIL(&nfssvc_sockhead, slp, ns_chain);
	mutex_exit(&nfsd_lock);

	return slp;
}

static void
nfsrv_sockfree(struct nfssvc_sock *slp)
{

	KASSERT(slp->ns_so == NULL);
	KASSERT(slp->ns_fp == NULL);
	KASSERT((slp->ns_flags & SLP_VALID) == 0);
	mutex_destroy(&slp->ns_lock);
	mutex_destroy(&slp->ns_alock);
	cv_destroy(&slp->ns_cv);
	kmem_free(slp, sizeof(*slp));
}

/*
 * Adds a socket to the list for servicing by nfsds.
 */
int
nfssvc_addsock(fp, mynam)
	file_t *fp;
	struct mbuf *mynam;
{
	struct mbuf *m;
	int siz;
	struct nfssvc_sock *slp;
	struct socket *so;
	struct nfssvc_sock *tslp;
	int error;

	so = (struct socket *)fp->f_data;
	tslp = (struct nfssvc_sock *)0;
	/*
	 * Add it to the list, as required.
	 */
	if (so->so_proto->pr_protocol == IPPROTO_UDP) {
#ifdef INET6
		if (so->so_proto->pr_domain->dom_family == AF_INET6)
			tslp = nfs_udp6sock;
		else
#endif
			tslp = nfs_udpsock;
		if (tslp->ns_flags & SLP_VALID) {
			m_freem(mynam);
			return (EPERM);
		}
#ifdef ISO
	} else if (so->so_proto->pr_protocol == ISOPROTO_CLTP) {
		tslp = nfs_cltpsock;
		if (tslp->ns_flags & SLP_VALID) {
			m_freem(mynam);
			return (EPERM);
		}
#endif /* ISO */
	}
	if (so->so_type == SOCK_STREAM)
		siz = NFS_MAXPACKET + sizeof (u_long);
	else
		siz = NFS_MAXPACKET;
	solock(so);
	error = soreserve(so, siz, siz);
	sounlock(so);
	if (error) {
		m_freem(mynam);
		return (error);
	}

	/*
	 * Set protocol specific options { for now TCP only } and
	 * reserve some space. For datagram sockets, this can get called
	 * repeatedly for the same socket, but that isn't harmful.
	 */
	if (so->so_type == SOCK_STREAM) {
		m = m_get(M_WAIT, MT_SOOPTS);
		MCLAIM(m, &nfs_mowner);
		*mtod(m, int32_t *) = 1;
		m->m_len = sizeof(int32_t);
		sosetopt(so, SOL_SOCKET, SO_KEEPALIVE, m);
	}
	if ((so->so_proto->pr_domain->dom_family == AF_INET
#ifdef INET6
	    || so->so_proto->pr_domain->dom_family == AF_INET6
#endif
	    ) &&
	    so->so_proto->pr_protocol == IPPROTO_TCP) {
		m = m_get(M_WAIT, MT_SOOPTS);
		MCLAIM(m, &nfs_mowner);
		*mtod(m, int32_t *) = 1;
		m->m_len = sizeof(int32_t);
		sosetopt(so, IPPROTO_TCP, TCP_NODELAY, m);
	}
	solock(so);
	so->so_rcv.sb_flags &= ~SB_NOINTR;
	so->so_rcv.sb_timeo = 0;
	so->so_snd.sb_flags &= ~SB_NOINTR;
	so->so_snd.sb_timeo = 0;
	sounlock(so);
	if (tslp) {
		slp = tslp;
	} else {
		slp = nfsrv_sockalloc();
	}
	slp->ns_so = so;
	slp->ns_nam = mynam;
	mutex_enter(&fp->f_lock);
	fp->f_count++;
	mutex_exit(&fp->f_lock);
	slp->ns_fp = fp;
	slp->ns_flags = SLP_VALID;
	slp->ns_aflags = SLP_A_NEEDQ;
	slp->ns_gflags = 0;
	slp->ns_sflags = 0;
	solock(so);
	so->so_upcallarg = (void *)slp;
	so->so_upcall = nfsrv_soupcall;
	so->so_rcv.sb_flags |= SB_UPCALL;
	sounlock(so);
	nfsrv_wakenfsd(slp);
	return (0);
}

/*
 * Called by nfssvc() for nfsds.  Just loops around servicing rpc requests
 * until it is killed by a signal.
 */
int
nfssvc_nfsd(nsd, argp, l)
	struct nfsd_srvargs *nsd;
	void *argp;
	struct lwp *l;
{
	struct timeval tv;
	struct mbuf *m;
	struct nfssvc_sock *slp;
	struct nfsd *nfsd = nsd->nsd_nfsd;
	struct nfsrv_descript *nd = NULL;
	struct mbuf *mreq;
	u_quad_t cur_usec;
	int error = 0, cacherep, siz, sotype, writes_todo;
	struct proc *p = l->l_proc;
	int s;
	bool doreinit;

#ifndef nolint
	cacherep = RC_DOIT;
	writes_todo = 0;
#endif
	uvm_lwp_hold(l);
	if (nfsd == NULL) {
		nsd->nsd_nfsd = nfsd = kmem_alloc(sizeof(*nfsd), KM_SLEEP);
		memset(nfsd, 0, sizeof(struct nfsd));
		cv_init(&nfsd->nfsd_cv, "nfsd");
		nfsd->nfsd_procp = p;
		mutex_enter(&nfsd_lock);
		while ((nfssvc_sockhead_flag & SLP_INIT) != 0) {
			KASSERT(nfs_numnfsd == 0);
			cv_wait(&nfsd_initcv, &nfsd_lock);
		}
		TAILQ_INSERT_TAIL(&nfsd_head, nfsd, nfsd_chain);
		nfs_numnfsd++;
		mutex_exit(&nfsd_lock);
	}
	/*
	 * Loop getting rpc requests until SIGKILL.
	 */
	for (;;) {
		bool dummy;

		if ((curcpu()->ci_schedstate.spc_flags & SPCF_SHOULDYIELD)
		    != 0) {
			preempt();
		}
		if (nfsd->nfsd_slp == NULL) {
			mutex_enter(&nfsd_lock);
			while (nfsd->nfsd_slp == NULL &&
			    (nfsd_head_flag & NFSD_CHECKSLP) == 0) {
				SLIST_INSERT_HEAD(&nfsd_idle_head, nfsd,
				    nfsd_idle);
				error = cv_wait_sig(&nfsd->nfsd_cv,
				    &nfsd_lock);
				if (error) {
					slp = nfsd->nfsd_slp;
					nfsd->nfsd_slp = NULL;
					if (!slp)
						SLIST_REMOVE(&nfsd_idle_head,
						    nfsd, nfsd, nfsd_idle);
					mutex_exit(&nfsd_lock);
					if (slp) {
						nfsrv_wakenfsd(slp);
						nfsrv_slpderef(slp);
					}
					goto done;
				}
			}
			if (nfsd->nfsd_slp == NULL &&
			    (nfsd_head_flag & NFSD_CHECKSLP) != 0) {
				slp = TAILQ_FIRST(&nfssvc_sockpending);
				if (slp) {
					KASSERT((slp->ns_gflags & SLP_G_DOREC)
					    != 0);
					TAILQ_REMOVE(&nfssvc_sockpending, slp,
					    ns_pending);
					slp->ns_gflags &= ~SLP_G_DOREC;
					slp->ns_sref++;
					nfsd->nfsd_slp = slp;
				} else
					nfsd_head_flag &= ~NFSD_CHECKSLP;
			}
			KASSERT(nfsd->nfsd_slp == NULL ||
			    nfsd->nfsd_slp->ns_sref > 0);
			mutex_exit(&nfsd_lock);
			if ((slp = nfsd->nfsd_slp) == NULL)
				continue;
			if (slp->ns_flags & SLP_VALID) {
				bool more;

				if (nfsdsock_testbits(slp, SLP_A_NEEDQ)) {
					nfsrv_rcv(slp);
				}
				if (nfsdsock_testbits(slp, SLP_A_DISCONN)) {
					nfsrv_zapsock(slp);
				}
				error = nfsrv_dorec(slp, nfsd, &nd, &more);
				getmicrotime(&tv);
				cur_usec = (u_quad_t)tv.tv_sec * 1000000 +
				    (u_quad_t)tv.tv_usec;
				writes_todo = 0;
				if (error) {
					struct nfsrv_descript *nd2;

					mutex_enter(&nfsd_lock);
					nd2 = LIST_FIRST(&slp->ns_tq);
					if (nd2 != NULL &&
					    nd2->nd_time <= cur_usec) {
						error = 0;
						cacherep = RC_DOIT;
						writes_todo = 1;
					}
					mutex_exit(&nfsd_lock);
				}
				if (error == 0 && more) {
					nfsrv_wakenfsd(slp);
				}
			}
		} else {
			error = 0;
			slp = nfsd->nfsd_slp;
		}
		KASSERT(slp != NULL);
		KASSERT(nfsd->nfsd_slp == slp);
		if (error || (slp->ns_flags & SLP_VALID) == 0) {
			if (nd) {
				nfsdreq_free(nd);
				nd = NULL;
			}
			nfsd->nfsd_slp = NULL;
			nfsrv_slpderef(slp);
			continue;
		}
		sotype = slp->ns_so->so_type;
		if (nd) {
			getmicrotime(&nd->nd_starttime);
			if (nd->nd_nam2)
				nd->nd_nam = nd->nd_nam2;
			else
				nd->nd_nam = slp->ns_nam;

			/*
			 * Check to see if authorization is needed.
			 */
			if (nfsd->nfsd_flag & NFSD_NEEDAUTH) {
				nfsd->nfsd_flag &= ~NFSD_NEEDAUTH;
				nsd->nsd_haddr = mtod(nd->nd_nam,
				    struct sockaddr_in *)->sin_addr.s_addr;
				nsd->nsd_authlen = nfsd->nfsd_authlen;
				nsd->nsd_verflen = nfsd->nfsd_verflen;
				if (!copyout(nfsd->nfsd_authstr,
				    nsd->nsd_authstr, nfsd->nfsd_authlen) &&
				    !copyout(nfsd->nfsd_verfstr,
				    nsd->nsd_verfstr, nfsd->nfsd_verflen) &&
				    !copyout(nsd, argp, sizeof (*nsd))) {
					uvm_lwp_rele(l);
					return (ENEEDAUTH);
				}
				cacherep = RC_DROPIT;
			} else
				cacherep = nfsrv_getcache(nd, slp, &mreq);

			if (nfsd->nfsd_flag & NFSD_AUTHFAIL) {
				nfsd->nfsd_flag &= ~NFSD_AUTHFAIL;
				nd->nd_procnum = NFSPROC_NOOP;
				nd->nd_repstat =
				    (NFSERR_AUTHERR | AUTH_TOOWEAK);
				cacherep = RC_DOIT;
			}
		}

		/*
		 * Loop to get all the write RPC replies that have been
		 * gathered together.
		 */
		do {
			switch (cacherep) {
			case RC_DOIT:
				mreq = NULL;
				netexport_rdlock();
				if (writes_todo || nd == NULL ||
				    (!(nd->nd_flag & ND_NFSV3) &&
				    nd->nd_procnum == NFSPROC_WRITE &&
				    nfsrvw_procrastinate > 0))
					error = nfsrv_writegather(&nd, slp,
					    l, &mreq);
				else
					error =
					    (*(nfsrv3_procs[nd->nd_procnum]))
					    (nd, slp, l, &mreq);
				netexport_rdunlock();
				if (mreq == NULL) {
					if (nd != NULL) {
						if (nd->nd_nam2)
							m_free(nd->nd_nam2);
						if (nd->nd_mrep)
							m_freem(nd->nd_mrep);
					}
					break;
				}
				if (error) {
					nfsstats.srv_errs++;
					nfsrv_updatecache(nd, false, mreq);
					if (nd->nd_nam2)
						m_freem(nd->nd_nam2);
					break;
				}
				nfsstats.srvrpccnt[nd->nd_procnum]++;
				nfsrv_updatecache(nd, true, mreq);
				nd->nd_mrep = (struct mbuf *)0;
				/* FALLTHROUGH */
			case RC_REPLY:
				m = mreq;
				siz = 0;
				while (m) {
					siz += m->m_len;
					m = m->m_next;
				}
				if (siz <= 0 || siz > NFS_MAXPACKET) {
					printf("mbuf siz=%d\n", siz);
					panic("Bad nfs svc reply");
				}
				m = mreq;
				m->m_pkthdr.len = siz;
				m->m_pkthdr.rcvif = (struct ifnet *)0;
				/*
				 * For stream protocols, prepend a Sun RPC
				 * Record Mark.
				 */
				if (sotype == SOCK_STREAM) {
					M_PREPEND(m, NFSX_UNSIGNED, M_WAIT);
					*mtod(m, u_int32_t *) =
					    htonl(0x80000000 | siz);
				}
				nd->nd_mreq = m;
				if (nfsrtton) {
					nfsd_rt(slp->ns_so->so_type, nd,
					    cacherep);
				}
				error = nfsdsock_sendreply(slp, nd);
				nd = NULL;
				if (error == EPIPE)
					nfsrv_zapsock(slp);
				if (error == EINTR || error == ERESTART) {
					nfsd->nfsd_slp = NULL;
					nfsrv_slpderef(slp);
					goto done;
				}
				break;
			case RC_DROPIT:
				if (nfsrtton)
					nfsd_rt(sotype, nd, cacherep);
				m_freem(nd->nd_mrep);
				m_freem(nd->nd_nam2);
				break;
			}
			if (nd) {
				nfsdreq_free(nd);
				nd = NULL;
			}

			/*
			 * Check to see if there are outstanding writes that
			 * need to be serviced.
			 */
			getmicrotime(&tv);
			cur_usec = (u_quad_t)tv.tv_sec * 1000000 +
			    (u_quad_t)tv.tv_usec;
			s = splsoftclock();
			if (LIST_FIRST(&slp->ns_tq) &&
			    LIST_FIRST(&slp->ns_tq)->nd_time <= cur_usec) {
				cacherep = RC_DOIT;
				writes_todo = 1;
			} else
				writes_todo = 0;
			splx(s);
		} while (writes_todo);
		if (nfsrv_dorec(slp, nfsd, &nd, &dummy)) {
			nfsd->nfsd_slp = NULL;
			nfsrv_slpderef(slp);
		}
	}
done:
	mutex_enter(&nfsd_lock);
	TAILQ_REMOVE(&nfsd_head, nfsd, nfsd_chain);
	doreinit = --nfs_numnfsd == 0;
	if (doreinit)
		nfssvc_sockhead_flag |= SLP_INIT;
	mutex_exit(&nfsd_lock);
	cv_destroy(&nfsd->nfsd_cv);
	kmem_free(nfsd, sizeof(*nfsd));
	nsd->nsd_nfsd = NULL;
	if (doreinit)
		nfsrv_init(true);	/* Reinitialize everything */
	uvm_lwp_rele(l);
	return (error);
}

/*
 * Shut down a socket associated with an nfssvc_sock structure.
 * Should be called with the send lock set, if required.
 * The trick here is to increment the sref at the start, so that the nfsds
 * will stop using it and clear ns_flag at the end so that it will not be
 * reassigned during cleanup.
 *
 * Called at splsoftnet.
 */
void
nfsrv_zapsock(slp)
	struct nfssvc_sock *slp;
{
	struct nfsuid *nuidp, *nnuidp;
	struct nfsrv_descript *nwp;
	struct socket *so;
	struct mbuf *m;

	if (nfsdsock_drain(slp)) {
		return;
	}
	mutex_enter(&nfsd_lock);
	if (slp->ns_gflags & SLP_G_DOREC) {
		TAILQ_REMOVE(&nfssvc_sockpending, slp, ns_pending);
		slp->ns_gflags &= ~SLP_G_DOREC;
	}
	mutex_exit(&nfsd_lock);

	so = slp->ns_so;
	KASSERT(so != NULL);
	solock(so);
	so->so_upcall = NULL;
	so->so_upcallarg = NULL;
	so->so_rcv.sb_flags &= ~SB_UPCALL;
	soshutdown(so, SHUT_RDWR);
	sounlock(so);

	if (slp->ns_nam)
		m_free(slp->ns_nam);
	m_freem(slp->ns_raw);
	m = slp->ns_rec;
	while (m != NULL) {
		struct mbuf *n;

		n = m->m_nextpkt;
		m_freem(m);
		m = n;
	}
	for (nuidp = TAILQ_FIRST(&slp->ns_uidlruhead); nuidp != NULL;
	    nuidp = nnuidp) {
		nnuidp = TAILQ_NEXT(nuidp, nu_lru);
		LIST_REMOVE(nuidp, nu_hash);
		TAILQ_REMOVE(&slp->ns_uidlruhead, nuidp, nu_lru);
		if (nuidp->nu_flag & NU_NAM)
			m_freem(nuidp->nu_nam);
		kmem_free(nuidp, sizeof(*nuidp));
	}
	mutex_enter(&nfsd_lock);
	while ((nwp = LIST_FIRST(&slp->ns_tq)) != NULL) {
		LIST_REMOVE(nwp, nd_tq);
		mutex_exit(&nfsd_lock);
		nfsdreq_free(nwp);
		mutex_enter(&nfsd_lock);
	}
	mutex_exit(&nfsd_lock);
}

/*
 * Dereference a server socket structure. If it has no more references and
 * is no longer valid, you can throw it away.
 */
void
nfsrv_slpderef(slp)
	struct nfssvc_sock *slp;
{
	uint32_t ref;

	mutex_enter(&nfsd_lock);
	KASSERT(slp->ns_sref > 0);
	ref = --slp->ns_sref;
	mutex_exit(&nfsd_lock);
	if (ref == 0 && (slp->ns_flags & SLP_VALID) == 0) {
		file_t *fp;

		mutex_enter(&nfsd_lock);
		KASSERT((slp->ns_gflags & SLP_G_DOREC) == 0);
		TAILQ_REMOVE(&nfssvc_sockhead, slp, ns_chain);
		mutex_exit(&nfsd_lock);

		fp = slp->ns_fp;
		if (fp != NULL) {
			slp->ns_fp = NULL;
			KASSERT(fp != NULL);
			KASSERT(fp->f_data == slp->ns_so);
			KASSERT(fp->f_count > 0);
			closef(fp);
			slp->ns_so = NULL;
		}

		nfsrv_sockfree(slp);
	}
}

/*
 * Initialize the data structures for the server.
 * Handshake with any new nfsds starting up to avoid any chance of
 * corruption.
 */
void
nfsrv_init(terminating)
	int terminating;
{
	struct nfssvc_sock *slp;

	if (!terminating) {
		/* XXX could be IPL_SOFTNET */
		mutex_init(&nfsd_lock, MUTEX_DRIVER, IPL_VM);
		cv_init(&nfsd_initcv, "nfsdinit");
	}

	mutex_enter(&nfsd_lock);
	if (!terminating && (nfssvc_sockhead_flag & SLP_INIT) != 0)
		panic("nfsd init");
	nfssvc_sockhead_flag |= SLP_INIT;

	if (terminating) {
		KASSERT(SLIST_EMPTY(&nfsd_idle_head));
		KASSERT(TAILQ_EMPTY(&nfsd_head));
		while ((slp = TAILQ_FIRST(&nfssvc_sockhead)) != NULL) {
			mutex_exit(&nfsd_lock);
			KASSERT(slp->ns_sref == 0);
			slp->ns_sref++;
			nfsrv_zapsock(slp);
			nfsrv_slpderef(slp);
			mutex_enter(&nfsd_lock);
		}
		KASSERT(TAILQ_EMPTY(&nfssvc_sockpending));
		mutex_exit(&nfsd_lock);
		nfsrv_cleancache();	/* And clear out server cache */
	} else {
		mutex_exit(&nfsd_lock);
		nfs_pub.np_valid = 0;
	}

	TAILQ_INIT(&nfssvc_sockhead);
	TAILQ_INIT(&nfssvc_sockpending);

	TAILQ_INIT(&nfsd_head);
	SLIST_INIT(&nfsd_idle_head);
	nfsd_head_flag &= ~NFSD_CHECKSLP;

	nfs_udpsock = nfsrv_sockalloc();

#ifdef INET6
	nfs_udp6sock = nfsrv_sockalloc();
#endif

#ifdef ISO
	nfs_cltpsock = nfsrv_sockalloc();
#endif

	mutex_enter(&nfsd_lock);
	nfssvc_sockhead_flag &= ~SLP_INIT;
	cv_broadcast(&nfsd_initcv);
	mutex_exit(&nfsd_lock);
}

/*
 * Add entries to the server monitor log.
 */
static void
nfsd_rt(sotype, nd, cacherep)
	int sotype;
	struct nfsrv_descript *nd;
	int cacherep;
{
	struct timeval tv;
	struct drt *rt;

	rt = &nfsdrt.drt[nfsdrt.pos];
	if (cacherep == RC_DOIT)
		rt->flag = 0;
	else if (cacherep == RC_REPLY)
		rt->flag = DRT_CACHEREPLY;
	else
		rt->flag = DRT_CACHEDROP;
	if (sotype == SOCK_STREAM)
		rt->flag |= DRT_TCP;
	if (nd->nd_flag & ND_NFSV3)
		rt->flag |= DRT_NFSV3;
	rt->proc = nd->nd_procnum;
	if (mtod(nd->nd_nam, struct sockaddr *)->sa_family == AF_INET)
		rt->ipadr = mtod(nd->nd_nam,
		    struct sockaddr_in *)->sin_addr.s_addr;
	else
		rt->ipadr = INADDR_ANY;
	getmicrotime(&tv);
	rt->resptime = ((tv.tv_sec - nd->nd_starttime.tv_sec) * 1000000) +
	    (tv.tv_usec - nd->nd_starttime.tv_usec);
	rt->tstamp = tv;
	nfsdrt.pos = (nfsdrt.pos + 1) % NFSRTTLOGSIZ;
}
#endif /* NFSSERVER */

#ifdef NFS

int nfs_defect = 0;
/*
 * Asynchronous I/O threads for client nfs.
 * They do read-ahead and write-behind operations on the block I/O cache.
 * Never returns unless it fails or gets killed.
 */

static void
nfssvc_iod(void *arg)
{
	struct buf *bp;
	struct nfs_iod *myiod;
	struct nfsmount *nmp;

	myiod = kmem_alloc(sizeof(*myiod), KM_SLEEP);
	mutex_init(&myiod->nid_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&myiod->nid_cv, "nfsiod");
	myiod->nid_exiting = false;
	myiod->nid_mount = NULL;
	mutex_enter(&nfs_iodlist_lock);
	LIST_INSERT_HEAD(&nfs_iodlist_all, myiod, nid_all);
	mutex_exit(&nfs_iodlist_lock);

	for (;;) {
		mutex_enter(&nfs_iodlist_lock);
		LIST_INSERT_HEAD(&nfs_iodlist_idle, myiod, nid_idle);
		mutex_exit(&nfs_iodlist_lock);

		mutex_enter(&myiod->nid_lock);
		while (/*CONSTCOND*/ true) {
			nmp = myiod->nid_mount;
			if (nmp) {
				myiod->nid_mount = NULL;
				break;
			}
			if (__predict_false(myiod->nid_exiting)) {
				/*
				 * Drop nid_lock to preserve locking order.
				 */
				mutex_exit(&myiod->nid_lock);
				mutex_enter(&nfs_iodlist_lock);
				mutex_enter(&myiod->nid_lock);
				/*
				 * Recheck nid_mount because nfs_asyncio can
				 * pick us in the meantime, as we are still on
				 * nfs_iodlist_idle.
				 */
				if (myiod->nid_mount != NULL) {
					mutex_exit(&nfs_iodlist_lock);
					continue;
				}
				LIST_REMOVE(myiod, nid_idle);
				mutex_exit(&nfs_iodlist_lock);
				goto quit;
			}
			cv_wait(&myiod->nid_cv, &myiod->nid_lock);
		}
		mutex_exit(&myiod->nid_lock);

		mutex_enter(&nmp->nm_lock);
		while ((bp = TAILQ_FIRST(&nmp->nm_bufq)) != NULL) {
			/* Take one off the front of the list */
			TAILQ_REMOVE(&nmp->nm_bufq, bp, b_freelist);
			nmp->nm_bufqlen--;
			if (nmp->nm_bufqlen < 2 * nmp->nm_bufqiods) {
				cv_broadcast(&nmp->nm_aiocv);
			}
			mutex_exit(&nmp->nm_lock);
			KERNEL_LOCK(1, curlwp);
			(void)nfs_doio(bp);
			KERNEL_UNLOCK_LAST(curlwp);
			mutex_enter(&nmp->nm_lock);
			/*
			 * If there is more than one iod on this mount,
			 * then defect so that the iods can be shared out
			 * fairly between the mounts.
			 */
			if (nfs_defect && nmp->nm_bufqiods > 1) {
				break;
			}
		}
		KASSERT(nmp->nm_bufqiods > 0);
		nmp->nm_bufqiods--;
		mutex_exit(&nmp->nm_lock);
	}
quit:
	KASSERT(myiod->nid_mount == NULL);
	mutex_exit(&myiod->nid_lock);

	cv_destroy(&myiod->nid_cv);
	mutex_destroy(&myiod->nid_lock);
	kmem_free(myiod, sizeof(*myiod));

	kthread_exit(0);
}

void
nfs_iodinit()
{

	mutex_init(&nfs_iodlist_lock, MUTEX_DEFAULT, IPL_NONE);
	LIST_INIT(&nfs_iodlist_all);
	LIST_INIT(&nfs_iodlist_idle);
}

int
nfs_set_niothreads(int newval)
{
	struct nfs_iod *nid;
	int error = 0;

#if defined(MULTIPROCESSOR)
	int hold_count;
#endif /* defined(MULTIPROCESSOR) */

	KERNEL_UNLOCK_ALL(curlwp, &hold_count);

	mutex_enter(&nfs_iodlist_lock);
	/* clamp to sane range */
	nfs_niothreads = max(0, min(newval, NFS_MAXASYNCDAEMON));

	while (nfs_numasync != nfs_niothreads && error == 0) {
		while (nfs_numasync < nfs_niothreads) {

			/*
			 * kthread_create can wait for pagedaemon and
			 * pagedaemon can wait for nfsiod which needs to
			 * acquire nfs_iodlist_lock.
			 */

			mutex_exit(&nfs_iodlist_lock);
			error = kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL,
			    nfssvc_iod, NULL, NULL, "nfsio");
			mutex_enter(&nfs_iodlist_lock);
			if (error) {
				/* give up */
				nfs_niothreads = nfs_numasync;
				break;
			}
			nfs_numasync++;
		}
		while (nfs_numasync > nfs_niothreads) {
			nid = LIST_FIRST(&nfs_iodlist_all);
			if (nid == NULL) {
				/* iod has not started yet. */
				kpause("nfsiorm", false, hz,
				    &nfs_iodlist_lock);
				continue;
			}
			LIST_REMOVE(nid, nid_all);
			mutex_enter(&nid->nid_lock);
			KASSERT(!nid->nid_exiting);
			nid->nid_exiting = true;
			cv_signal(&nid->nid_cv);
			mutex_exit(&nid->nid_lock);
			nfs_numasync--;
		}
	}
	mutex_exit(&nfs_iodlist_lock);

	KERNEL_LOCK(hold_count, curlwp);
	return error;
}

/*
 * Get an authorization string for the uid by having the mount_nfs sitting
 * on this mount point porpoise out of the kernel and do it.
 */
int
nfs_getauth(nmp, rep, cred, auth_str, auth_len, verf_str, verf_len, key)
	struct nfsmount *nmp;
	struct nfsreq *rep;
	kauth_cred_t cred;
	char **auth_str;
	int *auth_len;
	char *verf_str;
	int *verf_len;
	NFSKERBKEY_T key;		/* return session key */
{
	int error = 0;

	while ((nmp->nm_iflag & NFSMNT_WAITAUTH) == 0) {
		nmp->nm_iflag |= NFSMNT_WANTAUTH;
		(void) tsleep((void *)&nmp->nm_authtype, PSOCK,
		    "nfsauth1", 2 * hz);
		error = nfs_sigintr(nmp, rep, rep->r_lwp);
		if (error) {
			nmp->nm_iflag &= ~NFSMNT_WANTAUTH;
			return (error);
		}
	}
	nmp->nm_iflag &= ~(NFSMNT_WAITAUTH | NFSMNT_WANTAUTH);
	nmp->nm_authstr = *auth_str = (char *)malloc(RPCAUTH_MAXSIZ,
	    M_TEMP, M_WAITOK);
	nmp->nm_authlen = RPCAUTH_MAXSIZ;
	nmp->nm_verfstr = verf_str;
	nmp->nm_verflen = *verf_len;
	nmp->nm_authuid = kauth_cred_geteuid(cred);
	wakeup((void *)&nmp->nm_authstr);

	/*
	 * And wait for mount_nfs to do its stuff.
	 */
	while ((nmp->nm_iflag & NFSMNT_HASAUTH) == 0 && error == 0) {
		(void) tsleep((void *)&nmp->nm_authlen, PSOCK,
		    "nfsauth2", 2 * hz);
		error = nfs_sigintr(nmp, rep, rep->r_lwp);
	}
	if (nmp->nm_iflag & NFSMNT_AUTHERR) {
		nmp->nm_iflag &= ~NFSMNT_AUTHERR;
		error = EAUTH;
	}
	if (error)
		free((void *)*auth_str, M_TEMP);
	else {
		*auth_len = nmp->nm_authlen;
		*verf_len = nmp->nm_verflen;
		memcpy(key, nmp->nm_key, sizeof (NFSKERBKEY_T));
	}
	nmp->nm_iflag &= ~NFSMNT_HASAUTH;
	nmp->nm_iflag |= NFSMNT_WAITAUTH;
	if (nmp->nm_iflag & NFSMNT_WANTAUTH) {
		nmp->nm_iflag &= ~NFSMNT_WANTAUTH;
		wakeup((void *)&nmp->nm_authtype);
	}
	return (error);
}

/*
 * Get a nickname authenticator and verifier.
 */
int
nfs_getnickauth(struct nfsmount *nmp, kauth_cred_t cred, char **auth_str,
    int *auth_len, char *verf_str, int verf_len)
{
	struct timeval ktvin, ktvout, tv;
	struct nfsuid *nuidp;
	u_int32_t *nickp, *verfp;

	memset(&ktvout, 0, sizeof ktvout);	/* XXX gcc */

#ifdef DIAGNOSTIC
	if (verf_len < (4 * NFSX_UNSIGNED))
		panic("nfs_getnickauth verf too small");
#endif
	LIST_FOREACH(nuidp, NMUIDHASH(nmp, kauth_cred_geteuid(cred)),
	    nu_hash) {
		if (kauth_cred_geteuid(nuidp->nu_cr) ==
		    kauth_cred_geteuid(cred))
			break;
	}
	if (!nuidp || nuidp->nu_expire < time_second)
		return (EACCES);

	/*
	 * Move to the end of the lru list (end of lru == most recently used).
	 */
	TAILQ_REMOVE(&nmp->nm_uidlruhead, nuidp, nu_lru);
	TAILQ_INSERT_TAIL(&nmp->nm_uidlruhead, nuidp, nu_lru);

	nickp = (u_int32_t *)malloc(2 * NFSX_UNSIGNED, M_TEMP, M_WAITOK);
	*nickp++ = txdr_unsigned(RPCAKN_NICKNAME);
	*nickp = txdr_unsigned(nuidp->nu_nickname);
	*auth_str = (char *)nickp;
	*auth_len = 2 * NFSX_UNSIGNED;

	/*
	 * Now we must encrypt the verifier and package it up.
	 */
	verfp = (u_int32_t *)verf_str;
	*verfp++ = txdr_unsigned(RPCAKN_NICKNAME);
	getmicrotime(&tv);
	if (tv.tv_sec > nuidp->nu_timestamp.tv_sec ||
	    (tv.tv_sec == nuidp->nu_timestamp.tv_sec &&
	     tv.tv_usec > nuidp->nu_timestamp.tv_usec))
		nuidp->nu_timestamp = tv;
	else
		nuidp->nu_timestamp.tv_usec++;
	ktvin.tv_sec = txdr_unsigned(nuidp->nu_timestamp.tv_sec);
	ktvin.tv_usec = txdr_unsigned(nuidp->nu_timestamp.tv_usec);

	/*
	 * Now encrypt the timestamp verifier in ecb mode using the session
	 * key.
	 */
#ifdef NFSKERB
	XXX
#endif

	*verfp++ = ktvout.tv_sec;
	*verfp++ = ktvout.tv_usec;
	*verfp = 0;
	return (0);
}

/*
 * Save the current nickname in a hash list entry on the mount point.
 */
int
nfs_savenickauth(nmp, cred, len, key, mdp, dposp, mrep)
	struct nfsmount *nmp;
	kauth_cred_t cred;
	int len;
	NFSKERBKEY_T key;
	struct mbuf **mdp;
	char **dposp;
	struct mbuf *mrep;
{
	struct nfsuid *nuidp;
	u_int32_t *tl;
	int32_t t1;
	struct mbuf *md = *mdp;
	struct timeval ktvin, ktvout;
	u_int32_t nick;
	char *dpos = *dposp, *cp2;
	int deltasec, error = 0;

	memset(&ktvout, 0, sizeof ktvout);	/* XXX gcc */

	if (len == (3 * NFSX_UNSIGNED)) {
		nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
		ktvin.tv_sec = *tl++;
		ktvin.tv_usec = *tl++;
		nick = fxdr_unsigned(u_int32_t, *tl);

		/*
		 * Decrypt the timestamp in ecb mode.
		 */
#ifdef NFSKERB
		XXX
#endif
		ktvout.tv_sec = fxdr_unsigned(long, ktvout.tv_sec);
		ktvout.tv_usec = fxdr_unsigned(long, ktvout.tv_usec);
		deltasec = time_second - ktvout.tv_sec;
		if (deltasec < 0)
			deltasec = -deltasec;
		/*
		 * If ok, add it to the hash list for the mount point.
		 */
		if (deltasec <= NFS_KERBCLOCKSKEW) {
			if (nmp->nm_numuids < nuidhash_max) {
				nmp->nm_numuids++;
				nuidp = kmem_alloc(sizeof(*nuidp), KM_SLEEP);
			} else {
				nuidp = TAILQ_FIRST(&nmp->nm_uidlruhead);
				LIST_REMOVE(nuidp, nu_hash);
				TAILQ_REMOVE(&nmp->nm_uidlruhead, nuidp,
				    nu_lru);
			}
			nuidp->nu_flag = 0;
			kauth_cred_seteuid(nuidp->nu_cr,
			    kauth_cred_geteuid(cred));
			nuidp->nu_expire = time_second + NFS_KERBTTL;
			nuidp->nu_timestamp = ktvout;
			nuidp->nu_nickname = nick;
			memcpy(nuidp->nu_key, key, sizeof (NFSKERBKEY_T));
			TAILQ_INSERT_TAIL(&nmp->nm_uidlruhead, nuidp,
			    nu_lru);
			LIST_INSERT_HEAD(NMUIDHASH(nmp,
			    kauth_cred_geteuid(cred)), nuidp, nu_hash);
		}
	} else
		nfsm_adv(nfsm_rndup(len));
nfsmout:
	*mdp = md;
	*dposp = dpos;
	return (error);
}
#endif /* NFS */
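
/*
 * Illustrative userland sketch (kept under #if 0; it is not part of this
 * file or of the kernel build): roughly how a daemon such as nfsd(8) might
 * drive the nfssvc(2) pseudo system call handled by sys_nfssvc() above.
 * The master process hands a bound socket to the kernel with NFSSVC_ADDSOCK
 * (serviced by nfssvc_addsock()), and each server process then enters the
 * in-kernel RPC loop with NFSSVC_NFSD (nfssvc_nfsd()), which normally only
 * returns on a signal or an error.  The header list, the hard-coded port
 * number and the error handling are simplified assumptions, not taken from
 * this file.
 */
#if 0
#include <sys/param.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <err.h>
#include <string.h>
#include <unistd.h>

static void
example_nfssvc_driver(void)
{
	struct sockaddr_in sin;
	struct nfsd_args nfsdargs;
	struct nfsd_srvargs nsd;
	int sock;

	/* Create and bind a UDP socket for NFS (port 2049). */
	sock = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
	if (sock < 0)
		err(1, "socket");
	memset(&sin, 0, sizeof(sin));
	sin.sin_family = AF_INET;
	sin.sin_len = sizeof(sin);
	sin.sin_port = htons(2049);
	if (bind(sock, (struct sockaddr *)&sin, sizeof(sin)) < 0)
		err(1, "bind");

	/* Hand the socket to the kernel; see nfssvc_addsock() above. */
	nfsdargs.sock = sock;
	nfsdargs.name = NULL;		/* no client address for UDP */
	nfsdargs.namelen = 0;
	if (nfssvc(NFSSVC_ADDSOCK, &nfsdargs) < 0)
		err(1, "nfssvc(NFSSVC_ADDSOCK)");
	close(sock);			/* the kernel holds its own reference */

	/* Become an nfsd; loops in nfssvc_nfsd() until killed. */
	memset(&nsd, 0, sizeof(nsd));
	if (nfssvc(NFSSVC_NFSD, &nsd) < 0)
		err(1, "nfssvc(NFSSVC_NFSD)");
}
#endif /* 0 */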