xref: /netbsd-src/sys/nfs/nfs_syscalls.c (revision cac8e449158efc7261bebc8657cbb0125a2cfdde)
1 /*	$NetBSD: nfs_syscalls.c,v 1.137 2008/06/24 11:18:14 ad Exp $	*/
2 
3 /*
4  * Copyright (c) 1989, 1993
5  *	The Regents of the University of California.  All rights reserved.
6  *
7  * This code is derived from software contributed to Berkeley by
8  * Rick Macklem at The University of Guelph.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. Neither the name of the University nor the names of its contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  *	@(#)nfs_syscalls.c	8.5 (Berkeley) 3/30/95
35  */
36 
37 #include <sys/cdefs.h>
38 __KERNEL_RCSID(0, "$NetBSD: nfs_syscalls.c,v 1.137 2008/06/24 11:18:14 ad Exp $");
39 
40 #include "fs_nfs.h"
41 #include "opt_nfs.h"
42 #include "opt_nfsserver.h"
43 #include "opt_iso.h"
44 #include "opt_inet.h"
45 #include "opt_compat_netbsd.h"
46 
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/kernel.h>
50 #include <sys/file.h>
51 #include <sys/stat.h>
52 #include <sys/vnode.h>
53 #include <sys/mount.h>
54 #include <sys/proc.h>
55 #include <sys/uio.h>
56 #include <sys/malloc.h>
57 #include <sys/kmem.h>
58 #include <sys/buf.h>
59 #include <sys/mbuf.h>
60 #include <sys/socket.h>
61 #include <sys/socketvar.h>
62 #include <sys/signalvar.h>
63 #include <sys/domain.h>
64 #include <sys/protosw.h>
65 #include <sys/namei.h>
66 #include <sys/syslog.h>
67 #include <sys/filedesc.h>
68 #include <sys/kthread.h>
69 #include <sys/kauth.h>
70 #include <sys/syscallargs.h>
71 
72 #include <netinet/in.h>
73 #include <netinet/tcp.h>
74 #ifdef ISO
75 #include <netiso/iso.h>
76 #endif
77 #include <nfs/xdr_subs.h>
78 #include <nfs/rpcv2.h>
79 #include <nfs/nfsproto.h>
80 #include <nfs/nfs.h>
81 #include <nfs/nfsm_subs.h>
82 #include <nfs/nfsrvcache.h>
83 #include <nfs/nfsmount.h>
84 #include <nfs/nfsnode.h>
85 #include <nfs/nfsrtt.h>
86 #include <nfs/nfs_var.h>
87 
88 /* Global defs. */
89 extern int32_t (*nfsrv3_procs[NFS_NPROCS]) __P((struct nfsrv_descript *,
90 						struct nfssvc_sock *,
91 						struct lwp *, struct mbuf **));
92 extern int nfsrvw_procrastinate;
93 
94 struct nfssvc_sock *nfs_udpsock;
95 #ifdef ISO
96 struct nfssvc_sock *nfs_cltpsock;
97 #endif
98 #ifdef INET6
99 struct nfssvc_sock *nfs_udp6sock;
100 #endif
101 int nuidhash_max = NFS_MAXUIDHASH;
102 #ifdef NFSSERVER
103 static int nfs_numnfsd = 0;
104 static struct nfsdrt nfsdrt;
105 #endif
106 
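/*
 * Server-side global state.  nfsd_lock protects the nfssvc_sock lists, the
 * nfsd list and the idle-nfsd list declared below; nfssvc_sockpending holds
 * sockets with received requests waiting for an nfsd, and nfsd_idle_head
 * holds nfsds waiting for work.
 */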
107 #ifdef NFSSERVER
108 kmutex_t nfsd_lock;
109 struct nfssvc_sockhead nfssvc_sockhead;
110 kcondvar_t nfsd_initcv;
111 struct nfssvc_sockhead nfssvc_sockpending;
112 struct nfsdhead nfsd_head;
113 struct nfsdidlehead nfsd_idle_head;
114 
115 int nfssvc_sockhead_flag;
116 int nfsd_head_flag;
117 #endif
118 
119 #ifdef NFS
120 /*
121  * locking order:
122  *	nfs_iodlist_lock -> nid_lock -> nm_lock
123  */
124 kmutex_t nfs_iodlist_lock;
125 struct nfs_iodlist nfs_iodlist_idle;
126 struct nfs_iodlist nfs_iodlist_all;
127 int nfs_niothreads = -1; /* == "0, and has never been set" */
128 #endif
129 
130 #ifdef NFSSERVER
131 static struct nfssvc_sock *nfsrv_sockalloc __P((void));
132 static void nfsrv_sockfree __P((struct nfssvc_sock *));
133 static void nfsd_rt __P((int, struct nfsrv_descript *, int));
134 #endif
135 
136 /*
137  * NFS server system calls
138  */
139 
140 
141 /*
142  * NFS server pseudo system call for the nfsds.
143  * Based on the flag value it either:
144  * - adds a socket to the selection list
145  * - remains in the kernel as an nfsd
146  * - remains in the kernel as an nfsiod (now a compatibility stub)
147  */
148 int
149 sys_nfssvc(struct lwp *l, const struct sys_nfssvc_args *uap, register_t *retval)
150 {
151 	/* {
152 		syscallarg(int) flag;
153 		syscallarg(void *) argp;
154 	} */
155 	int error;
156 #ifdef NFSSERVER
157 	file_t *fp;
158 	struct mbuf *nam;
159 	struct nfsd_args nfsdarg;
160 	struct nfsd_srvargs nfsd_srvargs, *nsd = &nfsd_srvargs;
161 	struct nfsd *nfsd;
162 	struct nfssvc_sock *slp;
163 	struct nfsuid *nuidp;
164 #endif
165 
166 	/*
167 	 * Must be super user
168 	 */
169 	error = kauth_authorize_network(l->l_cred, KAUTH_NETWORK_NFS,
170 	    KAUTH_REQ_NETWORK_NFS_SVC, NULL, NULL, NULL);
171 	if (error)
172 		return (error);
173 
174 	/* Initialize NFS server / client shared data. */
175 	nfs_init();
176 
177 #ifdef NFSSERVER
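	/*
	 * If a previous set of nfsds is still being torn down (nfsrv_init()
	 * has SLP_INIT set), wait for that reinitialization to complete
	 * before touching any server state.
	 */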
178 	mutex_enter(&nfsd_lock);
179 	while (nfssvc_sockhead_flag & SLP_INIT) {
180 		cv_wait(&nfsd_initcv, &nfsd_lock);
181 	}
182 	mutex_exit(&nfsd_lock);
183 #endif
184 	if (SCARG(uap, flag) & NFSSVC_BIOD) {
185 #if defined(NFS) && defined(COMPAT_14)
186 		error = kpause("nfsbiod", true, 0, NULL); /* COMPAT_14 dummy */
187 #else
188 		error = ENOSYS;
189 #endif
190 	} else if (SCARG(uap, flag) & NFSSVC_MNTD) {
191 		error = ENOSYS;
192 	} else if (SCARG(uap, flag) & NFSSVC_ADDSOCK) {
193 #ifndef NFSSERVER
194 		error = ENOSYS;
195 #else
196 		error = copyin(SCARG(uap, argp), (void *)&nfsdarg,
197 		    sizeof(nfsdarg));
198 		if (error)
199 			return (error);
200 		/* fd_getfile() holds the descriptor for us until fd_putfile() */
201 		if ((fp = fd_getfile(nfsdarg.sock)) == NULL)
202 			return (EBADF);
203 		if (fp->f_type != DTYPE_SOCKET) {
204 			fd_putfile(nfsdarg.sock);
205 			return (ENOTSOCK);
206 		}
209 		/*
210 		 * Get the client address for connected sockets.
211 		 */
212 		if (nfsdarg.name == NULL || nfsdarg.namelen == 0)
213 			nam = (struct mbuf *)0;
214 		else {
215 			error = sockargs(&nam, nfsdarg.name, nfsdarg.namelen,
216 				MT_SONAME);
217 			if (error) {
218 				fd_putfile(nfsdarg.sock);
219 				return (error);
220 			}
221 		}
222 		error = nfssvc_addsock(fp, nam);
223 		fd_putfile(nfsdarg.sock);
224 #endif /* !NFSSERVER */
225 	} else if (SCARG(uap, flag) & NFSSVC_SETEXPORTSLIST) {
226 #ifndef NFSSERVER
227 		error = ENOSYS;
228 #else
229 		struct export_args *args;
230 		struct mountd_exports_list mel;
231 
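		/*
		 * mountd hands us a mountd_exports_list header followed by
		 * mel_nexports export_args structures; copy them in and
		 * install them as the new exports list.
		 */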
232 		error = copyin(SCARG(uap, argp), &mel, sizeof(mel));
233 		if (error != 0)
234 			return error;
235 
236 		args = (struct export_args *)malloc(mel.mel_nexports *
237 		    sizeof(struct export_args), M_TEMP, M_WAITOK);
238 		error = copyin(mel.mel_exports, args, mel.mel_nexports *
239 		    sizeof(struct export_args));
240 		if (error != 0) {
241 			free(args, M_TEMP);
242 			return error;
243 		}
244 		mel.mel_exports = args;
245 
246 		error = mountd_set_exports_list(&mel, l);
247 
248 		free(args, M_TEMP);
249 #endif /* !NFSSERVER */
250 	} else {
251 #ifndef NFSSERVER
252 		error = ENOSYS;
253 #else
254 		error = copyin(SCARG(uap, argp), (void *)nsd, sizeof (*nsd));
255 		if (error)
256 			return (error);
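		/*
		 * NFSSVC_AUTHIN: the userland nfsd has completed a Kerberos
		 * authentication; cache the resulting credential and session
		 * key on the socket so matching requests can reuse them.
		 */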
257 		if ((SCARG(uap, flag) & NFSSVC_AUTHIN) &&
258 		    ((nfsd = nsd->nsd_nfsd)) != NULL &&
259 		    (nfsd->nfsd_slp->ns_flags & SLP_VALID)) {
260 			slp = nfsd->nfsd_slp;
261 
262 			/*
263 			 * First check to see if another nfsd has already
264 			 * added this credential.
265 			 */
266 			LIST_FOREACH(nuidp, NUIDHASH(slp, nsd->nsd_cr.cr_uid),
267 			    nu_hash) {
268 				if (kauth_cred_geteuid(nuidp->nu_cr) ==
269 				    nsd->nsd_cr.cr_uid &&
270 				    (!nfsd->nfsd_nd->nd_nam2 ||
271 				     netaddr_match(NU_NETFAM(nuidp),
272 				     &nuidp->nu_haddr, nfsd->nfsd_nd->nd_nam2)))
273 					break;
274 			}
275 			if (nuidp) {
276 			    kauth_cred_hold(nuidp->nu_cr);
277 			    nfsd->nfsd_nd->nd_cr = nuidp->nu_cr;
278 			    nfsd->nfsd_nd->nd_flag |= ND_KERBFULL;
279 			} else {
280 			    /*
281 			     * Nope, so we will.
282 			     */
283 			    if (slp->ns_numuids < nuidhash_max) {
284 				slp->ns_numuids++;
285 				nuidp = kmem_alloc(sizeof(*nuidp), KM_SLEEP);
286 			    } else
287 				nuidp = (struct nfsuid *)0;
288 			    if ((slp->ns_flags & SLP_VALID) == 0) {
289 				if (nuidp)
290 				    kmem_free(nuidp, sizeof(*nuidp));
291 			    } else {
292 				if (nuidp == (struct nfsuid *)0) {
293 				    nuidp = TAILQ_FIRST(&slp->ns_uidlruhead);
294 				    LIST_REMOVE(nuidp, nu_hash);
295 				    TAILQ_REMOVE(&slp->ns_uidlruhead, nuidp,
296 					nu_lru);
297 				    if (nuidp->nu_flag & NU_NAM)
298 					m_freem(nuidp->nu_nam);
299 			        }
300 				nuidp->nu_flag = 0;
301 				kauth_uucred_to_cred(nuidp->nu_cr,
302 				    &nsd->nsd_cr);
303 				nuidp->nu_timestamp = nsd->nsd_timestamp;
304 				nuidp->nu_expire = time_second + nsd->nsd_ttl;
305 				/*
306 				 * and save the session key in nu_key.
307 				 */
308 				memcpy(nuidp->nu_key, nsd->nsd_key,
309 				    sizeof(nsd->nsd_key));
310 				if (nfsd->nfsd_nd->nd_nam2) {
311 				    struct sockaddr_in *saddr;
312 
313 				    saddr = mtod(nfsd->nfsd_nd->nd_nam2,
314 					 struct sockaddr_in *);
315 				    switch (saddr->sin_family) {
316 				    case AF_INET:
317 					nuidp->nu_flag |= NU_INETADDR;
318 					nuidp->nu_inetaddr =
319 					     saddr->sin_addr.s_addr;
320 					break;
321 				    case AF_ISO:
322 				    default:
323 					nuidp->nu_flag |= NU_NAM;
324 					nuidp->nu_nam = m_copym(
325 					    nfsd->nfsd_nd->nd_nam2, 0,
326 					     M_COPYALL, M_WAIT);
327 					break;
328 				    }
329 				}
330 				TAILQ_INSERT_TAIL(&slp->ns_uidlruhead, nuidp,
331 					nu_lru);
332 				LIST_INSERT_HEAD(NUIDHASH(slp, nsd->nsd_uid),
333 					nuidp, nu_hash);
334 				kauth_cred_hold(nuidp->nu_cr);
335 				nfsd->nfsd_nd->nd_cr = nuidp->nu_cr;
336 				nfsd->nfsd_nd->nd_flag |= ND_KERBFULL;
337 			    }
338 			}
339 		}
340 		if ((SCARG(uap, flag) & NFSSVC_AUTHINFAIL) &&
341 		    (nfsd = nsd->nsd_nfsd))
342 			nfsd->nfsd_flag |= NFSD_AUTHFAIL;
343 		error = nfssvc_nfsd(nsd, SCARG(uap, argp), l);
344 #endif /* !NFSSERVER */
345 	}
346 	if (error == EINTR || error == ERESTART)
347 		error = 0;
348 	return (error);
349 }
350 
351 #ifdef NFSSERVER
352 MALLOC_DEFINE(M_NFSD, "NFS daemon", "Nfs server daemon structure");
353 
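/*
 * Allocate and initialize a server socket structure and put it on the
 * global nfssvc_sockhead list.
 */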
354 static struct nfssvc_sock *
355 nfsrv_sockalloc()
356 {
357 	struct nfssvc_sock *slp;
358 
359 	slp = kmem_alloc(sizeof(*slp), KM_SLEEP);
360 	memset(slp, 0, sizeof (struct nfssvc_sock));
361 	mutex_init(&slp->ns_lock, MUTEX_DRIVER, IPL_SOFTNET);
362 	mutex_init(&slp->ns_alock, MUTEX_DRIVER, IPL_SOFTNET);
363 	cv_init(&slp->ns_cv, "nfsdsock");
364 	TAILQ_INIT(&slp->ns_uidlruhead);
365 	LIST_INIT(&slp->ns_tq);
366 	SIMPLEQ_INIT(&slp->ns_sendq);
367 	mutex_enter(&nfsd_lock);
368 	TAILQ_INSERT_TAIL(&nfssvc_sockhead, slp, ns_chain);
369 	mutex_exit(&nfsd_lock);
370 
371 	return slp;
372 }
373 
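/*
 * Free a server socket structure.  The underlying socket and file must
 * already have been released and the entry marked invalid.
 */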
374 static void
375 nfsrv_sockfree(struct nfssvc_sock *slp)
376 {
377 
378 	KASSERT(slp->ns_so == NULL);
379 	KASSERT(slp->ns_fp == NULL);
380 	KASSERT((slp->ns_flags & SLP_VALID) == 0);
381 	mutex_destroy(&slp->ns_lock);
382 	mutex_destroy(&slp->ns_alock);
383 	cv_destroy(&slp->ns_cv);
384 	kmem_free(slp, sizeof(*slp));
385 }
386 
387 /*
388  * Adds a socket to the list for servicing by nfsds.
389  */
390 int
391 nfssvc_addsock(fp, mynam)
392 	file_t *fp;
393 	struct mbuf *mynam;
394 {
395 	struct mbuf *m;
396 	int siz;
397 	struct nfssvc_sock *slp;
398 	struct socket *so;
399 	struct nfssvc_sock *tslp;
400 	int error;
401 
402 	so = (struct socket *)fp->f_data;
403 	tslp = (struct nfssvc_sock *)0;
404 	/*
405 	 * Add it to the list, as required.
406 	 */
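	/*
	 * UDP (and ISO CLTP) service uses the single preallocated
	 * per-protocol nfssvc_sock, so only one such socket may be added;
	 * stream sockets get a fresh structure from nfsrv_sockalloc() below.
	 */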
407 	if (so->so_proto->pr_protocol == IPPROTO_UDP) {
408 #ifdef INET6
409 		if (so->so_proto->pr_domain->dom_family == AF_INET6)
410 			tslp = nfs_udp6sock;
411 		else
412 #endif
413 		tslp = nfs_udpsock;
414 		if (tslp->ns_flags & SLP_VALID) {
415 			m_freem(mynam);
416 			return (EPERM);
417 		}
418 #ifdef ISO
419 	} else if (so->so_proto->pr_protocol == ISOPROTO_CLTP) {
420 		tslp = nfs_cltpsock;
421 		if (tslp->ns_flags & SLP_VALID) {
422 			m_freem(mynam);
423 			return (EPERM);
424 		}
425 #endif /* ISO */
426 	}
427 	if (so->so_type == SOCK_STREAM)
428 		siz = NFS_MAXPACKET + sizeof (u_long);
429 	else
430 		siz = NFS_MAXPACKET;
431 	solock(so);
432 	error = soreserve(so, siz, siz);
433 	sounlock(so);
434 	if (error) {
435 		m_freem(mynam);
436 		return (error);
437 	}
438 
439 	/*
440 	 * Set protocol specific options { for now TCP only } and
441 	 * reserve some space. For datagram sockets, this can get called
442 	 * repeatedly for the same socket, but that isn't harmful.
443 	 */
444 	if (so->so_type == SOCK_STREAM) {
445 		m = m_get(M_WAIT, MT_SOOPTS);
446 		MCLAIM(m, &nfs_mowner);
447 		*mtod(m, int32_t *) = 1;
448 		m->m_len = sizeof(int32_t);
449 		sosetopt(so, SOL_SOCKET, SO_KEEPALIVE, m);
450 	}
451 	if ((so->so_proto->pr_domain->dom_family == AF_INET
452 #ifdef INET6
453 	    || so->so_proto->pr_domain->dom_family == AF_INET6
454 #endif
455 	    ) &&
456 	    so->so_proto->pr_protocol == IPPROTO_TCP) {
457 		m = m_get(M_WAIT, MT_SOOPTS);
458 		MCLAIM(m, &nfs_mowner);
459 		*mtod(m, int32_t *) = 1;
460 		m->m_len = sizeof(int32_t);
461 		sosetopt(so, IPPROTO_TCP, TCP_NODELAY, m);
462 	}
463 	solock(so);
464 	so->so_rcv.sb_flags &= ~SB_NOINTR;
465 	so->so_rcv.sb_timeo = 0;
466 	so->so_snd.sb_flags &= ~SB_NOINTR;
467 	so->so_snd.sb_timeo = 0;
468 	sounlock(so);
469 	if (tslp) {
470 		slp = tslp;
471 	} else {
472 		slp = nfsrv_sockalloc();
473 	}
474 	slp->ns_so = so;
475 	slp->ns_nam = mynam;
476 	mutex_enter(&fp->f_lock);
477 	fp->f_count++;
478 	mutex_exit(&fp->f_lock);
479 	slp->ns_fp = fp;
480 	slp->ns_flags = SLP_VALID;
481 	slp->ns_aflags = SLP_A_NEEDQ;
482 	slp->ns_gflags = 0;
483 	slp->ns_sflags = 0;
484 	solock(so);
485 	so->so_upcallarg = (void *)slp;
486 	so->so_upcall = nfsrv_soupcall;
487 	so->so_rcv.sb_flags |= SB_UPCALL;
488 	sounlock(so);
489 	nfsrv_wakenfsd(slp);
490 	return (0);
491 }
492 
493 /*
494  * Called by nfssvc() for nfsds. Just loops around servicing rpc requests
495  * until it is killed by a signal.
496  */
497 int
498 nfssvc_nfsd(nsd, argp, l)
499 	struct nfsd_srvargs *nsd;
500 	void *argp;
501 	struct lwp *l;
502 {
503 	struct timeval tv;
504 	struct mbuf *m;
505 	struct nfssvc_sock *slp;
506 	struct nfsd *nfsd = nsd->nsd_nfsd;
507 	struct nfsrv_descript *nd = NULL;
508 	struct mbuf *mreq;
509 	u_quad_t cur_usec;
510 	int error = 0, cacherep, siz, sotype, writes_todo;
511 	struct proc *p = l->l_proc;
512 	int s;
513 	bool doreinit;
514 
515 #ifndef nolint
516 	cacherep = RC_DOIT;
517 	writes_todo = 0;
518 #endif
519 	uvm_lwp_hold(l);
520 	if (nfsd == NULL) {
521 		nsd->nsd_nfsd = nfsd = kmem_alloc(sizeof(*nfsd), KM_SLEEP);
522 		memset(nfsd, 0, sizeof (struct nfsd));
523 		cv_init(&nfsd->nfsd_cv, "nfsd");
524 		nfsd->nfsd_procp = p;
525 		mutex_enter(&nfsd_lock);
526 		while ((nfssvc_sockhead_flag & SLP_INIT) != 0) {
527 			KASSERT(nfs_numnfsd == 0);
528 			cv_wait(&nfsd_initcv, &nfsd_lock);
529 		}
530 		TAILQ_INSERT_TAIL(&nfsd_head, nfsd, nfsd_chain);
531 		nfs_numnfsd++;
532 		mutex_exit(&nfsd_lock);
533 	}
534 	/*
535 	 * Loop getting rpc requests until SIGKILL.
536 	 */
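	/*
	 * Each pass either services the socket already assigned to this
	 * nfsd, or sleeps on the idle list until nfsrv_wakenfsd() hands one
	 * over, or picks a socket with pending work off nfssvc_sockpending
	 * when NFSD_CHECKSLP is set.
	 */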
537 	for (;;) {
538 		bool dummy;
539 
540 		if ((curcpu()->ci_schedstate.spc_flags & SPCF_SHOULDYIELD)
541 		    != 0) {
542 			preempt();
543 		}
544 		if (nfsd->nfsd_slp == NULL) {
545 			mutex_enter(&nfsd_lock);
546 			while (nfsd->nfsd_slp == NULL &&
547 			    (nfsd_head_flag & NFSD_CHECKSLP) == 0) {
548 				SLIST_INSERT_HEAD(&nfsd_idle_head, nfsd,
549 				    nfsd_idle);
550 				error = cv_wait_sig(&nfsd->nfsd_cv, &nfsd_lock);
551 				if (error) {
552 					slp = nfsd->nfsd_slp;
553 					nfsd->nfsd_slp = NULL;
554 					if (!slp)
555 						SLIST_REMOVE(&nfsd_idle_head,
556 						    nfsd, nfsd, nfsd_idle);
557 					mutex_exit(&nfsd_lock);
558 					if (slp) {
559 						nfsrv_wakenfsd(slp);
560 						nfsrv_slpderef(slp);
561 					}
562 					goto done;
563 				}
564 			}
565 			if (nfsd->nfsd_slp == NULL &&
566 			    (nfsd_head_flag & NFSD_CHECKSLP) != 0) {
567 				slp = TAILQ_FIRST(&nfssvc_sockpending);
568 				if (slp) {
569 					KASSERT((slp->ns_gflags & SLP_G_DOREC)
570 					    != 0);
571 					TAILQ_REMOVE(&nfssvc_sockpending, slp,
572 					    ns_pending);
573 					slp->ns_gflags &= ~SLP_G_DOREC;
574 					slp->ns_sref++;
575 					nfsd->nfsd_slp = slp;
576 				} else
577 					nfsd_head_flag &= ~NFSD_CHECKSLP;
578 			}
579 			KASSERT(nfsd->nfsd_slp == NULL ||
580 			    nfsd->nfsd_slp->ns_sref > 0);
581 			mutex_exit(&nfsd_lock);
582 			if ((slp = nfsd->nfsd_slp) == NULL)
583 				continue;
584 			if (slp->ns_flags & SLP_VALID) {
585 				bool more;
586 
587 				if (nfsdsock_testbits(slp, SLP_A_NEEDQ)) {
588 					nfsrv_rcv(slp);
589 				}
590 				if (nfsdsock_testbits(slp, SLP_A_DISCONN)) {
591 					nfsrv_zapsock(slp);
592 				}
593 				error = nfsrv_dorec(slp, nfsd, &nd, &more);
594 				getmicrotime(&tv);
595 				cur_usec = (u_quad_t)tv.tv_sec * 1000000 +
596 					(u_quad_t)tv.tv_usec;
597 				writes_todo = 0;
598 				if (error) {
599 					struct nfsrv_descript *nd2;
600 
601 					mutex_enter(&nfsd_lock);
602 					nd2 = LIST_FIRST(&slp->ns_tq);
603 					if (nd2 != NULL &&
604 					    nd2->nd_time <= cur_usec) {
605 						error = 0;
606 						cacherep = RC_DOIT;
607 						writes_todo = 1;
608 					}
609 					mutex_exit(&nfsd_lock);
610 				}
611 				if (error == 0 && more) {
612 					nfsrv_wakenfsd(slp);
613 				}
614 			}
615 		} else {
616 			error = 0;
617 			slp = nfsd->nfsd_slp;
618 		}
619 		KASSERT(slp != NULL);
620 		KASSERT(nfsd->nfsd_slp == slp);
621 		if (error || (slp->ns_flags & SLP_VALID) == 0) {
622 			if (nd) {
623 				nfsdreq_free(nd);
624 				nd = NULL;
625 			}
626 			nfsd->nfsd_slp = NULL;
627 			nfsrv_slpderef(slp);
628 			continue;
629 		}
630 		sotype = slp->ns_so->so_type;
631 		if (nd) {
632 			getmicrotime(&nd->nd_starttime);
633 			if (nd->nd_nam2)
634 				nd->nd_nam = nd->nd_nam2;
635 			else
636 				nd->nd_nam = slp->ns_nam;
637 
638 			/*
639 			 * Check to see if authorization is needed.
640 			 */
641 			if (nfsd->nfsd_flag & NFSD_NEEDAUTH) {
642 				nfsd->nfsd_flag &= ~NFSD_NEEDAUTH;
643 				nsd->nsd_haddr = mtod(nd->nd_nam,
644 				    struct sockaddr_in *)->sin_addr.s_addr;
645 				nsd->nsd_authlen = nfsd->nfsd_authlen;
646 				nsd->nsd_verflen = nfsd->nfsd_verflen;
647 				if (!copyout(nfsd->nfsd_authstr,
648 				    nsd->nsd_authstr, nfsd->nfsd_authlen) &&
649 				    !copyout(nfsd->nfsd_verfstr,
650 				    nsd->nsd_verfstr, nfsd->nfsd_verflen) &&
651 				    !copyout(nsd, argp, sizeof (*nsd))) {
652 					uvm_lwp_rele(l);
653 					return (ENEEDAUTH);
654 				}
655 				cacherep = RC_DROPIT;
656 			} else
657 				cacherep = nfsrv_getcache(nd, slp, &mreq);
658 
659 			if (nfsd->nfsd_flag & NFSD_AUTHFAIL) {
660 				nfsd->nfsd_flag &= ~NFSD_AUTHFAIL;
661 				nd->nd_procnum = NFSPROC_NOOP;
662 				nd->nd_repstat =
663 				    (NFSERR_AUTHERR | AUTH_TOOWEAK);
664 				cacherep = RC_DOIT;
665 			}
666 		}
667 
668 		/*
669 		 * Loop to get all the write rpc replies that have been
670 		 * gathered together.
671 		 */
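		/*
		 * cacherep, from nfsrv_getcache(), selects the action:
		 * RC_DOIT executes the RPC, RC_REPLY resends the cached
		 * reply in mreq, and RC_DROPIT silently drops the request.
		 */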
672 		do {
673 			switch (cacherep) {
674 			case RC_DOIT:
675 				mreq = NULL;
676 				netexport_rdlock();
677 				if (writes_todo || nd == NULL ||
678 				     (!(nd->nd_flag & ND_NFSV3) &&
679 				     nd->nd_procnum == NFSPROC_WRITE &&
680 				     nfsrvw_procrastinate > 0))
681 					error = nfsrv_writegather(&nd, slp,
682 					    l, &mreq);
683 				else
684 					error =
685 					    (*(nfsrv3_procs[nd->nd_procnum]))
686 					    (nd, slp, l, &mreq);
687 				netexport_rdunlock();
688 				if (mreq == NULL) {
689 					if (nd != NULL) {
690 						if (nd->nd_nam2)
691 							m_free(nd->nd_nam2);
692 						if (nd->nd_mrep)
693 							m_freem(nd->nd_mrep);
694 					}
695 					break;
696 				}
697 				if (error) {
698 					nfsstats.srv_errs++;
699 					nfsrv_updatecache(nd, false, mreq);
700 					if (nd->nd_nam2)
701 						m_freem(nd->nd_nam2);
702 					break;
703 				}
704 				nfsstats.srvrpccnt[nd->nd_procnum]++;
705 				nfsrv_updatecache(nd, true, mreq);
706 				nd->nd_mrep = (struct mbuf *)0;
707 			case RC_REPLY:
708 				m = mreq;
709 				siz = 0;
710 				while (m) {
711 					siz += m->m_len;
712 					m = m->m_next;
713 				}
714 				if (siz <= 0 || siz > NFS_MAXPACKET) {
715 					printf("mbuf siz=%d\n", siz);
716 					panic("Bad nfs svc reply");
717 				}
718 				m = mreq;
719 				m->m_pkthdr.len = siz;
720 				m->m_pkthdr.rcvif = (struct ifnet *)0;
721 				/*
722 				 * For stream protocols, prepend a Sun RPC
723 				 * Record Mark.
724 				 */
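				/*
				 * The record mark is one 32-bit word in
				 * network byte order: the top bit flags the
				 * last fragment and the low 31 bits give the
				 * fragment length.  The whole reply is sent
				 * as a single fragment, hence
				 * 0x80000000 | siz.
				 */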
725 				if (sotype == SOCK_STREAM) {
726 					M_PREPEND(m, NFSX_UNSIGNED, M_WAIT);
727 					*mtod(m, u_int32_t *) =
728 					    htonl(0x80000000 | siz);
729 				}
730 				nd->nd_mreq = m;
731 				if (nfsrtton) {
732 					nfsd_rt(slp->ns_so->so_type, nd,
733 					    cacherep);
734 				}
735 				error = nfsdsock_sendreply(slp, nd);
736 				nd = NULL;
737 				if (error == EPIPE)
738 					nfsrv_zapsock(slp);
739 				if (error == EINTR || error == ERESTART) {
740 					nfsd->nfsd_slp = NULL;
741 					nfsrv_slpderef(slp);
742 					goto done;
743 				}
744 				break;
745 			case RC_DROPIT:
746 				if (nfsrtton)
747 					nfsd_rt(sotype, nd, cacherep);
748 				m_freem(nd->nd_mrep);
749 				m_freem(nd->nd_nam2);
750 				break;
751 			}
752 			if (nd) {
753 				nfsdreq_free(nd);
754 				nd = NULL;
755 			}
756 
757 			/*
758 			 * Check to see if there are outstanding writes that
759 			 * need to be serviced.
760 			 */
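			/*
			 * Gathered writes sit on slp->ns_tq with an nd_time
			 * deadline (see nfsrvw_procrastinate); once a
			 * deadline has passed, loop again with writes_todo
			 * set so nfsrv_writegather() flushes them.
			 */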
761 			getmicrotime(&tv);
762 			cur_usec = (u_quad_t)tv.tv_sec * 1000000 +
763 			    (u_quad_t)tv.tv_usec;
764 			s = splsoftclock();
765 			if (LIST_FIRST(&slp->ns_tq) &&
766 			    LIST_FIRST(&slp->ns_tq)->nd_time <= cur_usec) {
767 				cacherep = RC_DOIT;
768 				writes_todo = 1;
769 			} else
770 				writes_todo = 0;
771 			splx(s);
772 		} while (writes_todo);
773 		if (nfsrv_dorec(slp, nfsd, &nd, &dummy)) {
774 			nfsd->nfsd_slp = NULL;
775 			nfsrv_slpderef(slp);
776 		}
777 	}
778 done:
779 	mutex_enter(&nfsd_lock);
780 	TAILQ_REMOVE(&nfsd_head, nfsd, nfsd_chain);
781 	doreinit = --nfs_numnfsd == 0;
782 	if (doreinit)
783 		nfssvc_sockhead_flag |= SLP_INIT;
784 	mutex_exit(&nfsd_lock);
785 	cv_destroy(&nfsd->nfsd_cv);
786 	kmem_free(nfsd, sizeof(*nfsd));
787 	nsd->nsd_nfsd = NULL;
788 	if (doreinit)
789 		nfsrv_init(true);	/* Reinitialize everything */
790 	uvm_lwp_rele(l);
791 	return (error);
792 }
793 
794 /*
795  * Shut down a socket associated with an nfssvc_sock structure.
796  * Should be called with the send lock set, if required.
797  * The trick here is to increment the sref at the start, so that the nfsds
798  * will stop using it, and to clear ns_flags at the end so that it will not
799  * be reassigned during cleanup.
800  *
801  * called at splsoftnet.
802  */
803 void
804 nfsrv_zapsock(slp)
805 	struct nfssvc_sock *slp;
806 {
807 	struct nfsuid *nuidp, *nnuidp;
808 	struct nfsrv_descript *nwp;
809 	struct socket *so;
810 	struct mbuf *m;
811 
812 	if (nfsdsock_drain(slp)) {
813 		return;
814 	}
815 	mutex_enter(&nfsd_lock);
816 	if (slp->ns_gflags & SLP_G_DOREC) {
817 		TAILQ_REMOVE(&nfssvc_sockpending, slp, ns_pending);
818 		slp->ns_gflags &= ~SLP_G_DOREC;
819 	}
820 	mutex_exit(&nfsd_lock);
821 
822 	so = slp->ns_so;
823 	KASSERT(so != NULL);
824 	solock(so);
825 	so->so_upcall = NULL;
826 	so->so_upcallarg = NULL;
827 	so->so_rcv.sb_flags &= ~SB_UPCALL;
828 	soshutdown(so, SHUT_RDWR);
829 	sounlock(so);
830 
831 	if (slp->ns_nam)
832 		m_free(slp->ns_nam);
833 	m_freem(slp->ns_raw);
834 	m = slp->ns_rec;
835 	while (m != NULL) {
836 		struct mbuf *n;
837 
838 		n = m->m_nextpkt;
839 		m_freem(m);
840 		m = n;
841 	}
842 	for (nuidp = TAILQ_FIRST(&slp->ns_uidlruhead); nuidp != 0;
843 	    nuidp = nnuidp) {
844 		nnuidp = TAILQ_NEXT(nuidp, nu_lru);
845 		LIST_REMOVE(nuidp, nu_hash);
846 		TAILQ_REMOVE(&slp->ns_uidlruhead, nuidp, nu_lru);
847 		if (nuidp->nu_flag & NU_NAM)
848 			m_freem(nuidp->nu_nam);
849 		kmem_free(nuidp, sizeof(*nuidp));
850 	}
851 	mutex_enter(&nfsd_lock);
852 	while ((nwp = LIST_FIRST(&slp->ns_tq)) != NULL) {
853 		LIST_REMOVE(nwp, nd_tq);
854 		mutex_exit(&nfsd_lock);
855 		nfsdreq_free(nwp);
856 		mutex_enter(&nfsd_lock);
857 	}
858 	mutex_exit(&nfsd_lock);
859 }
860 
861 /*
862  * Dereference a server socket structure. If it has no more references and
863  * is no longer valid, you can throw it away.
864  */
865 void
866 nfsrv_slpderef(slp)
867 	struct nfssvc_sock *slp;
868 {
869 	uint32_t ref;
870 
871 	mutex_enter(&nfsd_lock);
872 	KASSERT(slp->ns_sref > 0);
873 	ref = --slp->ns_sref;
874 	mutex_exit(&nfsd_lock);
875 	if (ref == 0 && (slp->ns_flags & SLP_VALID) == 0) {
876 		file_t *fp;
877 
878 		mutex_enter(&nfsd_lock);
879 		KASSERT((slp->ns_gflags & SLP_G_DOREC) == 0);
880 		TAILQ_REMOVE(&nfssvc_sockhead, slp, ns_chain);
881 		mutex_exit(&nfsd_lock);
882 
883 		fp = slp->ns_fp;
884 		if (fp != NULL) {
885 			slp->ns_fp = NULL;
886 			KASSERT(fp != NULL);
887 			KASSERT(fp->f_data == slp->ns_so);
888 			KASSERT(fp->f_count > 0);
889 			closef(fp);
890 			slp->ns_so = NULL;
891 		}
892 
893 		nfsrv_sockfree(slp);
894 	}
895 }
896 
897 /*
898  * Initialize the data structures for the server.
899  * Handshake with any new nfsds starting up to avoid any chance of
900  * corruption.
901  */
902 void
903 nfsrv_init(terminating)
904 	int terminating;
905 {
906 	struct nfssvc_sock *slp;
907 
908 	if (!terminating) {
909 		mutex_init(&nfsd_lock, MUTEX_DRIVER, IPL_SOFTNET);
910 		cv_init(&nfsd_initcv, "nfsdinit");
911 	}
912 
913 	mutex_enter(&nfsd_lock);
914 	if (!terminating && (nfssvc_sockhead_flag & SLP_INIT) != 0)
915 		panic("nfsd init");
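	/*
	 * Flag (re)initialization in progress; sys_nfssvc() and starting
	 * nfsds block on nfsd_initcv until the flag is cleared below.
	 */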
916 	nfssvc_sockhead_flag |= SLP_INIT;
917 
918 	if (terminating) {
919 		KASSERT(SLIST_EMPTY(&nfsd_idle_head));
920 		KASSERT(TAILQ_EMPTY(&nfsd_head));
921 		while ((slp = TAILQ_FIRST(&nfssvc_sockhead)) != NULL) {
922 			mutex_exit(&nfsd_lock);
923 			KASSERT(slp->ns_sref == 0);
924 			slp->ns_sref++;
925 			nfsrv_zapsock(slp);
926 			nfsrv_slpderef(slp);
927 			mutex_enter(&nfsd_lock);
928 		}
929 		KASSERT(TAILQ_EMPTY(&nfssvc_sockpending));
930 		mutex_exit(&nfsd_lock);
931 		nfsrv_cleancache();	/* And clear out server cache */
932 	} else {
933 		mutex_exit(&nfsd_lock);
934 		nfs_pub.np_valid = 0;
935 	}
936 
937 	TAILQ_INIT(&nfssvc_sockhead);
938 	TAILQ_INIT(&nfssvc_sockpending);
939 
940 	TAILQ_INIT(&nfsd_head);
941 	SLIST_INIT(&nfsd_idle_head);
942 	nfsd_head_flag &= ~NFSD_CHECKSLP;
943 
944 	nfs_udpsock = nfsrv_sockalloc();
945 
946 #ifdef INET6
947 	nfs_udp6sock = nfsrv_sockalloc();
948 #endif
949 
950 #ifdef ISO
951 	nfs_cltpsock = nfsrv_sockalloc();
952 #endif
953 
954 	mutex_enter(&nfsd_lock);
955 	nfssvc_sockhead_flag &= ~SLP_INIT;
956 	cv_broadcast(&nfsd_initcv);
957 	mutex_exit(&nfsd_lock);
958 }
959 
960 /*
961  * Add entries to the server monitor log.
962  */
963 static void
964 nfsd_rt(sotype, nd, cacherep)
965 	int sotype;
966 	struct nfsrv_descript *nd;
967 	int cacherep;
968 {
969 	struct timeval tv;
970 	struct drt *rt;
971 
972 	rt = &nfsdrt.drt[nfsdrt.pos];
973 	if (cacherep == RC_DOIT)
974 		rt->flag = 0;
975 	else if (cacherep == RC_REPLY)
976 		rt->flag = DRT_CACHEREPLY;
977 	else
978 		rt->flag = DRT_CACHEDROP;
979 	if (sotype == SOCK_STREAM)
980 		rt->flag |= DRT_TCP;
981 	if (nd->nd_flag & ND_NFSV3)
982 		rt->flag |= DRT_NFSV3;
983 	rt->proc = nd->nd_procnum;
984 	if (mtod(nd->nd_nam, struct sockaddr *)->sa_family == AF_INET)
985 	    rt->ipadr = mtod(nd->nd_nam, struct sockaddr_in *)->sin_addr.s_addr;
986 	else
987 	    rt->ipadr = INADDR_ANY;
988 	getmicrotime(&tv);
989 	rt->resptime = ((tv.tv_sec - nd->nd_starttime.tv_sec) * 1000000) +
990 		(tv.tv_usec - nd->nd_starttime.tv_usec);
991 	rt->tstamp = tv;
992 	nfsdrt.pos = (nfsdrt.pos + 1) % NFSRTTLOGSIZ;
993 }
994 #endif /* NFSSERVER */
995 
996 #ifdef NFS
997 
998 int nfs_defect = 0;
999 /*
1000  * Asynchronous I/O threads for client nfs.
1001  * They do read-ahead and write-behind operations on the block I/O cache.
1002  * Never returns unless it fails or gets killed.
1003  */
1004 
1005 static void
1006 nfssvc_iod(void *arg)
1007 {
1008 	struct buf *bp;
1009 	struct nfs_iod *myiod;
1010 	struct nfsmount *nmp;
1011 
1012 	myiod = kmem_alloc(sizeof(*myiod), KM_SLEEP);
1013 	mutex_init(&myiod->nid_lock, MUTEX_DEFAULT, IPL_NONE);
1014 	cv_init(&myiod->nid_cv, "nfsiod");
1015 	myiod->nid_exiting = false;
1016 	myiod->nid_mount = NULL;
1017 	mutex_enter(&nfs_iodlist_lock);
1018 	LIST_INSERT_HEAD(&nfs_iodlist_all, myiod, nid_all);
1019 	mutex_exit(&nfs_iodlist_lock);
1020 
1021 	for (;;) {
1022 		mutex_enter(&nfs_iodlist_lock);
1023 		LIST_INSERT_HEAD(&nfs_iodlist_idle, myiod, nid_idle);
1024 		mutex_exit(&nfs_iodlist_lock);
1025 
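		/*
		 * Wait for nfs_asyncio() to hand us a mount: it removes us
		 * from nfs_iodlist_idle, sets nid_mount and signals nid_cv.
		 * A request to exit (nid_exiting) arrives the same way.
		 */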
1026 		mutex_enter(&myiod->nid_lock);
1027 		while (/*CONSTCOND*/ true) {
1028 			nmp = myiod->nid_mount;
1029 			if (nmp) {
1030 				myiod->nid_mount = NULL;
1031 				break;
1032 			}
1033 			if (__predict_false(myiod->nid_exiting)) {
1034 				/*
1035 				 * drop nid_lock to preserve locking order.
1036 				 */
1037 				mutex_exit(&myiod->nid_lock);
1038 				mutex_enter(&nfs_iodlist_lock);
1039 				mutex_enter(&myiod->nid_lock);
1040 				/*
1041 				 * recheck nid_mount because nfs_asyncio can
1042 				 * pick us in the meantime as we are still on
1043 				 * nfs_iodlist_idle.
1044 				 */
1045 				if (myiod->nid_mount != NULL) {
1046 					mutex_exit(&nfs_iodlist_lock);
1047 					continue;
1048 				}
1049 				LIST_REMOVE(myiod, nid_idle);
1050 				mutex_exit(&nfs_iodlist_lock);
1051 				goto quit;
1052 			}
1053 			cv_wait(&myiod->nid_cv, &myiod->nid_lock);
1054 		}
1055 		mutex_exit(&myiod->nid_lock);
1056 
1057 		mutex_enter(&nmp->nm_lock);
1058 		while ((bp = TAILQ_FIRST(&nmp->nm_bufq)) != NULL) {
1059 			/* Take one off the front of the list */
1060 			TAILQ_REMOVE(&nmp->nm_bufq, bp, b_freelist);
1061 			nmp->nm_bufqlen--;
1062 			if (nmp->nm_bufqlen < 2 * nmp->nm_bufqiods) {
1063 				cv_broadcast(&nmp->nm_aiocv);
1064 			}
1065 			mutex_exit(&nmp->nm_lock);
1066 			KERNEL_LOCK(1, curlwp);
1067 			(void)nfs_doio(bp);
1068 			KERNEL_UNLOCK_LAST(curlwp);
1069 			mutex_enter(&nmp->nm_lock);
1070 			/*
1071 			 * If there is more than one iod on this mount,
1072 			 * then defect so that the iods can be shared out
1073 			 * fairly between the mounts.
1074 			 */
1075 			if (nfs_defect && nmp->nm_bufqiods > 1) {
1076 				break;
1077 			}
1078 		}
1079 		KASSERT(nmp->nm_bufqiods > 0);
1080 		nmp->nm_bufqiods--;
1081 		mutex_exit(&nmp->nm_lock);
1082 	}
1083 quit:
1084 	KASSERT(myiod->nid_mount == NULL);
1085 	mutex_exit(&myiod->nid_lock);
1086 
1087 	cv_destroy(&myiod->nid_cv);
1088 	mutex_destroy(&myiod->nid_lock);
1089 	kmem_free(myiod, sizeof(*myiod));
1090 
1091 	kthread_exit(0);
1092 }
1093 
1094 void
1095 nfs_iodinit()
1096 {
1097 
1098 	mutex_init(&nfs_iodlist_lock, MUTEX_DEFAULT, IPL_NONE);
1099 	LIST_INIT(&nfs_iodlist_all);
1100 	LIST_INIT(&nfs_iodlist_idle);
1101 }
1102 
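/*
 * Set the number of nfsiod threads: create new threads or ask existing
 * ones to exit until nfs_numasync matches the requested value, clamped to
 * the range [0, NFS_MAXASYNCDAEMON].
 */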
1103 int
1104 nfs_set_niothreads(int newval)
1105 {
1106 	struct nfs_iod *nid;
1107 	int error = 0;
1108 	int hold_count;
1109 
1110 	KERNEL_UNLOCK_ALL(curlwp, &hold_count);
1111 
1112 	mutex_enter(&nfs_iodlist_lock);
1113 	/* clamp to sane range */
1114 	nfs_niothreads = max(0, min(newval, NFS_MAXASYNCDAEMON));
1115 
1116 	while (nfs_numasync != nfs_niothreads && error == 0) {
1117 		while (nfs_numasync < nfs_niothreads) {
1118 
1119 			/*
1120 			 * kthread_create() can wait for the pagedaemon, and the
1121 			 * pagedaemon can wait for an nfsiod that needs
1122 			 * nfs_iodlist_lock, so drop the lock across the call.
1123 			 */
1124 
1125 			mutex_exit(&nfs_iodlist_lock);
1126 			error = kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL,
1127 			    nfssvc_iod, NULL, NULL, "nfsio");
1128 			mutex_enter(&nfs_iodlist_lock);
1129 			if (error) {
1130 				/* give up */
1131 				nfs_niothreads = nfs_numasync;
1132 				break;
1133 			}
1134 			nfs_numasync++;
1135 		}
1136 		while (nfs_numasync > nfs_niothreads) {
1137 			nid = LIST_FIRST(&nfs_iodlist_all);
1138 			if (nid == NULL) {
1139 				/* iod has not started yet. */
1140 				kpause("nfsiorm", false, hz, &nfs_iodlist_lock);
1141 				continue;
1142 			}
1143 			LIST_REMOVE(nid, nid_all);
1144 			mutex_enter(&nid->nid_lock);
1145 			KASSERT(!nid->nid_exiting);
1146 			nid->nid_exiting = true;
1147 			cv_signal(&nid->nid_cv);
1148 			mutex_exit(&nid->nid_lock);
1149 			nfs_numasync--;
1150 		}
1151 	}
1152 	mutex_exit(&nfs_iodlist_lock);
1153 
1154 	KERNEL_LOCK(hold_count, curlwp);
1155 	return error;
1156 }
1157 
1158 /*
1159  * Get an authorization string for the uid by having the mount_nfs process
1160  * sitting on this mount point come out of the kernel and do it.
1161  */
1162 int
1163 nfs_getauth(nmp, rep, cred, auth_str, auth_len, verf_str, verf_len, key)
1164 	struct nfsmount *nmp;
1165 	struct nfsreq *rep;
1166 	kauth_cred_t cred;
1167 	char **auth_str;
1168 	int *auth_len;
1169 	char *verf_str;
1170 	int *verf_len;
1171 	NFSKERBKEY_T key;		/* return session key */
1172 {
1173 	int error = 0;
1174 
1175 	while ((nmp->nm_iflag & NFSMNT_WAITAUTH) == 0) {
1176 		nmp->nm_iflag |= NFSMNT_WANTAUTH;
1177 		(void) tsleep((void *)&nmp->nm_authtype, PSOCK,
1178 			"nfsauth1", 2 * hz);
1179 		error = nfs_sigintr(nmp, rep, rep->r_lwp);
1180 		if (error) {
1181 			nmp->nm_iflag &= ~NFSMNT_WANTAUTH;
1182 			return (error);
1183 		}
1184 	}
1185 	nmp->nm_iflag &= ~(NFSMNT_WAITAUTH | NFSMNT_WANTAUTH);
1186 	nmp->nm_authstr = *auth_str = (char *)malloc(RPCAUTH_MAXSIZ, M_TEMP, M_WAITOK);
1187 	nmp->nm_authlen = RPCAUTH_MAXSIZ;
1188 	nmp->nm_verfstr = verf_str;
1189 	nmp->nm_verflen = *verf_len;
1190 	nmp->nm_authuid = kauth_cred_geteuid(cred);
1191 	wakeup((void *)&nmp->nm_authstr);
1192 
1193 	/*
1194 	 * And wait for mount_nfs to do its stuff.
1195 	 */
1196 	while ((nmp->nm_iflag & NFSMNT_HASAUTH) == 0 && error == 0) {
1197 		(void) tsleep((void *)&nmp->nm_authlen, PSOCK,
1198 			"nfsauth2", 2 * hz);
1199 		error = nfs_sigintr(nmp, rep, rep->r_lwp);
1200 	}
1201 	if (nmp->nm_iflag & NFSMNT_AUTHERR) {
1202 		nmp->nm_iflag &= ~NFSMNT_AUTHERR;
1203 		error = EAUTH;
1204 	}
1205 	if (error)
1206 		free((void *)*auth_str, M_TEMP);
1207 	else {
1208 		*auth_len = nmp->nm_authlen;
1209 		*verf_len = nmp->nm_verflen;
1210 		memcpy(key, nmp->nm_key, sizeof (NFSKERBKEY_T));
1211 	}
1212 	nmp->nm_iflag &= ~NFSMNT_HASAUTH;
1213 	nmp->nm_iflag |= NFSMNT_WAITAUTH;
1214 	if (nmp->nm_iflag & NFSMNT_WANTAUTH) {
1215 		nmp->nm_iflag &= ~NFSMNT_WANTAUTH;
1216 		wakeup((void *)&nmp->nm_authtype);
1217 	}
1218 	return (error);
1219 }
1220 
1221 /*
1222  * Get a nickname authenticator and verifier.
1223  */
1224 int
1225 nfs_getnickauth(struct nfsmount *nmp, kauth_cred_t cred, char **auth_str,
1226     int *auth_len, char *verf_str, int verf_len)
1227 {
1228 	struct timeval ktvin, ktvout, tv;
1229 	struct nfsuid *nuidp;
1230 	u_int32_t *nickp, *verfp;
1231 
1232 	memset(&ktvout, 0, sizeof ktvout);	/* XXX gcc */
1233 
1234 #ifdef DIAGNOSTIC
1235 	if (verf_len < (4 * NFSX_UNSIGNED))
1236 		panic("nfs_getnickauth verf too small");
1237 #endif
1238 	LIST_FOREACH(nuidp, NMUIDHASH(nmp, kauth_cred_geteuid(cred)), nu_hash) {
1239 		if (kauth_cred_geteuid(nuidp->nu_cr) == kauth_cred_geteuid(cred))
1240 			break;
1241 	}
1242 	if (!nuidp || nuidp->nu_expire < time_second)
1243 		return (EACCES);
1244 
1245 	/*
1246 	 * Move to the end of the lru list (end of lru == most recently used).
1247 	 */
1248 	TAILQ_REMOVE(&nmp->nm_uidlruhead, nuidp, nu_lru);
1249 	TAILQ_INSERT_TAIL(&nmp->nm_uidlruhead, nuidp, nu_lru);
1250 
1251 	nickp = (u_int32_t *)malloc(2 * NFSX_UNSIGNED, M_TEMP, M_WAITOK);
1252 	*nickp++ = txdr_unsigned(RPCAKN_NICKNAME);
1253 	*nickp = txdr_unsigned(nuidp->nu_nickname);
1254 	*auth_str = (char *)nickp;
1255 	*auth_len = 2 * NFSX_UNSIGNED;
1256 
1257 	/*
1258 	 * Now we must encrypt the verifier and package it up.
1259 	 */
1260 	verfp = (u_int32_t *)verf_str;
1261 	*verfp++ = txdr_unsigned(RPCAKN_NICKNAME);
1262 	getmicrotime(&tv);
1263 	if (tv.tv_sec > nuidp->nu_timestamp.tv_sec ||
1264 	    (tv.tv_sec == nuidp->nu_timestamp.tv_sec &&
1265 	     tv.tv_usec > nuidp->nu_timestamp.tv_usec))
1266 		nuidp->nu_timestamp = tv;
1267 	else
1268 		nuidp->nu_timestamp.tv_usec++;
1269 	ktvin.tv_sec = txdr_unsigned(nuidp->nu_timestamp.tv_sec);
1270 	ktvin.tv_usec = txdr_unsigned(nuidp->nu_timestamp.tv_usec);
1271 
1272 	/*
1273 	 * Now encrypt the timestamp verifier in ecb mode using the session
1274 	 * key.
1275 	 */
1276 #ifdef NFSKERB
1277 	XXX
1278 #endif
1279 
1280 	*verfp++ = ktvout.tv_sec;
1281 	*verfp++ = ktvout.tv_usec;
1282 	*verfp = 0;
1283 	return (0);
1284 }
1285 
1286 /*
1287  * Save the current nickname in a hash list entry on the mount point.
1288  */
1289 int
1290 nfs_savenickauth(nmp, cred, len, key, mdp, dposp, mrep)
1291 	struct nfsmount *nmp;
1292 	kauth_cred_t cred;
1293 	int len;
1294 	NFSKERBKEY_T key;
1295 	struct mbuf **mdp;
1296 	char **dposp;
1297 	struct mbuf *mrep;
1298 {
1299 	struct nfsuid *nuidp;
1300 	u_int32_t *tl;
1301 	int32_t t1;
1302 	struct mbuf *md = *mdp;
1303 	struct timeval ktvin, ktvout;
1304 	u_int32_t nick;
1305 	char *dpos = *dposp, *cp2;
1306 	int deltasec, error = 0;
1307 
1308 	memset(&ktvout, 0, sizeof ktvout);	 /* XXX gcc */
1309 
1310 	if (len == (3 * NFSX_UNSIGNED)) {
1311 		nfsm_dissect(tl, u_int32_t *, 3 * NFSX_UNSIGNED);
1312 		ktvin.tv_sec = *tl++;
1313 		ktvin.tv_usec = *tl++;
1314 		nick = fxdr_unsigned(u_int32_t, *tl);
1315 
1316 		/*
1317 		 * Decrypt the timestamp in ecb mode.
1318 		 */
1319 #ifdef NFSKERB
1320 		XXX
1321 #endif
1322 		ktvout.tv_sec = fxdr_unsigned(long, ktvout.tv_sec);
1323 		ktvout.tv_usec = fxdr_unsigned(long, ktvout.tv_usec);
1324 		deltasec = time_second - ktvout.tv_sec;
1325 		if (deltasec < 0)
1326 			deltasec = -deltasec;
1327 		/*
1328 		 * If ok, add it to the hash list for the mount point.
1329 		 */
1330 		if (deltasec <= NFS_KERBCLOCKSKEW) {
1331 			if (nmp->nm_numuids < nuidhash_max) {
1332 				nmp->nm_numuids++;
1333 				nuidp = kmem_alloc(sizeof(*nuidp), KM_SLEEP);
1334 			} else {
1335 				nuidp = TAILQ_FIRST(&nmp->nm_uidlruhead);
1336 				LIST_REMOVE(nuidp, nu_hash);
1337 				TAILQ_REMOVE(&nmp->nm_uidlruhead, nuidp,
1338 					nu_lru);
1339 			}
1340 			nuidp->nu_flag = 0;
1341 			kauth_cred_seteuid(nuidp->nu_cr, kauth_cred_geteuid(cred));
1342 			nuidp->nu_expire = time_second + NFS_KERBTTL;
1343 			nuidp->nu_timestamp = ktvout;
1344 			nuidp->nu_nickname = nick;
1345 			memcpy(nuidp->nu_key, key, sizeof (NFSKERBKEY_T));
1346 			TAILQ_INSERT_TAIL(&nmp->nm_uidlruhead, nuidp,
1347 				nu_lru);
1348 			LIST_INSERT_HEAD(NMUIDHASH(nmp, kauth_cred_geteuid(cred)),
1349 				nuidp, nu_hash);
1350 		}
1351 	} else
1352 		nfsm_adv(nfsm_rndup(len));
1353 nfsmout:
1354 	*mdp = md;
1355 	*dposp = dpos;
1356 	return (error);
1357 }
1358 #endif /* NFS */
1359