/*
 * Copyright (c) 1989 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms are permitted
 * provided that the above copyright notice and this paragraph are
 * duplicated in all such forms and that any documentation,
 * advertising materials, and other materials related to such
 * distribution and use acknowledge that the software was developed
 * by the University of California, Berkeley.  The name of the
 * University may not be used to endorse or promote products derived
 * from this software without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 *	@(#)nfs_subs.c	7.12 (Berkeley) 11/03/89
 */

/*
 * These functions support the macros and help fiddle mbuf chains for
 * the nfs op functions. They do things like create the rpc header and
 * copy data between mbuf chains and uio lists.
 */
#include "strings.h"
#include "param.h"
#include "systm.h"
#include "user.h"
#include "proc.h"
#include "mount.h"
#include "../ufs/dir.h"
#include "time.h"
#include "errno.h"
#include "kernel.h"
#include "malloc.h"
#include "mbuf.h"
#include "file.h"
#include "vnode.h"
#include "uio.h"
#include "namei.h"
#include "ucred.h"
#include "map.h"
#include "rpcv2.h"
#include "nfsv2.h"
#include "nfsnode.h"
#include "nfs.h"
#include "nfsiom.h"
#include "xdr_subs.h"
#include "nfsm_subs.h"

#define TRUE	1
#define	FALSE	0

/*
 * Data items converted to xdr at startup, since they are constant
 * This is kinda hokey, but may save a little time doing byte swaps
 */
u_long nfs_procids[NFS_NPROCS];
u_long nfs_xdrneg1;
u_long rpc_call, rpc_vers, rpc_reply, rpc_msgdenied,
	rpc_mismatch, rpc_auth_unix, rpc_msgaccepted;
u_long nfs_vers, nfs_prog, nfs_true, nfs_false;
/* And other global data */
static u_long *rpc_uidp = (u_long *)0;
static u_long nfs_xid = 1;
static char *rpc_unixauth;
extern long hostid;
extern enum vtype v_type[NFLNK+1];
extern struct proc *nfs_iodwant[MAX_ASYNCDAEMON];
extern struct map nfsmap[NFS_MSIZ];

/* Function ret types */
static char *nfs_unixauth();

/*
 * Maximum number of groups passed through to NFS server.
 * For release 3.X systems, the maximum value is 8.
 * For release 4.X systems, the maximum value is 10.
 */
int numgrps = 8;

/*
 * Create the header for an rpc request packet
 * The function nfs_unixauth() creates a unix style authorization string
 * and returns a ptr to it.
 * The hsiz is the size of the rest of the nfs request header.
 * (just used to decide if a cluster is a good idea)
 * nb: Note that the prog, vers and procid args are already in xdr byte order
 */
struct mbuf *nfsm_reqh(prog, vers, procid, cred, hsiz, bpos, mb, retxid)
	u_long prog;
	u_long vers;
	u_long procid;
	struct ucred *cred;
	int hsiz;
	caddr_t *bpos;
	struct mbuf **mb;
	u_long *retxid;
{
	register struct mbuf *mreq, *m;
	register u_long *p;
	struct mbuf *m1;
	char *ap;
	int asiz, siz;

	NFSMGETHDR(mreq);
	asiz = (((cred->cr_ngroups > numgrps) ? numgrps : cred->cr_ngroups)<<2);
#ifdef FILLINHOST
	asiz += nfsm_rndup(hostnamelen)+(9*NFSX_UNSIGNED);
#else
	asiz += 9*NFSX_UNSIGNED;
#endif

	/* If we need a lot, alloc a cluster ?? */
	if ((asiz+hsiz+RPC_SIZ) > MHLEN)
		NFSMCLGET(mreq, M_WAIT);
	mreq->m_len = NFSMSIZ(mreq);
	siz = mreq->m_len;
	m1 = mreq;
	/*
	 * Alloc enough mbufs
	 * We do it now to avoid all sleeps after the call to nfs_unixauth()
	 */
	while ((asiz+RPC_SIZ) > siz) {
		MGET(m, M_WAIT, MT_DATA);
		m1->m_next = m;
		m->m_len = MLEN;
		siz += MLEN;
		m1 = m;
	}
	p = mtod(mreq, u_long *);
	*p++ = *retxid = txdr_unsigned(++nfs_xid);
	*p++ = rpc_call;
	*p++ = rpc_vers;
	*p++ = prog;
	*p++ = vers;
	*p++ = procid;
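	/*
	 * The six words just stored form the fixed part of the RPC call
	 * header: xid, CALL, RPC version, program, version and procedure.
	 * All that remains is the AUTH_UNIX credential and its null
	 * verifier, built by nfs_unixauth() and copied in below.
	 */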

	/* Now we can call nfs_unixauth() and copy it in */
	ap = nfs_unixauth(cred);
	m = mreq;
	siz = m->m_len-RPC_SIZ;
	if (asiz <= siz) {
		bcopy(ap, (caddr_t)p, asiz);
		m->m_len = asiz+RPC_SIZ;
	} else {
		bcopy(ap, (caddr_t)p, siz);
		ap += siz;
		asiz -= siz;
		while (asiz > 0) {
			siz = (asiz > MLEN) ? MLEN : asiz;
			m = m->m_next;
			bcopy(ap, mtod(m, caddr_t), siz);
			m->m_len = siz;
			asiz -= siz;
			ap += siz;
		}
	}

	/* Finally, return values */
	*mb = m;
	*bpos = mtod(m, caddr_t)+m->m_len;
	return (mreq);
}

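/*
 * A note on the convention used by the request building routines in
 * this file (nfsm_reqh, nfsm_uiotombuf, nfsm_strtmbuf): each returns
 * with the mbuf pointer argument aimed at the mbuf currently being
 * filled and *bpos at the next free byte within it, so callers and the
 * nfsm_ macros can keep appending XDR data without rescanning the
 * chain.  The parsing routines track their read position the same way
 * through the mbuf pointer and dpos arguments.
 */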
/*
 * copies mbuf chain to the uio scatter/gather list
 */
nfsm_mbuftouio(mrep, uiop, siz, dpos)
	struct mbuf **mrep;
	struct uio *uiop;
	int siz;
	caddr_t *dpos;
{
	register int xfer, left, len;
	register struct mbuf *mp;
	register char *mbufcp, *uiocp;
	long uiosiz, rem;

	mp = *mrep;
	mbufcp = *dpos;
	len = mtod(mp, caddr_t)+mp->m_len-mbufcp;
	rem = nfsm_rndup(siz)-siz;
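	/*
	 * XDR opaque data is padded out to a 4 byte boundary on the wire,
	 * so rem is the number of pad bytes to skip over once the siz
	 * data bytes have been copied into the uio.
	 */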
	while (siz > 0) {
		if (uiop->uio_iovcnt <= 0 || uiop->uio_iov == NULL)
			return(EFBIG);
		left = uiop->uio_iov->iov_len;
		uiocp = uiop->uio_iov->iov_base;
		if (left > siz)
			left = siz;
		uiosiz = left;
		while (left > 0) {
			while (len == 0) {
				mp = mp->m_next;
				if (mp == NULL)
					return (EBADRPC);
				mbufcp = mtod(mp, caddr_t);
				len = mp->m_len;
			}
			xfer = (left > len) ? len : left;
#ifdef notdef
			/* Not Yet.. */
			if (uiop->uio_iov->iov_op != NULL)
				(*(uiop->uio_iov->iov_op))
				(mbufcp, uiocp, xfer);
			else
#endif
			if (uiop->uio_segflg == UIO_SYSSPACE)
				bcopy(mbufcp, uiocp, xfer);
			else
				copyout(mbufcp, uiocp, xfer);
			left -= xfer;
			len -= xfer;
			mbufcp += xfer;
			uiocp += xfer;
			uiop->uio_resid -= xfer;
		}
		if (uiop->uio_iov->iov_len <= siz) {
			uiop->uio_iovcnt--;
			uiop->uio_iov++;
		} else {
			uiop->uio_iov->iov_base += uiosiz;
			uiop->uio_iov->iov_len -= uiosiz;
		}
		siz -= uiosiz;
	}
	if (rem > 0)
		mbufcp += rem;
	*dpos = mbufcp;
	*mrep = mp;
	return(0);
}

/*
 * copies a uio scatter/gather list to an mbuf chain...
 */
nfsm_uiotombuf(uiop, mq, siz, bpos)
	register struct uio *uiop;
	struct mbuf **mq;
	int siz;
	caddr_t *bpos;
{
	register struct mbuf *mp;
	struct mbuf *mp2;
	long xfer, left, uiosiz;
	int clflg;
	int rem, len;
	char *cp, *uiocp;

	if (siz > MLEN)		/* or should it >= MCLBYTES ?? */
		clflg = 1;
	else
		clflg = 0;
	rem = nfsm_rndup(siz)-siz;
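	/*
	 * As in nfsm_mbuftouio(), rem is the pad needed to bring siz up
	 * to a 4 byte XDR boundary; here it is zero filled at the end.
	 */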
	mp2 = *mq;
	while (siz > 0) {
		if (uiop->uio_iovcnt <= 0 || uiop->uio_iov == NULL)
			return(EINVAL);
		left = uiop->uio_iov->iov_len;
		uiocp = uiop->uio_iov->iov_base;
		if (left > siz)
			left = siz;
		uiosiz = left;
		while (left > 0) {
			MGET(mp, M_WAIT, MT_DATA);
			if (clflg)
				NFSMCLGET(mp, M_WAIT);
			mp->m_len = NFSMSIZ(mp);
			mp2->m_next = mp;
			mp2 = mp;
			xfer = (left > mp->m_len) ? mp->m_len : left;
#ifdef notdef
			/* Not Yet.. */
			if (uiop->uio_iov->iov_op != NULL)
				(*(uiop->uio_iov->iov_op))
				(uiocp, mtod(mp, caddr_t), xfer);
			else
#endif
			if (uiop->uio_segflg == UIO_SYSSPACE)
				bcopy(uiocp, mtod(mp, caddr_t), xfer);
			else
				copyin(uiocp, mtod(mp, caddr_t), xfer);
			len = mp->m_len;
			mp->m_len = xfer;
			left -= xfer;
			uiocp += xfer;
			uiop->uio_resid -= xfer;
		}
		if (uiop->uio_iov->iov_len <= siz) {
			uiop->uio_iovcnt--;
			uiop->uio_iov++;
		} else {
			uiop->uio_iov->iov_base += uiosiz;
			uiop->uio_iov->iov_len -= uiosiz;
		}
		siz -= uiosiz;
	}
	if (rem > 0) {
		if (rem > (len-mp->m_len)) {
			MGET(mp, M_WAIT, MT_DATA);
			mp->m_len = 0;
			mp2->m_next = mp;
		}
		cp = mtod(mp, caddr_t)+mp->m_len;
		for (left = 0; left < rem; left++)
			*cp++ = '\0';
		mp->m_len += rem;
		*bpos = cp;
	} else
		*bpos = mtod(mp, caddr_t)+mp->m_len;
	*mq = mp;
	return(0);
}

/*
 * Help break down an mbuf chain by making the first siz bytes contiguous,
 * pointed to by the returned value (*cp2).
 * If updateflg == TRUE we can overwrite the first part of the mbuf data
 * This is used by the macros nfsm_disect and nfsm_disecton for tough
 * cases. (The macros use the vars. dpos and dpos2)
 */
nfsm_disct(mdp, dposp, siz, left, updateflg, cp2)
	struct mbuf **mdp;
	caddr_t *dposp;
	int siz;
	int left;
	int updateflg;
	caddr_t *cp2;
{
	register struct mbuf *mp, *mp2;
	register int siz2, xfer;
	register caddr_t p;

	mp = *mdp;
	while (left == 0) {
		*mdp = mp = mp->m_next;
		if (mp == NULL)
			return(EBADRPC);
		left = mp->m_len;
		*dposp = mtod(mp, caddr_t);
	}
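	/*
	 * If the siz bytes are already contiguous in this mbuf, just hand
	 * back a pointer to them; otherwise pull them up into a single
	 * mbuf (much as m_pullup() would), copying the remainder out of
	 * the following mbufs in the chain.
	 */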
	if (left >= siz) {
		*cp2 = *dposp;
		*dposp += siz;
		return(0);
	} else if (mp->m_next == NULL) {
		return(EBADRPC);
	} else if (siz > MCLBYTES) {
		panic("nfs S too big");
	} else {
		/* Iff update, you can overwrite, else must alloc new mbuf */
		if (updateflg) {
			NFSMINOFF(mp);
		} else {
			MGET(mp2, M_WAIT, MT_DATA);
			mp2->m_next = mp->m_next;
			mp->m_next = mp2;
			mp->m_len -= left;
			mp = mp2;
		}
		/* Alloc cluster iff we need it */
		if (!M_HASCL(mp) && siz > NFSMSIZ(mp)) {
			NFSMCLGET(mp, M_WAIT);
			if (!M_HASCL(mp))
				return(ENOBUFS);
		}
		*cp2 = p = mtod(mp, caddr_t);
		bcopy(*dposp, p, left);		/* Copy what was left */
		siz2 = siz-left;
		p += left;
		mp2 = mp->m_next;
		/* Loop around copying up the siz2 bytes */
		while (siz2 > 0) {
			if (mp2 == NULL)
				return (EBADRPC);
			xfer = (siz2 > mp2->m_len) ? mp2->m_len : siz2;
			bcopy(mtod(mp2, caddr_t), p, xfer);
			NFSMADV(mp2, xfer);
			mp2->m_len -= xfer;
			siz2 -= xfer;
			if (siz2 > 0)
				mp2 = mp2->m_next;
		}
		mp->m_len = siz;
		*mdp = mp2;
		*dposp = mtod(mp2, caddr_t);
	}
	return (0);
}

/*
 * Advance the position in the mbuf chain with/without freeing mbufs
 */
nfs_adv(mdp, dposp, offs, left)
	struct mbuf **mdp;
	caddr_t *dposp;
	int offs;
	int left;
{
	register struct mbuf *m;
	register int s;

	m = *mdp;
	s = left;
	while (s < offs) {
		offs -= s;
		m = m->m_next;
		if (m == NULL)
			return(EBADRPC);
		s = m->m_len;
	}
	*mdp = m;
	*dposp = mtod(m, caddr_t)+offs;
	return(0);
}

/*
 * Copy a string into mbufs for the hard cases...
 */
nfsm_strtmbuf(mb, bpos, cp, siz)
	struct mbuf **mb;
	char **bpos;
	char *cp;
	long siz;
{
	register struct mbuf *m1, *m2;
	long left, xfer, len, tlen;
	u_long *p;
	int putsize;

	putsize = 1;
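	/*
	 * An XDR string goes out as a length word followed by the bytes,
	 * zero padded to a 4 byte boundary.  putsize stays set until the
	 * length word has actually been stored.
	 */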
	m2 = *mb;
	left = NFSMSIZ(m2)-m2->m_len;
	if (left > 0) {
		p = ((u_long *)(*bpos));
		*p++ = txdr_unsigned(siz);
		putsize = 0;
		left -= NFSX_UNSIGNED;
		m2->m_len += NFSX_UNSIGNED;
		if (left > 0) {
			bcopy(cp, (caddr_t) p, left);
			siz -= left;
			cp += left;
			m2->m_len += left;
			left = 0;
		}
	}
	/* Loop around adding mbufs */
	while (siz > 0) {
		MGET(m1, M_WAIT, MT_DATA);
		if (siz > MLEN)
			NFSMCLGET(m1, M_WAIT);
		m1->m_len = NFSMSIZ(m1);
		m2->m_next = m1;
		m2 = m1;
		p = mtod(m1, u_long *);
		tlen = 0;
		if (putsize) {
			*p++ = txdr_unsigned(siz);
			m1->m_len -= NFSX_UNSIGNED;
			tlen = NFSX_UNSIGNED;
			putsize = 0;
		}
		if (siz < m1->m_len) {
			len = nfsm_rndup(siz);
			xfer = siz;
			if (xfer < len)
				*(p+(xfer>>2)) = 0;
		} else {
			xfer = len = m1->m_len;
		}
		bcopy(cp, (caddr_t) p, xfer);
		m1->m_len = len+tlen;
		siz -= xfer;
		cp += xfer;
	}
	*mb = m1;
	*bpos = mtod(m1, caddr_t)+m1->m_len;
	return(0);
}

/*
 * Called once to initialize data structures...
 */
nfs_init()
{
	register int i;

	rpc_vers = txdr_unsigned(RPC_VER2);
	rpc_call = txdr_unsigned(RPC_CALL);
	rpc_reply = txdr_unsigned(RPC_REPLY);
	rpc_msgdenied = txdr_unsigned(RPC_MSGDENIED);
	rpc_msgaccepted = txdr_unsigned(RPC_MSGACCEPTED);
	rpc_mismatch = txdr_unsigned(RPC_MISMATCH);
	rpc_auth_unix = txdr_unsigned(RPCAUTH_UNIX);
	nfs_vers = txdr_unsigned(NFS_VER2);
	nfs_prog = txdr_unsigned(NFS_PROG);
	nfs_true = txdr_unsigned(TRUE);
	nfs_false = txdr_unsigned(FALSE);
	/* Loop thru nfs procids */
	for (i = 0; i < NFS_NPROCS; i++)
		nfs_procids[i] = txdr_unsigned(i);
	/* Ensure async daemons disabled */
	for (i = 0; i < MAX_ASYNCDAEMON; i++)
		nfs_iodwant[i] = (struct proc *)0;
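	/* Map the NFSv2 file types (NFNON through NFLNK) to vnode types */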
	v_type[0] = VNON;
	v_type[1] = VREG;
	v_type[2] = VDIR;
	v_type[3] = VBLK;
	v_type[4] = VCHR;
	v_type[5] = VLNK;
	nfs_xdrneg1 = txdr_unsigned(-1);
	nfs_nhinit();			/* Init the nfsnode table */
	rminit(nfsmap, (long)NFS_MAPREG, (long)1, "nfs mapreg", NFS_MSIZ);
	/* And start timer */
	nfs_timer();
}

/*
 * Fill in the rest of the rpc_unixauth and return it
 */
static char *nfs_unixauth(cr)
	register struct ucred *cr;
{
	register u_long *p;
	register int i;
	int ngr;

	/* Maybe someday there should be a cache of AUTH_SHORT's */
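	/*
	 * Layout of the credential built here, in 32 bit XDR words:
	 *	AUTH_UNIX, body length, stamp (hostid), machine name,
	 *	uid, gid, group count, group list, 0, 0
	 * The two trailing zeroes are a null (AUTH_NONE) verifier.  The
	 * words up through the machine name are allocated and filled in
	 * just once (only the body length is rewritten on later calls),
	 * and rpc_uidp remembers where the per-credential part starts.
	 */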
	if ((p = rpc_uidp) == NULL) {
#ifdef FILLINHOST
		i = nfsm_rndup(hostnamelen)+(19*NFSX_UNSIGNED);
#else
		i = 19*NFSX_UNSIGNED;
#endif
		MALLOC(p, u_long *, i, M_TEMP, M_WAITOK);
		bzero((caddr_t)p, i);
		rpc_unixauth = (caddr_t)p;
		*p++ = txdr_unsigned(RPCAUTH_UNIX);
		p++;	/* Fill in size later */
		*p++ = hostid;
#ifdef FILLINHOST
		*p++ = txdr_unsigned(hostnamelen);
		i = nfsm_rndup(hostnamelen);
		bcopy(hostname, (caddr_t)p, hostnamelen);
		p += (i>>2);
#else
		*p++ = 0;
#endif
		rpc_uidp = p;
	}
	*p++ = txdr_unsigned(cr->cr_uid);
	*p++ = txdr_unsigned(cr->cr_groups[0]);
	ngr = (cr->cr_ngroups > numgrps) ? numgrps : cr->cr_ngroups;
	*p++ = txdr_unsigned(ngr);
	for (i = 0; i < ngr; i++)
		*p++ = txdr_unsigned(cr->cr_groups[i]);
	/* And add the AUTH_NULL */
	*p++ = 0;
	*p = 0;
	i = (((caddr_t)p)-rpc_unixauth)-12;
	p = (u_long *)(rpc_unixauth+4);
	*p = txdr_unsigned(i);
	return(rpc_unixauth);
}

/*
 * Attribute cache routines.
 * nfs_loadattrcache() - loads or updates the cache contents from attributes
 *	that are on the mbuf list
 * nfs_getattrcache() - returns valid attributes if found in cache, returns
 *	error otherwise
 */

/*
 * Load the attribute cache (that lives in the nfsnode entry) with
 * the values on the mbuf list and
 * Iff vaper not NULL
 *    copy the attributes to *vaper
 */
nfs_loadattrcache(vpp, mdp, dposp, vaper)
	struct vnode **vpp;
	struct mbuf **mdp;
	caddr_t *dposp;
	struct vattr *vaper;
{
	register struct vnode *vp = *vpp;
	register struct vattr *vap;
	register struct nfsv2_fattr *fp;
	extern struct vnodeops spec_nfsv2nodeops;
	register struct nfsnode *np;
	register long t1;
	caddr_t dpos, cp2;
	int error = 0;
	struct mbuf *md;
	enum vtype type;
	dev_t rdev;
	struct timeval mtime;
	struct vnode *nvp;

	md = *mdp;
	dpos = *dposp;
	t1 = (mtod(md, caddr_t)+md->m_len)-dpos;
	if (error = nfsm_disct(&md, &dpos, NFSX_FATTR, t1, TRUE, &cp2))
		return (error);
	fp = (struct nfsv2_fattr *)cp2;
	type = nfstov_type(fp->fa_type);
	rdev = fxdr_unsigned(dev_t, fp->fa_rdev);
	fxdr_time(&fp->fa_mtime, &mtime);
	/*
	 * If v_type == VNON it is a new node, so fill in the v_type,
	 * n_mtime fields. Check to see if it represents a special
	 * device, and if so, check for a possible alias. Once the
	 * correct vnode has been obtained, fill in the rest of the
	 * information.
	 */
	np = VTONFS(vp);
	if (vp->v_type == VNON) {
		vp->v_type = type;
		if (vp->v_type == VCHR || vp->v_type == VBLK) {
			vp->v_rdev = rdev;
			vp->v_op = &spec_nfsv2nodeops;
			if (nvp = checkalias(vp, vp->v_mount)) {
				/*
				 * Reinitialize aliased node.
				 */
				np = VTONFS(nvp);
				np->n_vnode = nvp;
				np->n_flag = NLOCKED;
				bcopy((caddr_t)&VTONFS(vp)->n_fh,
					(caddr_t)&np->n_fh, NFSX_FH);
				insque(np, nfs_hash(&np->n_fh));
				np->n_attrstamp = 0;
				np->n_sillyrename = (struct sillyrename *)0;
				/*
				 * Discard unneeded vnode and update actual one
				 */
				vput(vp);
				*vpp = nvp;
			}
		}
		np->n_mtime = mtime.tv_sec;
	}
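	/*
	 * Convert the remaining XDR attributes into np->n_vattr, which
	 * doubles as the attribute cache entry for this nfsnode.
	 */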
	vap = &np->n_vattr;
	vap->va_type = type;
	vap->va_mode = nfstov_mode(fp->fa_mode);
	vap->va_nlink = fxdr_unsigned(u_short, fp->fa_nlink);
	vap->va_uid = fxdr_unsigned(uid_t, fp->fa_uid);
	vap->va_gid = fxdr_unsigned(gid_t, fp->fa_gid);
	vap->va_size = fxdr_unsigned(u_long, fp->fa_size);
	if ((np->n_flag & NMODIFIED) == 0 || vap->va_size > np->n_size)
		np->n_size = vap->va_size;
	vap->va_size1 = 0;		/* OR -1 ?? */
	vap->va_blocksize = fxdr_unsigned(long, fp->fa_blocksize);
	vap->va_rdev = rdev;
	vap->va_bytes = fxdr_unsigned(long, fp->fa_blocks) * vap->va_blocksize;
	vap->va_bytes1 = 0;
	vap->va_fsid = fxdr_unsigned(long, fp->fa_fsid);
	vap->va_fileid = fxdr_unsigned(long, fp->fa_fileid);
	fxdr_time(&fp->fa_atime, &vap->va_atime);
	fxdr_time(&fp->fa_ctime, &vap->va_ctime);
	vap->va_mtime = mtime;
	vap->va_gen = 0;
	vap->va_flags = 0;
	np->n_attrstamp = time.tv_sec;
	*dposp = dpos;
	*mdp = md;
	if (vaper != NULL) {
		bcopy((caddr_t)vap, (caddr_t)vaper, sizeof(*vap));
		if ((np->n_flag & NMODIFIED) && (np->n_size > vap->va_size))
			vaper->va_size = np->n_size;
	}
	return (0);
}

/*
 * Check the time stamp
 * If the cache is valid, copy contents to *vap and return 0
 * otherwise return an error
 */
nfs_getattrcache(vp, vap)
	register struct vnode *vp;
	struct vattr *vap;
{
	register struct nfsnode *np;

	np = VTONFS(vp);
	if ((time.tv_sec-np->n_attrstamp) < NFS_ATTRTIMEO) {
		nfsstats.attrcache_hits++;
		bcopy((caddr_t)&np->n_vattr,(caddr_t)vap,sizeof(struct vattr));
		if ((np->n_flag & NMODIFIED) == 0)
			np->n_size = vap->va_size;
		else if (np->n_size > vap->va_size)
			vap->va_size = np->n_size;
		return (0);
	} else {
		nfsstats.attrcache_misses++;
		return (ENOENT);
	}
}

/*
 * nfs_namei - a little like namei(), but for one element only
 *	essentially look up file handle, fill in ndp and call VOP_LOOKUP()
 */
nfs_namei(ndp, fhp, len, mdp, dposp)
	register struct nameidata *ndp;
	fhandle_t *fhp;
	int len;
	struct mbuf **mdp;
	caddr_t *dposp;
{
	register int i, rem;
	register struct mbuf *md;
	register char *cp;
	struct vnode *dp = (struct vnode *)0;
	int flag;
	int docache;
	int wantparent;
	int lockparent;
	int error = 0;

	ndp->ni_vp = ndp->ni_dvp = (struct vnode *)0;
	flag = ndp->ni_nameiop & OPFLAG;
	wantparent = ndp->ni_nameiop & (LOCKPARENT | WANTPARENT);
	lockparent = ndp->ni_nameiop & LOCKPARENT;
	docache = (ndp->ni_nameiop & NOCACHE) ^ NOCACHE;
	if (flag == DELETE || wantparent)
		docache = 0;

	/* Fill in the nameidata and call lookup */
	cp = *dposp;
	md = *mdp;
	rem = mtod(md, caddr_t)+md->m_len-cp;
	ndp->ni_hash = 0;
	for (i = 0; i < len;) {
		if (rem == 0) {
			md = md->m_next;
			if (md == NULL)
				return (EBADRPC);
			cp = mtod(md, caddr_t);
			rem = md->m_len;
		}
		if (*cp == '\0' || *cp == '/')
			return (EINVAL);
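		/*
		 * Names with the high (0200) bit set are rejected, except
		 * on DELETE, presumably so an existing entry containing
		 * such characters can still be removed; a '/' with the
		 * high bit set is never allowed.
		 */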
		if (*cp & 0200)
			if ((*cp&0377) == ('/'|0200) || flag != DELETE)
				return (EINVAL);
		ndp->ni_dent.d_name[i++] = *cp;
		ndp->ni_hash += (unsigned char)*cp * i;
		cp++;
		rem--;
	}
	*mdp = md;
	len = nfsm_rndup(len)-len;
	if (len > 0)
		*dposp = cp+len;
	else
		*dposp = cp;
	ndp->ni_namelen = i;
	ndp->ni_dent.d_namlen = i;
	ndp->ni_dent.d_name[i] = '\0';
	ndp->ni_pathlen = 1;
	ndp->ni_dirp = ndp->ni_ptr = &ndp->ni_dent.d_name[0];
	ndp->ni_next = &ndp->ni_dent.d_name[i];
	ndp->ni_loopcnt = 0;	/* Not actually used for now */
	ndp->ni_endoff = 0;
	if (docache)
		ndp->ni_makeentry = 1;
	else
		ndp->ni_makeentry = 0;
	ndp->ni_isdotdot = (i == 2 &&
		ndp->ni_dent.d_name[1] == '.' && ndp->ni_dent.d_name[0] == '.');

	if (error = nfsrv_fhtovp(fhp, TRUE, &dp, ndp->ni_cred))
		return (error);
	if (dp->v_type != VDIR) {
		vput(dp);
		return (ENOTDIR);
	}
	/*
	 * Must set current directory here to avoid confusion in namei()
	 * called from rename()
	 */
	ndp->ni_cdir = dp;
	ndp->ni_rdir = (struct vnode *)0;

	/*
	 * Handle "..":
	 * If this vnode is the root of the mounted
	 *    file system, then ignore it so the lookup can't get out
	 */
	if (ndp->ni_isdotdot && (dp->v_flag & VROOT)) {
		ndp->ni_dvp = dp;
		ndp->ni_vp = dp;
		VREF(dp);
		goto nextname;
	}

	/*
	 * We now have a segment name to search for, and a directory to search.
	 */
	if (error = VOP_LOOKUP(dp, ndp)) {
		if (ndp->ni_vp != NULL)
			panic("leaf should be empty");
		/*
		 * If creating and at end of pathname, then can consider
		 * allowing file to be created.
		 */
		if (ndp->ni_dvp->v_mount->m_flag & (M_RDONLY | M_EXRDONLY))
			error = EROFS;
		if (flag == LOOKUP || flag == DELETE || error != ENOENT)
			goto bad;
		/*
		 * We return with ni_vp NULL to indicate that the entry
		 * doesn't currently exist, leaving a pointer to the
		 * (possibly locked) directory inode in ndp->ni_dvp.
		 */
		return (0);	/* should this be ENOENT? */
	}

	dp = ndp->ni_vp;

nextname:
	ndp->ni_ptr = ndp->ni_next;
	/*
	 * Check for read-only file systems
	 */
	if (flag == DELETE || flag == RENAME) {
		/*
		 * Disallow directory write attempts on read-only
		 * file systems.
		 */
		if ((dp->v_mount->m_flag & (M_RDONLY|M_EXRDONLY)) ||
		    (wantparent && (ndp->ni_dvp->v_mount->m_flag & (M_RDONLY|M_EXRDONLY)))) {
			error = EROFS;
			goto bad2;
		}
	}

	if (!wantparent)
		vrele(ndp->ni_dvp);

	if ((ndp->ni_nameiop & LOCKLEAF) == 0)
		VOP_UNLOCK(dp);
	return (0);

bad2:
	if (lockparent)
		VOP_UNLOCK(ndp->ni_dvp);
	vrele(ndp->ni_dvp);
bad:
	vput(dp);
	ndp->ni_vp = NULL;
	return (error);
}

/*
 * A fiddled version of m_adj() that ensures null fill to a long
 * boundary and only trims off the back end
 */
nfsm_adj(mp, len, nul)
	struct mbuf *mp;
	register int len;
	int nul;
{
	register struct mbuf *m;
	register int count, i;
	register char *cp;

	/*
	 * Trim from tail.  Scan the mbuf chain,
	 * calculating its length and finding the last mbuf.
	 * If the adjustment only affects this mbuf, then just
	 * adjust and return.  Otherwise, rescan and truncate
	 * after the remaining size.
	 */
	count = 0;
	m = mp;
	for (;;) {
		count += m->m_len;
		if (m->m_next == (struct mbuf *)0)
			break;
		m = m->m_next;
	}
	if (m->m_len > len) {
		m->m_len -= len;
		if (nul > 0) {
			cp = mtod(m, caddr_t)+m->m_len-nul;
			for (i = 0; i < nul; i++)
				*cp++ = '\0';
		}
		return;
	}
	count -= len;
	if (count < 0)
		count = 0;
	/*
	 * Correct length for chain is "count".
	 * Find the mbuf with last data, adjust its length,
	 * and toss data from remaining mbufs on chain.
	 */
	for (m = mp; m; m = m->m_next) {
		if (m->m_len >= count) {
			m->m_len = count;
			if (nul > 0) {
				cp = mtod(m, caddr_t)+m->m_len-nul;
				for (i = 0; i < nul; i++)
					*cp++ = '\0';
			}
			break;
		}
		count -= m->m_len;
	}
	while (m = m->m_next)
		m->m_len = 0;
}

/*
 * nfsrv_fhtovp() - convert a fh to a vnode ptr (optionally locked)
 * 	- look up fsid in mount list (if not found ret error)
 *	- check that it is exported
 *	- get vp by calling VFS_FHTOVP() macro
 *	- if not lockflag unlock it with VOP_UNLOCK()
 *	- if cred->cr_uid == 0 set it to m_exroot
 */
nfsrv_fhtovp(fhp, lockflag, vpp, cred)
	fhandle_t *fhp;
	int lockflag;
	struct vnode **vpp;
	struct ucred *cred;
{
	register struct mount *mp;

	if ((mp = getvfs(&fhp->fh_fsid)) == NULL)
		return (ESTALE);
	if ((mp->m_flag & M_EXPORTED) == 0)
		return (EACCES);
	if (VFS_FHTOVP(mp, &fhp->fh_fid, vpp))
		return (ESTALE);
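	/*
	 * Map requests from client root to the uid the file system was
	 * exported with (m_exroot), the usual server side root mapping.
	 */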
	if (cred->cr_uid == 0)
		cred->cr_uid = mp->m_exroot;
	if (!lockflag)
		VOP_UNLOCK(*vpp);
	return (0);
}
