/*	$NetBSD: vfs_getcwd.c,v 1.13 2000/03/30 09:27:14 augustss Exp $ */

/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Bill Sommerfeld.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/dirent.h>
#include <ufs/ufs/dir.h>	/* XXX only for DIRBLKSIZ */

#include <sys/syscallargs.h>

static int
getcwd_scandir __P((struct vnode **, struct vnode **,
    char **, char *, struct proc *));
static int
getcwd_getcache __P((struct vnode **, struct vnode **,
    char **, char *));
int
getcwd_common __P((struct vnode *, struct vnode *,
		   char **, char *, int, int, struct proc *));

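/*
 * The smallest record a well-formed directory entry can occupy: the fixed
 * dirent header, with the (MAXNAMLEN+1)-byte d_name array replaced by the
 * four bytes that the shortest padded name occupies.  Records shorter than
 * this are treated as a malformed directory (EINVAL) below.
 */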
#define DIRENT_MINSIZE (sizeof(struct dirent) - (MAXNAMLEN+1) + 4)

/*
 * Vnode variable naming conventions in this file:
 *
 * rvp: the current root we're aiming towards.
 * lvp, *lvpp: the "lower" vnode.
 * uvp, *uvpp: the "upper" vnode.
 *
 * Since all the vnodes we're dealing with are directories, and the
 * lookups are going *up* in the filesystem rather than *down*, the
 * usual "pvp" (parent) or "dvp" (directory) naming conventions are
 * too confusing.
 */
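
/*
 * Overall approach: starting from the current directory, repeatedly find
 * the parent (via the reverse name cache, or by looking up ".." and
 * scanning the parent for the child's entry), prepend "/<name>" to the
 * output buffer, and keep going until the root is reached.  The path is
 * thus assembled backwards, from the end of the buffer towards its start.
 */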

/*
 * XXX Will infinite loop in certain cases if a directory read reliably
 *	returns EINVAL on the last block.
 * XXX Is EINVAL the right thing to return if a directory is malformed?
 */

/*
 * XXX Untested vs. mount -o union; probably does the wrong thing.
 */

/*
 * Find the parent vnode of *lvpp and return it in *uvpp.
 *
 * If we care about the name, scan the parent directory looking for the
 * entry that points at lvp.
 *
 * Place the name in the buffer which starts at bufp, immediately
 * before *bpp, and move bpp backwards to point at the start of it.
 *
 * On entry, *lvpp is a locked vnode reference; on exit, it is vput and
 * NULL'ed.  On exit, *uvpp is either NULL or is a locked vnode reference.
 */
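/*
 * Illustrative buffer layout while a path is being assembled:
 *
 *	bufp                                             bufp + len
 *	|     unused     | name |/components/found/so/far\0|
 *	                 ^
 *	               *bpp on return (getcwd_common() then prepends '/')
 */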
static int
getcwd_scandir(lvpp, uvpp, bpp, bufp, p)
	struct vnode **lvpp;
	struct vnode **uvpp;
	char **bpp;
	char *bufp;
	struct proc *p;
{
	int     error = 0;
	int     eofflag;
	off_t   off;
	int     tries;
	struct uio uio;
	struct iovec iov;
	char   *dirbuf = NULL;
	int	dirbuflen;
	ino_t   fileno;
	struct vattr va;
	struct vnode *uvp = NULL;
	struct vnode *lvp = *lvpp;
	struct componentname cn;
	int len, reclen;

	tries = 0;

	/*
	 * If we want the filename, get some info we need while the
	 * current directory is still locked.
	 */
	if (bufp != NULL) {
		error = VOP_GETATTR(lvp, &va, p->p_ucred, p);
		if (error) {
			vput(lvp);
			*lvpp = NULL;
			*uvpp = NULL;
			return error;
		}
	}

	/*
	 * Ok, we have to do it the hard way..
	 * Next, get parent vnode using lookup of ..
	 */
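	/*
	 * The componentname is filled in by hand rather than going through
	 * namei(), since all we need here is a single ".." lookup in a
	 * directory we already hold.
	 */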
	cn.cn_nameiop = LOOKUP;
	cn.cn_flags = ISLASTCN | ISDOTDOT | RDONLY;
	cn.cn_proc = p;
	cn.cn_cred = p->p_ucred;
	cn.cn_pnbuf = NULL;
	cn.cn_nameptr = "..";
	cn.cn_namelen = 2;
	cn.cn_hash = 0;
	cn.cn_consume = 0;

	/*
	 * At this point, lvp is locked and will be unlocked by the lookup.
	 * On successful return, *uvpp will be locked.
	 */
	error = VOP_LOOKUP(lvp, uvpp, &cn);
	if (error) {
		vput(lvp);
		*lvpp = NULL;
		*uvpp = NULL;
		return error;
	}
	uvp = *uvpp;

	/* If we don't care about the pathname, we're done */
	if (bufp == NULL) {
		vrele(lvp);
		*lvpp = NULL;
		return 0;
	}

	fileno = va.va_fileid;

	dirbuflen = DIRBLKSIZ;
	if (dirbuflen < va.va_blocksize)
		dirbuflen = va.va_blocksize;
	dirbuf = (char *)malloc(dirbuflen, M_TEMP, M_WAITOK);

#if 0
unionread:
#endif
	off = 0;
	do {
		/* call VOP_READDIR of parent */
		iov.iov_base = dirbuf;
		iov.iov_len = dirbuflen;

		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = off;
		uio.uio_resid = dirbuflen;
		uio.uio_segflg = UIO_SYSSPACE;
		uio.uio_rw = UIO_READ;
		uio.uio_procp = p;

		eofflag = 0;

		error = VOP_READDIR(uvp, &uio, p->p_ucred, &eofflag, 0, 0);

		off = uio.uio_offset;

		/*
		 * Try again if NFS tosses its cookies.
		 * XXX this can still loop forever if the directory is busted
		 * such that the second or subsequent page of it always
		 * returns EINVAL
		 */
		if ((error == EINVAL) && (tries < 3)) {
			off = 0;
			tries++;
			continue;	/* once more, with feeling */
		}

		if (!error) {
			char   *cpos;
			struct dirent *dp;

			cpos = dirbuf;
			tries = 0;

			/* scan directory page looking for matching vnode */
			for (len = (dirbuflen - uio.uio_resid); len > 0; len -= reclen) {
				dp = (struct dirent *) cpos;
				reclen = dp->d_reclen;

				/* check for malformed directory.. */
				if (reclen < DIRENT_MINSIZE) {
					error = EINVAL;
					goto out;
				}
				/*
				 * XXX should perhaps do VOP_LOOKUP to
				 * check that we got back to the right place,
				 * but getting the locking games for that
				 * right would be heinous.
				 */
				if ((dp->d_type != DT_WHT) &&
				    (dp->d_fileno == fileno)) {
					char *bp = *bpp;
					bp -= dp->d_namlen;

					if (bp <= bufp) {
						error = ERANGE;
						goto out;
					}
					memcpy(bp, dp->d_name, dp->d_namlen);
					error = 0;
					*bpp = bp;
					goto out;
				}
				cpos += reclen;
			}
		}
	} while (!eofflag);
#if 0
	/*
	 * Deal with mount -o union, which unions only the
	 * root directory of the mount.
	 */
	if ((uvp->v_flag & VROOT) &&
	    (uvp->v_mount->mnt_flag & MNT_UNION)) {
		struct vnode *tvp = uvp;
		uvp = uvp->v_mount->mnt_vnodecovered;
		vput(tvp);
		VREF(uvp);
		*uvpp = uvp;
		error = vn_lock(uvp, LK_EXCLUSIVE | LK_RETRY);
		if (error != 0) {
			vrele(uvp);
			*uvpp = uvp = NULL;
			goto out;
		}
		goto unionread;
	}
#endif
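	/*
	 * We scanned the entire directory without finding an entry for
	 * lvp; presumably it was removed or renamed while we were working,
	 * so report it as missing.
	 */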
	error = ENOENT;

out:
	vrele(lvp);
	*lvpp = NULL;
	free(dirbuf, M_TEMP);
	return error;
}

/*
 * Look in the vnode-to-name reverse cache to see if
 * we can find things the easy way.
 *
 * XXX vget failure path is untested.
 *
 * On entry, *lvpp is a locked vnode reference.
 * On exit, one of the following is the case:
 *	0) Both *lvpp and *uvpp are NULL and failure is returned.
 *	1) *uvpp is NULL, *lvpp remains locked and -1 is returned (cache miss).
 *	2) *uvpp is a locked vnode reference, *lvpp is vput and NULL'ed,
 *	   and 0 is returned (cache hit).
 */

static int
getcwd_getcache(lvpp, uvpp, bpp, bufp)
	struct vnode **lvpp, **uvpp;
	char **bpp;
	char *bufp;
{
	struct vnode *lvp, *uvp = NULL;
	int error;
	int vpid;

	lvp = *lvpp;

	/*
	 * This returns 0 on a cache hit, -1 on a clean cache miss,
	 * or an errno on other failure.
	 */
	error = cache_revlookup(lvp, uvpp, bpp, bufp);
	if (error) {
		if (error != -1) {
			vput(lvp);
			*lvpp = NULL;
			*uvpp = NULL;
		}
		return error;
	}
	uvp = *uvpp;
	vpid = uvp->v_id;

	/*
	 * Since we're going up, we have to release the current lock
	 * before we take the parent lock.
	 */

	VOP_UNLOCK(lvp, 0);

	error = vget(uvp, LK_EXCLUSIVE | LK_RETRY);
	if (error != 0)
		*uvpp = NULL;
	/*
	 * Verify that vget succeeded, and check that the vnode capability
	 * didn't change while we were waiting for the lock.
	 */
	if (error || (vpid != uvp->v_id)) {
		/*
		 * Oops, we missed.  If the vget failed, or the
		 * capability changed, try to get our lock back; if
		 * that works, tell the caller to try things the hard way,
		 * otherwise give up.
		 */
		if (!error)
			vput(uvp);
		*uvpp = NULL;

		error = vn_lock(lvp, LK_EXCLUSIVE | LK_RETRY);

		if (!error)
			return -1;
	}
	vrele(lvp);
	*lvpp = NULL;

	return error;
}

/*
 * Common routine shared by sys___getcwd() and vn_isunder().
 */

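/*
 * Flag for getcwd_common(): when set, VOP_ACCESS() is checked on each
 * directory visited (VEXEC for the starting directory, VEXEC|VREAD for
 * the ancestors that have to be read).  sys___getcwd() passes it;
 * vn_isunder() does not.
 */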
#define GETCWD_CHECK_ACCESS 0x0001

int
getcwd_common(lvp, rvp, bpp, bufp, limit, flags, p)
	struct vnode *lvp;
	struct vnode *rvp;
	char **bpp;
	char *bufp;
	int limit;
	int flags;
	struct proc *p;
{
	struct cwdinfo *cwdi = p->p_cwdi;
	struct vnode *uvp = NULL;
	char *bp = NULL;
	int error;
	int perms = VEXEC;

	if (rvp == NULL) {
		rvp = cwdi->cwdi_rdir;
		if (rvp == NULL)
			rvp = rootvnode;
	}

	VREF(rvp);
	VREF(lvp);

	/*
	 * Error handling invariant:
	 * Before a `goto out':
	 *	lvp is either NULL, or locked and held.
	 *	uvp is either NULL, or locked and held.
	 */

	error = vn_lock(lvp, LK_EXCLUSIVE | LK_RETRY);
	if (error) {
		vrele(lvp);
		lvp = NULL;
		goto out;
	}
	if (bufp)
		bp = *bpp;
	/*
	 * This loop will terminate when one of the following happens:
	 *	- we hit the root
	 *	- the directory read (VOP_READDIR) or lookup fails
	 *	- we run out of space in the buffer.
	 */
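	/* Special case: the starting directory is the root itself. */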
	if (lvp == rvp) {
		if (bp)
			*(--bp) = '/';
		goto out;
	}
	do {
		if (lvp->v_type != VDIR) {
			error = ENOTDIR;
			goto out;
		}

		/*
		 * The access check here is optional, depending on
		 * whether or not the caller cares.
		 */
		if (flags & GETCWD_CHECK_ACCESS) {
			error = VOP_ACCESS(lvp, perms, p->p_ucred, p);
			if (error)
				goto out;
			perms = VEXEC|VREAD;
		}

		/*
		 * Step up if we're a covered vnode..
		 */
		while (lvp->v_flag & VROOT) {
			struct vnode *tvp;

			if (lvp == rvp)
				goto out;

			tvp = lvp;
			lvp = lvp->v_mount->mnt_vnodecovered;
			vput(tvp);
			/*
			 * hodie natus est radici frater
			 * ("today a brother is born unto the root")
			 */
			if (lvp == NULL) {
				error = ENOENT;
				goto out;
			}
			VREF(lvp);
			error = vn_lock(lvp, LK_EXCLUSIVE | LK_RETRY);
			if (error != 0) {
				vrele(lvp);
				lvp = NULL;
				goto out;
			}
		}
		/*
		 * Look in the name cache; if that fails, look in the
		 * directory..
		 */
		error = getcwd_getcache(&lvp, &uvp, &bp, bufp);
		if (error == -1)
			error = getcwd_scandir(&lvp, &uvp, &bp, bufp, p);
		if (error)
			goto out;
#ifdef DIAGNOSTIC
		if (lvp != NULL)
			panic("getcwd: oops, forgot to null lvp");
		if (bufp && (bp <= bufp)) {
			panic("getcwd: oops, went back too far");
		}
#endif
		if (bp)
			*(--bp) = '/';
		lvp = uvp;
		uvp = NULL;
		limit--;
	} while ((lvp != rvp) && (limit > 0));
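	/*
	 * XXX if the limit expires before we reach rvp, we fall out of the
	 * loop with error still 0 and return a partial result.
	 */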

out:
	if (bpp)
		*bpp = bp;
	if (uvp)
		vput(uvp);
	if (lvp)
		vput(lvp);
	vrele(rvp);
	return error;
}

/*
 * Check if one directory can be found inside another in the directory
 * hierarchy.
 *
 * Intended to be used in chroot, chdir, fchdir, etc., to ensure that
 * chroot() actually means something.
 */
int
vn_isunder(lvp, rvp, p)
	struct vnode *lvp;
	struct vnode *rvp;
	struct proc *p;
{
	int error;

	error = getcwd_common(lvp, rvp, NULL, NULL, MAXPATHLEN/2, 0, p);

	if (!error)
		return 1;
	else
		return 0;
}
522 
523 /*
524  * Returns true if proc p1's root directory equal to or under p2's
525  * root directory.
526  *
527  * Intended to be used from ptrace/procfs sorts of things.
528  */
529 
530 int
531 proc_isunder (p1, p2)
532 	struct proc *p1;
533 	struct proc *p2;
534 {
535 	struct vnode *r1 = p1->p_cwdi->cwdi_rdir;
536 	struct vnode *r2 = p2->p_cwdi->cwdi_rdir;
537 
538 	if (r1 == NULL)
539 		return (r2 == NULL);
540 	else if (r2 == NULL)
541 		return 1;
542 	else
543 		return vn_isunder(r1, r2, p2);
544 }

/*
 * Find pathname of process's current directory.
 *
 * Use vfs vnode-to-name reverse cache; if that fails, fall back
 * to reading directory contents.
 */

int
sys___getcwd(p, v, retval)
	struct proc *p;
	void   *v;
	register_t *retval;
{
	struct sys___getcwd_args /* {
		syscallarg(char *) bufp;
		syscallarg(size_t) length;
	} */ *uap = v;

	int     error;
	char   *path;
	char   *bp, *bend;
	int     len = SCARG(uap, length);
	int	lenused;

	if (len > MAXPATHLEN*4)
		len = MAXPATHLEN*4;
	else if (len < 2)
		return ERANGE;

	path = (char *)malloc(len, M_TEMP, M_WAITOK);
	if (!path)
		return ENOMEM;

	bp = &path[len];
	bend = bp;
	*(--bp) = '\0';

	/*
	 * 5th argument here is "max number of vnodes to traverse".
	 * Since each entry takes up at least 2 bytes in the output buffer,
	 * limit it to N/2 vnodes for an N byte buffer.
	 */
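	/*
	 * (For example, a 1024-byte buffer is allowed to traverse at most
	 * 512 vnodes, since each component contributes at least a '/' plus
	 * one byte of name.)
	 */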
	error = getcwd_common(p->p_cwdi->cwdi_cdir, NULL, &bp, path, len/2,
			      GETCWD_CHECK_ACCESS, p);

	if (error)
		goto out;
	lenused = bend - bp;
	*retval = lenused;
	/* put the result into user buffer */
	error = copyout(bp, SCARG(uap, bufp), lenused);

out:
	free(path, M_TEMP);
	return error;
}
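
/*
 * Illustrative userland usage (a sketch, not part of this file): the
 * libc getcwd(3) implementation is expected to sit on top of this
 * syscall roughly as follows, assuming the usual __getcwd() stub
 * generated from syscalls.master:
 *
 *	char buf[MAXPATHLEN];
 *
 *	if (__getcwd(buf, sizeof(buf)) < 0)
 *		err(1, "getcwd");
 *	printf("%s\n", buf);
 *
 * The kernel assembles the path backwards in a temporary buffer and
 * copyout()s the NUL-terminated result to the start of the user's
 * buffer; the value returned via *retval is the number of bytes used,
 * including the terminating NUL.
 */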