/*	$NetBSD: vfs_getcwd.c,v 1.27 2005/06/05 23:47:48 thorpej Exp $	*/

/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Bill Sommerfeld.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_getcwd.c,v 1.27 2005/06/05 23:47:48 thorpej Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/dirent.h>
#include <ufs/ufs/dir.h>	/* XXX only for DIRBLKSIZ */

#include <sys/sa.h>
#include <sys/syscallargs.h>

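/*
 * Smallest record a well-formed directory entry can occupy: a struct
 * dirent with its MAXNAMLEN + 1 name array replaced by a minimal
 * 4-byte name field.  Used below as a sanity bound when walking
 * VOP_READDIR output.
 */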
#define DIRENT_MINSIZE (sizeof(struct dirent) - (MAXNAMLEN + 1) + 4)

/*
 * Vnode variable naming conventions in this file:
 *
 * rvp: the current root we're aiming towards.
 * lvp, *lvpp: the "lower" vnode
 * uvp, *uvpp: the "upper" vnode.
 *
 * Since all the vnodes we're dealing with are directories, and the
 * lookups are going *up* in the filesystem rather than *down*, the
 * usual "pvp" (parent) or "dvp" (directory) naming conventions are
 * too confusing.
 */

/*
 * XXX Will infinite loop in certain cases if a directory read reliably
 *	returns EINVAL on last block.
 * XXX is EINVAL the right thing to return if a directory is malformed?
 */

/*
 * XXX Untested vs. mount -o union; probably does the wrong thing.
 */

/*
 * Find the parent vnode of *lvpp and return it in *uvpp.
 *
 * If we care about the name, scan the parent directory looking for the
 * directory entry that points at lvp.
 *
 * Place the name in the buffer which starts at bufp, immediately
 * before *bpp, and move bpp backwards to point at the start of it.
 *
 * On entry, *lvpp is a locked vnode reference; on exit, it is vput and NULL'ed.
 * On exit, *uvpp is either NULL or is a locked vnode reference.
 */
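
/*
 * Illustrative sketch of the name buffer convention (not in the
 * original source; the names match the parameters above):
 *
 *	bufp                          *bpp           end of buffer
 *	 |                              |                  |
 *	 v                              v                  v
 *	 [ .......... free ............ | name/so/far '\0' ]
 *
 * Each component found is copied in immediately before *bpp, and *bpp
 * is moved back to point at it, so the path grows from right to left.
 */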
static int
getcwd_scandir(struct vnode **lvpp, struct vnode **uvpp, char **bpp,
    char *bufp, struct proc *p)
{
	int     error = 0;
	int     eofflag;
	off_t   off;
	int     tries;
	struct uio uio;
	struct iovec iov;
	char   *dirbuf = NULL;
	int	dirbuflen;
	ino_t   fileno;
	struct vattr va;
	struct vnode *uvp = NULL;
	struct vnode *lvp = *lvpp;
	struct componentname cn;
	int len, reclen;
	tries = 0;

	/*
	 * If we want the filename, get some info we need while the
	 * current directory is still locked.
	 */
	if (bufp != NULL) {
		error = VOP_GETATTR(lvp, &va, p->p_ucred, p);
		if (error) {
			vput(lvp);
			*lvpp = NULL;
			*uvpp = NULL;
			return error;
		}
	}

	/*
	 * Ok, we have to do it the hard way..
	 * Next, get parent vnode using lookup of ..
	 */
	cn.cn_nameiop = LOOKUP;
	cn.cn_flags = ISLASTCN | ISDOTDOT | RDONLY;
	cn.cn_proc = p;
	cn.cn_cred = p->p_ucred;
	cn.cn_pnbuf = NULL;
	cn.cn_nameptr = "..";
	cn.cn_namelen = 2;
	cn.cn_hash = 0;
	cn.cn_consume = 0;

	/*
	 * At this point, lvp is locked and will be unlocked by the lookup.
	 * On successful return, *uvpp will be locked
	 */
	error = VOP_LOOKUP(lvp, uvpp, &cn);
	if (error) {
		vput(lvp);
		*lvpp = NULL;
		*uvpp = NULL;
		return error;
	}
	uvp = *uvpp;

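	/*
	 * Note that the ".." lookup above returned with lvp unlocked
	 * (but still referenced), so from here on it is released with
	 * vrele() rather than vput().
	 */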
	/* If we don't care about the pathname, we're done */
	if (bufp == NULL) {
		vrele(lvp);
		*lvpp = NULL;
		return 0;
	}

	fileno = va.va_fileid;

	dirbuflen = DIRBLKSIZ;
	if (dirbuflen < va.va_blocksize)
		dirbuflen = va.va_blocksize;
	dirbuf = (char *)malloc(dirbuflen, M_TEMP, M_WAITOK);

#if 0
unionread:
#endif
	off = 0;
	do {
		/* call VOP_READDIR of parent */
		iov.iov_base = dirbuf;
		iov.iov_len = dirbuflen;

		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = off;
		uio.uio_resid = dirbuflen;
		uio.uio_segflg = UIO_SYSSPACE;
		uio.uio_rw = UIO_READ;
		uio.uio_procp = NULL;

		eofflag = 0;

		error = VOP_READDIR(uvp, &uio, p->p_ucred, &eofflag, 0, 0);

		off = uio.uio_offset;

		/*
		 * Try again if NFS tosses its cookies.
		 * XXX this can still loop forever if the directory is busted
		 * such that the second or subsequent page of it always
		 * returns EINVAL
		 */
		if ((error == EINVAL) && (tries < 3)) {
			off = 0;
			tries++;
			continue;	/* once more, with feeling */
		}

		if (!error) {
			char   *cpos;
			struct dirent *dp;

			cpos = dirbuf;
			tries = 0;

			/* scan directory page looking for matching vnode */
			for (len = (dirbuflen - uio.uio_resid); len > 0;
			    len -= reclen) {
				dp = (struct dirent *) cpos;
				reclen = dp->d_reclen;

				/* check for malformed directory.. */
				if (reclen < DIRENT_MINSIZE) {
					error = EINVAL;
					goto out;
				}
				/*
				 * XXX should perhaps do VOP_LOOKUP to
				 * check that we got back to the right place,
				 * but getting the locking games for that
				 * right would be heinous.
				 */
				if ((dp->d_type != DT_WHT) &&
				    (dp->d_fileno == fileno)) {
					char *bp = *bpp;

					bp -= dp->d_namlen;
					if (bp <= bufp) {
						error = ERANGE;
						goto out;
					}
					memcpy(bp, dp->d_name, dp->d_namlen);
					error = 0;
					*bpp = bp;
					goto out;
				}
				cpos += reclen;
			}
		} else
			goto out;
	} while (!eofflag);
#if 0
	/*
	 * Deal with mount -o union, which unions only the
	 * root directory of the mount.
	 */
	if ((uvp->v_flag & VROOT) &&
	    (uvp->v_mount->mnt_flag & MNT_UNION)) {
		struct vnode *tvp = uvp;

		uvp = uvp->v_mount->mnt_vnodecovered;
		vput(tvp);
		VREF(uvp);
		*uvpp = uvp;
		error = vn_lock(uvp, LK_EXCLUSIVE | LK_RETRY);
		if (error != 0) {
			vrele(uvp);
			*uvpp = uvp = NULL;
			goto out;
		}
		goto unionread;
	}
#endif
	error = ENOENT;

out:
	vrele(lvp);
	*lvpp = NULL;
	free(dirbuf, M_TEMP);
	return error;
}

/*
 * Look in the vnode-to-name reverse cache to see if
 * we can find things the easy way.
 *
 * XXX vget failure path is untested.
 *
 * On entry, *lvpp is a locked vnode reference.
 * On exit, one of the following is the case:
 *	0) Both *lvpp and *uvpp are NULL and failure is returned.
 * 	1) *uvpp is NULL, *lvpp remains locked and -1 is returned (cache miss)
 *	2) *uvpp is a locked vnode reference, *lvpp is vput and NULL'ed
 *	   and 0 is returned (cache hit)
 */
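
/*
 * The expected calling pattern (this mirrors getcwd_common() below):
 * try the cache first, and fall back to scanning the parent directory
 * when -1 (a clean cache miss) is returned:
 *
 *	error = getcwd_getcache(&lvp, &uvp, &bp, bufp);
 *	if (error == -1)
 *		error = getcwd_scandir(&lvp, &uvp, &bp, bufp, p);
 */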

static int
getcwd_getcache(struct vnode **lvpp, struct vnode **uvpp, char **bpp,
    char *bufp)
{
	struct vnode *lvp, *uvp = NULL;
	char *obp = *bpp;
	int error;

	lvp = *lvpp;

	/*
	 * This returns 0 on a cache hit, -1 on a clean cache miss,
	 * or an errno on other failure.
	 */
	error = cache_revlookup(lvp, uvpp, bpp, bufp);
	if (error) {
		if (error != -1) {
			vput(lvp);
			*lvpp = NULL;
			*uvpp = NULL;
		}
		return error;
	}
	uvp = *uvpp;

	/*
	 * Since we're going up, we have to release the current lock
	 * before we take the parent lock: lookups coming down the tree
	 * lock parent before child, so holding the child's lock while
	 * waiting for the parent's could deadlock.
	 */

	VOP_UNLOCK(lvp, 0);

	error = vget(uvp, LK_EXCLUSIVE | LK_RETRY);
	/*
	 * Verify that vget succeeded while we were waiting for the
	 * lock.
	 */
	if (error) {
		/*
		 * Oops, we missed.  If the vget failed try to get our
		 * lock back; if that works, rewind the `bp' and tell
		 * caller to try things the hard way, otherwise give
		 * up.
		 */
		*uvpp = NULL;
		error = vn_lock(lvp, LK_EXCLUSIVE | LK_RETRY);
		if (error == 0) {
			*bpp = obp;
			return -1;
		}
	}
	vrele(lvp);
	*lvpp = NULL;

	return error;
}

/*
 * common routine shared by sys___getcwd() and vn_isunder()
 */
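
/*
 * Walk up from `lvp' towards `rvp' (or towards the process root, or
 * the system root, when rvp is NULL), prepending each name found to
 * the buffer between `bufp' and `*bpp'.  bufp/bpp may be NULL when
 * only reachability matters, as in vn_isunder().  At most `limit'
 * vnodes are visited; GETCWD_CHECK_ACCESS in `flags' requests
 * permission checks along the way.  The caller's own references on
 * lvp and rvp are not consumed.
 */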

int
getcwd_common(struct vnode *lvp, struct vnode *rvp, char **bpp, char *bufp,
    int limit, int flags, struct proc *p)
{
	struct cwdinfo *cwdi = p->p_cwdi;
	struct vnode *uvp = NULL;
	char *bp = NULL;
	int error;
	int perms = VEXEC;

	if (rvp == NULL) {
		rvp = cwdi->cwdi_rdir;
		if (rvp == NULL)
			rvp = rootvnode;
	}

	VREF(rvp);
	VREF(lvp);

	/*
	 * Error handling invariant:
	 * Before a `goto out':
	 *	lvp is either NULL, or locked and held.
	 *	uvp is either NULL, or locked and held.
	 */

	error = vn_lock(lvp, LK_EXCLUSIVE | LK_RETRY);
	if (error) {
		vrele(lvp);
		lvp = NULL;
		goto out;
	}
	if (bufp)
		bp = *bpp;
	/*
	 * this loop will terminate when one of the following happens:
	 *	- we hit the root
	 *	- getdirentries or lookup fails
	 *	- we run out of space in the buffer.
	 */
	if (lvp == rvp) {
		if (bp)
			*(--bp) = '/';
		goto out;
	}
	do {
		if (lvp->v_type != VDIR) {
			error = ENOTDIR;
			goto out;
		}

		/*
		 * access check here is optional, depending on
		 * whether or not caller cares.
		 */
		if (flags & GETCWD_CHECK_ACCESS) {
			error = VOP_ACCESS(lvp, perms, p->p_ucred, p);
			if (error)
				goto out;
			perms = VEXEC|VREAD;
		}
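		/*
		 * (Only VEXEC is asked of the starting directory, since
		 * it merely has to be searchable; every parent after it
		 * must also be readable, because its entries may have
		 * to be scanned to recover the name.)
		 */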

		/*
		 * step up if we're a covered vnode..
		 */
		while (lvp->v_flag & VROOT) {
			struct vnode *tvp;

			if (lvp == rvp)
				goto out;

			tvp = lvp;
			lvp = lvp->v_mount->mnt_vnodecovered;
			vput(tvp);
			/*
			 * hodie natus est radici frater
			 * ("today a brother is born to the root")
			 */
			if (lvp == NULL) {
				error = ENOENT;
				goto out;
			}
			VREF(lvp);
			error = vn_lock(lvp, LK_EXCLUSIVE | LK_RETRY);
			if (error != 0) {
				vrele(lvp);
				lvp = NULL;
				goto out;
			}
		}
		/*
		 * Look in the name cache; if that fails, look in the
		 * directory..
		 */
		error = getcwd_getcache(&lvp, &uvp, &bp, bufp);
		if (error == -1)
			error = getcwd_scandir(&lvp, &uvp, &bp, bufp, p);
		if (error)
			goto out;
#if DIAGNOSTIC
		if (lvp != NULL)
			panic("getcwd: oops, forgot to null lvp");
		if (bufp && (bp <= bufp)) {
			panic("getcwd: oops, went back too far");
		}
#endif
		if (bp)
			*(--bp) = '/';
		lvp = uvp;
		uvp = NULL;
		limit--;
	} while ((lvp != rvp) && (limit > 0));

out:
	if (bpp)
		*bpp = bp;
	if (uvp)
		vput(uvp);
	if (lvp)
		vput(lvp);
	vrele(rvp);
	return error;
}

/*
 * Check if one directory can be found inside another in the directory
 * hierarchy.
 *
 * Intended to be used in chroot, chdir, fchdir, etc., to ensure that
 * chroot() actually means something.
 */
int
vn_isunder(struct vnode *lvp, struct vnode *rvp, struct proc *p)
{
	int error;

	error = getcwd_common(lvp, rvp, NULL, NULL, MAXPATHLEN / 2, 0, p);

	if (!error)
		return 1;
	else
		return 0;
}
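
/*
 * Illustrative sketch (an assumption, not code from this file): a
 * caller such as fchdir(2) can keep a chrooted process from wandering
 * above its root with a check along these lines, where `vp' is the
 * proposed new directory and `rdir' is the process root directory:
 *
 *	if (rdir != NULL && vp != rdir && !vn_isunder(vp, rdir, p))
 *		return EPERM;
 */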

/*
 * Returns true if proc p1's root directory is equal to or under p2's
 * root directory.
 *
 * Intended to be used from ptrace/procfs sorts of things.
 */

int
proc_isunder(struct proc *p1, struct proc *p2)
{
	struct vnode *r1 = p1->p_cwdi->cwdi_rdir;
	struct vnode *r2 = p2->p_cwdi->cwdi_rdir;

	if (r1 == NULL)
		return (r2 == NULL);
	else if (r2 == NULL)
		return 1;
	else
		return vn_isunder(r1, r2, p2);
}

/*
 * Find pathname of process's current directory.
 *
 * Use vfs vnode-to-name reverse cache; if that fails, fall back
 * to reading directory contents.
 */

int
sys___getcwd(struct lwp *l, void *v, register_t *retval)
{
	struct sys___getcwd_args /* {
		syscallarg(char *) bufp;
		syscallarg(size_t) length;
	} */ *uap = v;

	int     error;
	char   *path;
	char   *bp, *bend;
	int     len = SCARG(uap, length);
	int	lenused;

	if (len > MAXPATHLEN * 4)
		len = MAXPATHLEN * 4;
	else if (len < 2)
		return ERANGE;

	path = (char *)malloc(len, M_TEMP, M_WAITOK);
	if (!path)
		return ENOMEM;

	bp = &path[len];
	bend = bp;
	*(--bp) = '\0';
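
	/*
	 * The path is assembled backwards: the terminating NUL sits at
	 * the very end of the buffer and bp moves towards path[0] as
	 * components are prepended; bend marks the end of the string so
	 * the used length can be computed below.
	 */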

	/*
	 * 5th argument here is "max number of vnodes to traverse".
	 * Since each entry takes up at least 2 bytes in the output buffer,
	 * limit it to N/2 vnodes for an N byte buffer.
	 */
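	/*
	 * (For example, a 1024-byte buffer bounds the walk at 512
	 * vnodes, since even the shortest possible component, "/x",
	 * consumes two bytes of output.)
	 */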
	error = getcwd_common(l->l_proc->p_cwdi->cwdi_cdir, NULL, &bp, path,
	    len / 2, GETCWD_CHECK_ACCESS, l->l_proc);

	if (error)
		goto out;
	lenused = bend - bp;
	*retval = lenused;
	/* put the result into user buffer */
	error = copyout(bp, SCARG(uap, bufp), lenused);

out:
	free(path, M_TEMP);
	return error;
}
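
/*
 * Illustrative sketch (an assumption, not part of this file): the
 * getcwd(3) front end in libc is essentially a thin wrapper around
 * this system call, along the lines of:
 *
 *	char *
 *	getcwd(char *buf, size_t size)
 *	{
 *		if (__getcwd(buf, size) == -1)
 *			return NULL;
 *		return buf;
 *	}
 *
 * (The real wrapper also handles the buf == NULL "allocate a buffer
 * for me" case; see the libc getcwd(3) source for details.)
 */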
573