/*	$NetBSD: vfs_getcwd.c,v 1.29 2005/12/11 12:24:30 christos Exp $ */

/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Bill Sommerfeld.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_getcwd.c,v 1.29 2005/12/11 12:24:30 christos Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/dirent.h>
#include <ufs/ufs/dir.h>	/* XXX only for DIRBLKSIZ */

#include <sys/sa.h>
#include <sys/syscallargs.h>

/*
 * Vnode variable naming conventions in this file:
 *
 * rvp: the current root we're aiming towards.
 * lvp, *lvpp: the "lower" vnode
 * uvp, *uvpp: the "upper" vnode.
 *
 * Since all the vnodes we're dealing with are directories, and the
 * lookups are going *up* in the filesystem rather than *down*, the
 * usual "pvp" (parent) or "dvp" (directory) naming conventions are
 * too confusing.
 */

/*
 * XXX Will loop forever in certain cases if a directory read reliably
 *	returns EINVAL on the last block.
 * XXX Is EINVAL the right thing to return if a directory is malformed?
 */

/*
 * XXX Untested vs. mount -o union; probably does the wrong thing.
 */

/*
 * Find the parent vnode of *lvpp and return it in *uvpp.
 *
 * If we care about the name, scan the parent directory looking for the
 * name of the directory entry pointing at lvp.
 *
 * Place the name in the buffer which starts at bufp, immediately
 * before *bpp, and move bpp backwards to point at the start of it.
 *
 * On entry, *lvpp is a locked vnode reference; on exit, it is vput and NULL'ed.
 * On exit, *uvpp is either NULL or is a locked vnode reference.
 */
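/*
 * Illustrative sketch (not from the original source): the name buffer
 * is filled from its end toward bufp, with *bpp always pointing at the
 * first byte in use.  After the entry for lvp, say "share", is found,
 * the layout is roughly
 *
 *	bufp             *bpp
 *	|                |
 *	[ ...unused... ][share][components found earlier, NUL-terminated]
 *
 * getcwd_common() prepends a '/' in front of each component it collects.
 */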
static int
getcwd_scandir(struct vnode **lvpp, struct vnode **uvpp, char **bpp,
    char *bufp, struct lwp *l)
{
	int     error = 0;
	int     eofflag;
	off_t   off;
	int     tries;
	struct uio uio;
	struct iovec iov;
	char   *dirbuf = NULL;
	int	dirbuflen;
	ino_t   fileno;
	struct vattr va;
	struct vnode *uvp = NULL;
	struct vnode *lvp = *lvpp;
	struct ucred *ucred = l->l_proc->p_ucred;
	struct componentname cn;
	int len, reclen;
	tries = 0;

	/*
	 * If we want the filename, get some info we need while the
	 * current directory is still locked.
	 */
	if (bufp != NULL) {
		error = VOP_GETATTR(lvp, &va, ucred, l);
		if (error) {
			vput(lvp);
			*lvpp = NULL;
			*uvpp = NULL;
			return error;
		}
	}

	/*
	 * Ok, we have to do it the hard way..
	 * Next, get parent vnode using lookup of ..
	 */
	cn.cn_nameiop = LOOKUP;
	cn.cn_flags = ISLASTCN | ISDOTDOT | RDONLY;
	cn.cn_lwp = l;
	cn.cn_cred = ucred;
	cn.cn_pnbuf = NULL;
	cn.cn_nameptr = "..";
	cn.cn_namelen = 2;
	cn.cn_hash = 0;
	cn.cn_consume = 0;

	/*
	 * At this point, lvp is locked and will be unlocked by the lookup.
	 * On successful return, *uvpp will be locked
	 */
	error = VOP_LOOKUP(lvp, uvpp, &cn);
	if (error) {
		vput(lvp);
		*lvpp = NULL;
		*uvpp = NULL;
		return error;
	}
	uvp = *uvpp;

	/* If we don't care about the pathname, we're done */
	if (bufp == NULL) {
		vrele(lvp);
		*lvpp = NULL;
		return 0;
	}

	fileno = va.va_fileid;

	dirbuflen = DIRBLKSIZ;
	if (dirbuflen < va.va_blocksize)
		dirbuflen = va.va_blocksize;
	dirbuf = (char *)malloc(dirbuflen, M_TEMP, M_WAITOK);

#if 0
unionread:
#endif
	off = 0;
	do {
		/* call VOP_READDIR of parent */
		iov.iov_base = dirbuf;
		iov.iov_len = dirbuflen;

		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = off;
		uio.uio_resid = dirbuflen;
		uio.uio_segflg = UIO_SYSSPACE;
		uio.uio_rw = UIO_READ;
		uio.uio_lwp = NULL;

		eofflag = 0;

		error = VOP_READDIR(uvp, &uio, ucred, &eofflag, 0, 0);

		off = uio.uio_offset;

		/*
		 * Try again if NFS tosses its cookies.
		 * XXX this can still loop forever if the directory is busted
		 * such that the second or subsequent page of it always
		 * returns EINVAL
		 */
		if ((error == EINVAL) && (tries < 3)) {
			off = 0;
			tries++;
			continue;	/* once more, with feeling */
		}

		if (!error) {
			char   *cpos;
			struct dirent *dp;

			cpos = dirbuf;
			tries = 0;

			/* scan directory page looking for matching vnode */
			for (len = (dirbuflen - uio.uio_resid); len > 0;
			    len -= reclen) {
				dp = (struct dirent *) cpos;
				reclen = dp->d_reclen;

				/* check for malformed directory.. */
				if (reclen < _DIRENT_MINSIZE(dp)) {
					error = EINVAL;
					goto out;
				}
				/*
				 * XXX should perhaps do VOP_LOOKUP to
				 * check that we got back to the right place,
				 * but getting the locking games for that
				 * right would be heinous.
				 */
				if ((dp->d_type != DT_WHT) &&
				    (dp->d_fileno == fileno)) {
					char *bp = *bpp;

					bp -= dp->d_namlen;
					if (bp <= bufp) {
						error = ERANGE;
						goto out;
					}
					memcpy(bp, dp->d_name, dp->d_namlen);
					error = 0;
					*bpp = bp;
					goto out;
				}
				cpos += reclen;
			}
		} else
			goto out;
	} while (!eofflag);
#if 0
	/*
	 * Deal with mount -o union, which unions only the
	 * root directory of the mount.
	 */
	if ((uvp->v_flag & VROOT) &&
	    (uvp->v_mount->mnt_flag & MNT_UNION)) {
		struct vnode *tvp = uvp;

		uvp = uvp->v_mount->mnt_vnodecovered;
		vput(tvp);
		VREF(uvp);
		*uvpp = uvp;
		error = vn_lock(uvp, LK_EXCLUSIVE | LK_RETRY);
		if (error != 0) {
			vrele(uvp);
			*uvpp = uvp = NULL;
			goto out;
		}
		goto unionread;
	}
#endif
	error = ENOENT;

out:
	vrele(lvp);
	*lvpp = NULL;
	free(dirbuf, M_TEMP);
	return error;
}

/*
 * Look in the vnode-to-name reverse cache to see if
 * we can find things the easy way.
 *
 * XXX vget failure path is untested.
 *
 * On entry, *lvpp is a locked vnode reference.
 * On exit, one of the following is the case:
 *	0) Both *lvpp and *uvpp are NULL and failure is returned.
 *	1) *uvpp is NULL, *lvpp remains locked and -1 is returned (cache miss)
 *	2) *uvpp is a locked vnode reference, *lvpp is vput and NULL'ed
 *	   and 0 is returned (cache hit)
 */

static int
getcwd_getcache(struct vnode **lvpp, struct vnode **uvpp, char **bpp,
    char *bufp)
{
	struct vnode *lvp, *uvp = NULL;
	char *obp = *bpp;
	int error;

	lvp = *lvpp;

	/*
	 * This returns 0 on a cache hit, -1 on a clean cache miss,
	 * or an errno on other failure.
	 */
	error = cache_revlookup(lvp, uvpp, bpp, bufp);
	if (error) {
		if (error != -1) {
			vput(lvp);
			*lvpp = NULL;
			*uvpp = NULL;
		}
		return error;
	}
	uvp = *uvpp;

	/*
	 * Since we're going up, we have to release the current lock
	 * before we take the parent lock.
	 */

	VOP_UNLOCK(lvp, 0);

	error = vget(uvp, LK_EXCLUSIVE | LK_RETRY);
	/*
	 * Verify that vget succeeded while we were waiting for the
	 * lock.
	 */
	if (error) {
		/*
		 * Oops, we missed.  If the vget failed, try to get our
		 * lock back; if that works, rewind `bp' and tell the
		 * caller to try things the hard way, otherwise give up.
		 */
		*uvpp = NULL;
		error = vn_lock(lvp, LK_EXCLUSIVE | LK_RETRY);
		if (error == 0) {
			*bpp = obp;
			return -1;
		}
	}
	vrele(lvp);
	*lvpp = NULL;

	return error;
}

/*
 * common routine shared by sys___getcwd() and vn_isunder()
 */

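/*
 * Sketch of the walk (illustrative, not from the original source):
 * each iteration finds the parent of lvp, prepends the child's name
 * and a '/' to the buffer, and repeats until rvp (or the vnode limit)
 * is reached; a cwd of /usr/share is thus assembled backwards as
 * "share" -> "/share" -> "usr/share" -> "/usr/share".
 */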
int
getcwd_common(struct vnode *lvp, struct vnode *rvp, char **bpp, char *bufp,
    int limit, int flags, struct lwp *l)
{
	struct cwdinfo *cwdi = l->l_proc->p_cwdi;
	struct ucred *ucred = l->l_proc->p_ucred;
	struct vnode *uvp = NULL;
	char *bp = NULL;
	int error;
	int perms = VEXEC;

	if (rvp == NULL) {
		rvp = cwdi->cwdi_rdir;
		if (rvp == NULL)
			rvp = rootvnode;
	}

	VREF(rvp);
	VREF(lvp);

	/*
	 * Error handling invariant:
	 * Before a `goto out':
	 *	lvp is either NULL, or locked and held.
	 *	uvp is either NULL, or locked and held.
	 */

	error = vn_lock(lvp, LK_EXCLUSIVE | LK_RETRY);
	if (error) {
		vrele(lvp);
		lvp = NULL;
		goto out;
	}
	if (bufp)
		bp = *bpp;
	/*
	 * this loop will terminate when one of the following happens:
	 *	- we hit the root
	 *	- getdirentries or lookup fails
	 *	- we run out of space in the buffer.
	 */
	if (lvp == rvp) {
		if (bp)
			*(--bp) = '/';
		goto out;
	}
	do {
		if (lvp->v_type != VDIR) {
			error = ENOTDIR;
			goto out;
		}

		/*
		 * access check here is optional, depending on
		 * whether or not caller cares.
		 */
		if (flags & GETCWD_CHECK_ACCESS) {
			error = VOP_ACCESS(lvp, perms, ucred, l);
			if (error)
				goto out;
			perms = VEXEC|VREAD;
		}

		/*
		 * step up if we're a covered vnode..
		 */
		while (lvp->v_flag & VROOT) {
			struct vnode *tvp;

			if (lvp == rvp)
				goto out;

			tvp = lvp;
			lvp = lvp->v_mount->mnt_vnodecovered;
			vput(tvp);
			/*
			 * hodie natus est radici frater
			 * ("today a brother is born to the root")
			 */
			if (lvp == NULL) {
				error = ENOENT;
				goto out;
			}
			VREF(lvp);
			error = vn_lock(lvp, LK_EXCLUSIVE | LK_RETRY);
			if (error != 0) {
				vrele(lvp);
				lvp = NULL;
				goto out;
			}
		}
		/*
		 * Look in the name cache; if that fails, look in the
		 * directory..
		 */
		error = getcwd_getcache(&lvp, &uvp, &bp, bufp);
		if (error == -1)
			error = getcwd_scandir(&lvp, &uvp, &bp, bufp, l);
		if (error)
			goto out;
#if DIAGNOSTIC
		if (lvp != NULL)
			panic("getcwd: oops, forgot to null lvp");
		if (bufp && (bp <= bufp)) {
			panic("getcwd: oops, went back too far");
		}
#endif
		if (bp)
			*(--bp) = '/';
		lvp = uvp;
		uvp = NULL;
		limit--;
	} while ((lvp != rvp) && (limit > 0));

out:
	if (bpp)
		*bpp = bp;
	if (uvp)
		vput(uvp);
	if (lvp)
		vput(lvp);
	vrele(rvp);
	return error;
}

/*
 * Check if one directory can be found inside another in the directory
 * hierarchy.
 *
 * Intended to be used in chroot, chdir, fchdir, etc., to ensure that
 * chroot() actually means something.
 */
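/*
 * Hypothetical usage sketch (an assumption, not taken from this file):
 * a caller enforcing a chroot boundary might reject a directory change
 * with
 *
 *	if (!vn_isunder(newdir_vp, l->l_proc->p_cwdi->cwdi_rdir, l))
 *		return EPERM;
 *
 * where newdir_vp is the candidate directory vnode.
 */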
int
vn_isunder(struct vnode *lvp, struct vnode *rvp, struct lwp *l)
{
	int error;

	error = getcwd_common(lvp, rvp, NULL, NULL, MAXPATHLEN / 2, 0, l);

	if (!error)
		return 1;
	else
		return 0;
}

/*
 * Returns true if proc p1's root directory is equal to or under p2's
 * root directory.
 *
 * Intended to be used from ptrace/procfs sorts of things.
 */

int
proc_isunder(struct proc *p1, struct lwp *l2)
{
	struct vnode *r1 = p1->p_cwdi->cwdi_rdir;
	struct vnode *r2 = l2->l_proc->p_cwdi->cwdi_rdir;

	if (r1 == NULL)
		return (r2 == NULL);
	else if (r2 == NULL)
		return 1;
	else
		return vn_isunder(r1, r2, l2);
}

/*
 * Find pathname of process's current directory.
 *
 * Use vfs vnode-to-name reverse cache; if that fails, fall back
 * to reading directory contents.
 */

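/*
 * Illustrative userland sketch (an assumption, not part of this file):
 * libc's getcwd(3) is expected to invoke this syscall roughly as
 *
 *	char buf[MAXPATHLEN];
 *	int len = __getcwd(buf, sizeof(buf));
 *
 * with the kernel copying the NUL-terminated path to the start of the
 * user buffer and reporting the number of bytes used via *retval.
 */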
int
sys___getcwd(struct lwp *l, void *v, register_t *retval)
{
	struct sys___getcwd_args /* {
		syscallarg(char *) bufp;
		syscallarg(size_t) length;
	} */ *uap = v;

	int     error;
	char   *path;
	char   *bp, *bend;
	int     len = SCARG(uap, length);
	int	lenused;

	if (len > MAXPATHLEN * 4)
		len = MAXPATHLEN * 4;
	else if (len < 2)
		return ERANGE;

	path = (char *)malloc(len, M_TEMP, M_WAITOK);
	if (!path)
		return ENOMEM;

	bp = &path[len];
	bend = bp;
	*(--bp) = '\0';

	/*
	 * 5th argument here is "max number of vnodes to traverse".
	 * Since each entry takes up at least 2 bytes in the output buffer,
	 * limit it to N/2 vnodes for an N byte buffer.
	 */
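	/* For example (illustrative): a 1024-byte buffer permits at most 512 vnodes. */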
	error = getcwd_common(l->l_proc->p_cwdi->cwdi_cdir, NULL, &bp, path,
	    len/2, GETCWD_CHECK_ACCESS, l);

	if (error)
		goto out;
	lenused = bend - bp;
	*retval = lenused;
	/* put the result into user buffer */
	error = copyout(bp, SCARG(uap, bufp), lenused);

out:
	free(path, M_TEMP);
	return error;
}