xref: /dflybsd-src/sys/kern/kern_varsym.c (revision e7302aa08274de307cd2c3345fc64c56dbe56e21)
/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/kern_varsym.c,v 1.9 2007/04/30 07:18:54 dillon Exp $
 */

/*
 * This module implements variable storage and management for variant
 * symlinks.  These variables may also be used for general purposes.
 */
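
/*
 * Illustrative note (not part of the original source): a variant symlink
 * is an ordinary symlink whose target contains one or more ${NAME}
 * sequences.  When the path is resolved, varsymreplace() substitutes each
 * sequence with the value of the named variable, which varsymfind() looks
 * up first in the per-process set, then the per-user set, then the
 * system-wide set (or the jail's set when the process is jailed).
 */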

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ucred.h>
#include <sys/resourcevar.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/jail.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/varsym.h>
#include <sys/sysproto.h>

#include <sys/mplock2.h>

MALLOC_DEFINE(M_VARSYM, "varsym", "variable sets for variant symlinks");

struct varsymset	varsymset_sys;

/*
 * Initialize the variant symlink subsystem
 */
static void
varsym_sysinit(void *dummy)
{
    varsymset_init(&varsymset_sys, NULL);
}
SYSINIT(announce, SI_BOOT2_MACHDEP, SI_ORDER_FIRST, varsym_sysinit, NULL);

/*
 * varsymreplace() - called from namei
 *
 *	Do variant symlink variable substitution
 */
int
varsymreplace(char *cp, int linklen, int maxlen)
{
    int rlen;
    int xlen;
    int nlen;
    int i;
    varsym_t var;

    rlen = linklen;
    while (linklen > 1) {
	if (cp[0] == '$' && cp[1] == '{') {
	    for (i = 2; i < linklen; ++i) {
		if (cp[i] == '}')
		    break;
	    }
	    if (i < linklen &&
		(var = varsymfind(VARSYM_ALL_MASK, cp + 2, i - 2)) != NULL
	    ) {
		xlen = i + 1;			/* bytes to strike */
		nlen = strlen(var->vs_data);	/* bytes to add */
		if (linklen + nlen - xlen >= maxlen) {
		    varsymdrop(var);
		    return(-1);
		}
		KKASSERT(linklen >= xlen);
		if (linklen != xlen)
		    bcopy(cp + xlen, cp + nlen, linklen - xlen);
		bcopy(var->vs_data, cp, nlen);
		varsymdrop(var);	/* release ref from varsymfind() */
		linklen += nlen - xlen;	/* new relative length */
		rlen += nlen - xlen;	/* returned total length */
		cp += nlen;		/* adjust past replacement */
		linklen -= nlen;	/* adjust past replacement */
		maxlen -= nlen;		/* adjust past replacement */
	    } else {
		/*
		 * It's ok if i points to the '}', it will simply be
		 * skipped.  i could also have hit linklen.
		 */
		cp += i;
		linklen -= i;
		maxlen -= i;
	    }
	} else {
	    ++cp;
	    --linklen;
	    --maxlen;
	}
    }
    return(rlen);
}
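
/*
 * Worked example (illustrative; "arch" and its value are hypothetical).
 * With a variable "arch" set to "x86_64", a link buffer containing
 *
 *	"lib/${arch}/libc.so"	(linklen 19)
 *
 * is rewritten in place to
 *
 *	"lib/x86_64/libc.so"	(return value 18)
 *
 * The new total length is returned, or -1 if the expanded string would
 * not fit within maxlen.
 */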

/*
 * varsym_set() system call
 *
 * (int level, const char *name, const char *data)
 *
 * MPALMOSTSAFE
 */
int
sys_varsym_set(struct varsym_set_args *uap)
{
    char name[MAXVARSYM_NAME];
    char *buf;
    struct thread *td;
    struct proc *p;
    struct lwp *lp;
    int error;

    td = curthread;
    lp = td->td_lwp;
    p = lp ? lp->lwp_proc : NULL;

    if ((error = copyinstr(uap->name, name, sizeof(name), NULL)) != 0)
	goto done2;
    buf = kmalloc(MAXVARSYM_DATA, M_TEMP, M_WAITOK);
    if (uap->data &&
	(error = copyinstr(uap->data, buf, MAXVARSYM_DATA, NULL)) != 0)
    {
	goto done1;
    }

    get_mplock();

    switch(uap->level) {
    case VARSYM_SYS:
	if (lp != NULL && td->td_ucred->cr_prison != NULL)
	    uap->level = VARSYM_PRISON;
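	/* fall through */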
    case VARSYM_PRISON:
	if (lp != NULL &&
	    (error = priv_check_cred(td->td_ucred, PRIV_VARSYM_SYS, 0)) != 0)
	    break;
	/* fall through */
    case VARSYM_USER:
	/* XXX check jail / implement per-jail user */
	/* fall through */
    case VARSYM_PROC:
	if (uap->data) {
	    (void)varsymmake(uap->level, name, NULL);
	    error = varsymmake(uap->level, name, buf);
	} else {
	    error = varsymmake(uap->level, name, NULL);
	}
	break;
    }
    rel_mplock();
done1:
    kfree(buf, M_TEMP);
done2:
    return(error);
}
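
/*
 * Userland usage sketch (illustrative only; "osrel" and its value are
 * hypothetical, and the libc wrapper is assumed to mirror the raw system
 * call arguments shown above):
 *
 *	#include <sys/varsym.h>
 *
 *	varsym_set(VARSYM_USER, "osrel", "4.8");	(set or replace)
 *	varsym_set(VARSYM_USER, "osrel", NULL);		(delete)
 *
 * VARSYM_SYS issued from within a jail is retargeted to the jail's
 * VARSYM_PRISON set; both levels require PRIV_VARSYM_SYS.
 */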

/*
 * varsym_get() system call
 *
 * (int mask, const char *wild, char *buf, int bufsize)
 *
 * MPALMOSTSAFE
 */
int
sys_varsym_get(struct varsym_get_args *uap)
{
    char wild[MAXVARSYM_NAME];
    varsym_t sym;
    int error;
    int dlen;

    get_mplock();
    if ((error = copyinstr(uap->wild, wild, sizeof(wild), NULL)) != 0)
	goto done;
    sym = varsymfind(uap->mask, wild, strlen(wild));
    if (sym == NULL) {
	error = ENOENT;
	goto done;
    }
    dlen = strlen(sym->vs_data);
    if (dlen < uap->bufsize) {
	copyout(sym->vs_data, uap->buf, dlen + 1);
    } else if (uap->bufsize) {
	copyout("", uap->buf, 1);
    }
    uap->sysmsg_result = dlen + 1;
    varsymdrop(sym);
done:
    rel_mplock();
    return(error);
}
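
/*
 * Userland usage sketch (illustrative; "osrel" is a hypothetical name and
 * the libc wrapper is assumed to return sysmsg_result on success):
 *
 *	#include <sys/varsym.h>
 *
 *	char buf[MAXVARSYM_DATA];
 *	int len = varsym_get(VARSYM_ALL_MASK, "osrel", buf, sizeof(buf));
 *
 * On success len is strlen(data) + 1; if it exceeds the supplied bufsize
 * the data did not fit and only an empty string was stored.  ENOENT is
 * returned when no matching variable exists.
 */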

/*
 * varsym_list() system call
 *
 * (int level, char *buf, int maxsize, int *marker)
 *
 * MPALMOSTSAFE
 */
int
sys_varsym_list(struct varsym_list_args *uap)
{
	struct varsymset *vss;
	struct varsyment *ve;
	struct thread *td;
	struct proc *p;
	struct lwp *lp;
	int i;
	int error;
	int bytes;
	int earlyterm;
	int marker;

	/*
	 * Get the marker from userspace.
	 */
	get_mplock();
	if ((error = copyin(uap->marker, &marker, sizeof(marker))) != 0)
		goto done;

	/*
	 * Figure out the varsym set.
	 */
	td = curthread;
	lp = td->td_lwp;
	p = lp ? lp->lwp_proc : NULL;

	vss = NULL;

	switch (uap->level) {
	case VARSYM_PROC:
		if (p)
			vss = &p->p_varsymset;
		break;
	case VARSYM_USER:
		if (lp)
			vss = &td->td_ucred->cr_uidinfo->ui_varsymset;
		break;
	case VARSYM_SYS:
		vss = &varsymset_sys;
		break;
	case VARSYM_PRISON:
		if (lp && td->td_ucred->cr_prison)
			vss = &td->td_ucred->cr_prison->pr_varsymset;
		break;
	}
	if (vss == NULL) {
		error = EINVAL;
		goto done;
	}

	/*
	 * Loop through the variables and dump them to uap->buf
	 */
	i = 0;
	bytes = 0;
	earlyterm = 0;

	lockmgr(&vss->vx_lock, LK_SHARED);
	TAILQ_FOREACH(ve, &vss->vx_queue, ve_entry) {
		varsym_t sym = ve->ve_sym;
		int namelen = strlen(sym->vs_name);
		int datalen = strlen(sym->vs_data);
		int totlen = namelen + datalen + 2;

		/*
		 * Skip to our index point
		 */
		if (i < marker) {
			++i;
			continue;
		}

		/*
		 * Stop if there is insufficient space in the user buffer.
		 * If we haven't stored anything yet return EOVERFLOW.
		 * Note that the marker index (i) does not change.
		 */
		if (bytes + totlen > uap->maxsize) {
			if (bytes == 0)
				error = EOVERFLOW;
			earlyterm = 1;
			break;
		}

		error = copyout(sym->vs_name, uap->buf + bytes, namelen + 1);
		if (error == 0) {
			bytes += namelen + 1;
			error = copyout(sym->vs_data, uap->buf + bytes, datalen + 1);
			if (error == 0)
				bytes += datalen + 1;
			else
				bytes -= namelen + 1;	/* revert if error */
		}
		if (error) {
			earlyterm = 1;
			break;
		}
		++i;
	}
	lockmgr(&vss->vx_lock, LK_RELEASE);

	/*
	 * Save the marker back.  If no error occurred and earlyterm is
	 * clear, the marker is set to -1, indicating that the variable
	 * list has been exhausted.  If no error occurred the number of
	 * bytes loaded into the buffer is returned, otherwise the
	 * syscall code returns -1.
	 */
	if (error == 0 && earlyterm == 0)
		marker = -1;
	else
		marker = i;
	if (error == 0)
		error = copyout(&marker, uap->marker, sizeof(marker));
	uap->sysmsg_result = bytes;
done:
	rel_mplock();
	return(error);
}
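
/*
 * Userland usage sketch (illustrative; assumes the libc wrapper returns
 * the byte count from sysmsg_result on success and -1 on error).  The
 * buffer is filled with pairs of NUL-terminated name and value strings;
 * iterate until the marker comes back as -1:
 *
 *	char buf[1024];
 *	int marker = 0;
 *
 *	while (marker != -1) {
 *		int bytes = varsym_list(VARSYM_PROC, buf, sizeof(buf), &marker);
 *		int off = 0;
 *
 *		if (bytes < 0)
 *			break;
 *		while (off < bytes) {
 *			const char *name = buf + off;
 *			const char *data = name + strlen(name) + 1;
 *
 *			off += strlen(name) + strlen(data) + 2;
 *			... consume name and data ...
 *		}
 *	}
 *
 * A too-small buffer fails with EOVERFLOW before anything is copied.
 */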

/*
 * Lookup a variant symlink.  XXX use a hash table.
 */
static
struct varsyment *
varsymlookup(struct varsymset *vss, const char *name, int namelen)
{
    struct varsyment *ve;

    KKASSERT(lockstatus(&vss->vx_lock, curthread) != 0);
    TAILQ_FOREACH(ve, &vss->vx_queue, ve_entry) {
	varsym_t var = ve->ve_sym;
	if (var->vs_namelen == namelen &&
	    bcmp(name, var->vs_name, namelen) == 0
	) {
	    return(ve);
	}
    }
    return(NULL);
}

static
void
vsslock(struct varsymset **vss, struct varsymset *n)
{
	if (*vss) {
		lockmgr(&(*vss)->vx_lock, LK_RELEASE);
	}
	lockmgr(&n->vx_lock, LK_SHARED);
	*vss = n;
}

varsym_t
varsymfind(int mask, const char *name, int namelen)
{
    struct varsyment *ve = NULL;
    struct varsymset *vss = NULL;
    struct thread *td;
    struct lwp *lp;
    struct proc *p;
    varsym_t sym;

    td = curthread;
    lp = td->td_lwp;
    p = lp ? lp->lwp_proc : NULL;

    if ((mask & (VARSYM_PROC_MASK|VARSYM_USER_MASK)) && lp != NULL) {
	if (mask & VARSYM_PROC_MASK) {
	    vsslock(&vss, &p->p_varsymset);
	    ve = varsymlookup(vss, name, namelen);
	}
	if (ve == NULL && (mask & VARSYM_USER_MASK)) {
	    vsslock(&vss, &td->td_ucred->cr_uidinfo->ui_varsymset);
	    ve = varsymlookup(vss, name, namelen);
	}
    }
    if (ve == NULL && (mask & VARSYM_SYS_MASK)) {
	if (lp != NULL && td->td_ucred->cr_prison) {
	    vsslock(&vss, &td->td_ucred->cr_prison->pr_varsymset);
	    ve = varsymlookup(vss, name, namelen);
	} else {
	    vsslock(&vss, &varsymset_sys);
	    ve = varsymlookup(vss, name, namelen);
	}
    }
    if (ve) {
	sym = ve->ve_sym;
	atomic_add_int(&sym->vs_refs, 1);
    } else {
	sym = NULL;
    }
    if (vss)		/* vss is NULL if the mask matched no set */
	lockmgr(&vss->vx_lock, LK_RELEASE);
    return sym;
}
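
/*
 * Kernel-side usage sketch: varsymfind() returns the symbol with an extra
 * reference held, which the caller must release with varsymdrop() once it
 * is done with vs_data (varsymreplace() above follows this pattern):
 *
 *	varsym_t var;
 *
 *	if ((var = varsymfind(VARSYM_ALL_MASK, "name", 4)) != NULL) {
 *		... use var->vs_data ...
 *		varsymdrop(var);
 *	}
 */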

int
varsymmake(int level, const char *name, const char *data)
{
    struct varsymset *vss = NULL;
    struct varsyment *ve;
    struct thread *td;
    struct proc *p;
    struct lwp *lp;
    varsym_t sym;
    int namelen = strlen(name);
    int datalen;
    int error;

    td = curthread;
    lp = td->td_lwp;
    p = lp ? lp->lwp_proc : NULL;

    switch(level) {
    case VARSYM_PROC:
	if (p)
	    vss = &p->p_varsymset;
	break;
    case VARSYM_USER:
	if (lp)
	    vss = &td->td_ucred->cr_uidinfo->ui_varsymset;
	break;
    case VARSYM_SYS:
	vss = &varsymset_sys;
	break;
    case VARSYM_PRISON:
	if (lp && td->td_ucred->cr_prison)
	    vss = &td->td_ucred->cr_prison->pr_varsymset;
	break;
    }
    if (vss == NULL) {
	return EINVAL;
    }
    lockmgr(&vss->vx_lock, LK_EXCLUSIVE);
    if (data && vss->vx_setsize >= MAXVARSYM_SET) {
	error = E2BIG;
    } else if (data) {
	datalen = strlen(data);
	ve = kmalloc(sizeof(struct varsyment), M_VARSYM, M_WAITOK|M_ZERO);
	sym = kmalloc(sizeof(struct varsym) + namelen + datalen + 2, M_VARSYM, M_WAITOK);
	ve->ve_sym = sym;
	sym->vs_refs = 1;
	sym->vs_namelen = namelen;
	sym->vs_name = (char *)(sym + 1);
	sym->vs_data = sym->vs_name + namelen + 1;
	strcpy(sym->vs_name, name);
	strcpy(sym->vs_data, data);
	TAILQ_INSERT_TAIL(&vss->vx_queue, ve, ve_entry);
	vss->vx_setsize += sizeof(struct varsyment) + sizeof(struct varsym) + namelen + datalen + 8;
	error = 0;
    } else {
	if ((ve = varsymlookup(vss, name, namelen)) != NULL) {
	    TAILQ_REMOVE(&vss->vx_queue, ve, ve_entry);
	    vss->vx_setsize -= sizeof(struct varsyment) + sizeof(struct varsym) + namelen + strlen(ve->ve_sym->vs_data) + 8;
	    varsymdrop(ve->ve_sym);
	    kfree(ve, M_VARSYM);
	    error = 0;
	} else {
	    error = ENOENT;
	}
    }
    lockmgr(&vss->vx_lock, LK_RELEASE);
    return(error);
}
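
/*
 * Usage note: data == NULL deletes an existing variable (ENOENT if it
 * does not exist); a non-NULL data unconditionally appends a new entry
 * without checking for duplicates, which is why sys_varsym_set() above
 * deletes the old name before re-creating it.  E2BIG is returned once
 * the set has grown to MAXVARSYM_SET bytes.
 */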

void
varsymdrop(varsym_t sym)
{
    KKASSERT(sym->vs_refs > 0);
    if (atomic_fetchadd_int(&sym->vs_refs, -1) == 1) {
	kfree(sym, M_VARSYM);
    }
}

/*
 * Insert a duplicate of ve in vss.  Does not do any locking, so it is
 * the caller's responsibility to make sure nobody else can mess with
 * the TAILQ in vss at the same time.
 */
static void
varsymdup(struct varsymset *vss, struct varsyment *ve)
{
    struct varsyment *nve;

    nve = kmalloc(sizeof(struct varsyment), M_VARSYM, M_WAITOK|M_ZERO);
    nve->ve_sym = ve->ve_sym;
    ++nve->ve_sym->vs_refs;	/* can't be reached, no need for atomic add */
    /*
     * We're only called through varsymset_init() so vss is not yet reachable,
     * no need to lock.
     */
    TAILQ_INSERT_TAIL(&vss->vx_queue, nve, ve_entry);
}

void
varsymset_init(struct varsymset *vss, struct varsymset *copy)
{
    struct varsyment *ve;

    TAILQ_INIT(&vss->vx_queue);
    lockinit(&vss->vx_lock, "vx", 0, 0);
    if (copy) {
	TAILQ_FOREACH(ve, &copy->vx_queue, ve_entry) {
	    varsymdup(vss, ve);
	}
	vss->vx_setsize = copy->vx_setsize;
    }
}

void
varsymset_clean(struct varsymset *vss)
{
    struct varsyment *ve;

    lockmgr(&vss->vx_lock, LK_EXCLUSIVE);
    while ((ve = TAILQ_FIRST(&vss->vx_queue)) != NULL) {
	TAILQ_REMOVE(&vss->vx_queue, ve, ve_entry);
	varsymdrop(ve->ve_sym);
	kfree(ve, M_VARSYM);
    }
    vss->vx_setsize = 0;
    lockmgr(&vss->vx_lock, LK_RELEASE);
}