/*	$NetBSD: linux_sched.c,v 1.18 2004/09/10 22:22:20 wiz Exp $	*/

/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center; by Matthias Scheler.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Linux compatibility module. Try to deal with scheduler-related syscalls.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_sched.c,v 1.18 2004/09/10 22:22:20 wiz Exp $");

#include <sys/param.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/sa.h>
#include <sys/syscallargs.h>
#include <sys/wait.h>

#include <machine/cpu.h>

#include <compat/linux/common/linux_types.h>
#include <compat/linux/common/linux_signal.h>

#include <compat/linux/linux_syscallargs.h>

#include <compat/linux/common/linux_sched.h>

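/*
 * linux_sys_clone(): emulate Linux clone(2) by translating the Linux
 * CLONE_* flags into the corresponding native FORK_* flags and letting
 * fork1() do the actual work.  Flag combinations that cannot be
 * expressed this way are rejected with EINVAL.
 */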
int
linux_sys_clone(l, v, retval)
	struct lwp *l;
	void *v;
	register_t *retval;
{
	struct linux_sys_clone_args /* {
		syscallarg(int) flags;
		syscallarg(void *) stack;
	} */ *uap = v;
	int flags, sig;

	/*
	 * We don't support the Linux CLONE_PID or CLONE_PTRACE flags.
	 */
	if (SCARG(uap, flags) & (LINUX_CLONE_PID|LINUX_CLONE_PTRACE))
		return (EINVAL);

	/*
	 * Thread group implies shared signals. Shared signals
	 * imply shared VM. This matches what the Linux kernel does.
	 */
	if (SCARG(uap, flags) & LINUX_CLONE_THREAD
	    && (SCARG(uap, flags) & LINUX_CLONE_SIGHAND) == 0)
		return (EINVAL);
	if (SCARG(uap, flags) & LINUX_CLONE_SIGHAND
	    && (SCARG(uap, flags) & LINUX_CLONE_VM) == 0)
		return (EINVAL);

	flags = 0;

	if (SCARG(uap, flags) & LINUX_CLONE_VM)
		flags |= FORK_SHAREVM;
	if (SCARG(uap, flags) & LINUX_CLONE_FS)
		flags |= FORK_SHARECWD;
	if (SCARG(uap, flags) & LINUX_CLONE_FILES)
		flags |= FORK_SHAREFILES;
	if (SCARG(uap, flags) & LINUX_CLONE_SIGHAND)
		flags |= FORK_SHARESIGS;
	if (SCARG(uap, flags) & LINUX_CLONE_VFORK)
		flags |= FORK_PPWAIT;

	sig = SCARG(uap, flags) & LINUX_CLONE_CSIGNAL;
	if (sig < 0 || sig >= LINUX__NSIG)
		return (EINVAL);
	sig = linux_to_native_signo[sig];

	/*
	 * Note that Linux does not provide a portable way of specifying
	 * the stack area; the caller must know if the stack grows up
	 * or down.  So, we pass a stack size of 0, so that the code
	 * that makes this adjustment is a no-op.
	 */
	return (fork1(l, flags, sig, SCARG(uap, stack), 0,
	    NULL, NULL, retval, NULL));
}

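/*
 * The sched_*() emulations below only validate their arguments and the
 * caller's permission to act on the target process: the caller must
 * name its own process, hold superuser credentials, or share a real or
 * effective uid with the target, otherwise EPERM is returned.  Since
 * only the default time-sharing policy is supported, no scheduling
 * state is actually changed.
 */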
int
linux_sys_sched_setparam(cl, v, retval)
	struct lwp *cl;
	void *v;
	register_t *retval;
{
	struct linux_sys_sched_setparam_args /* {
		syscallarg(linux_pid_t) pid;
		syscallarg(const struct linux_sched_param *) sp;
	} */ *uap = v;
	struct proc *cp = cl->l_proc;
	int error;
	struct linux_sched_param lp;
	struct proc *p;

	/*
	 * We only check for valid parameters and return afterwards.
	 */

	if (SCARG(uap, pid) < 0 || SCARG(uap, sp) == NULL)
		return EINVAL;

	error = copyin(SCARG(uap, sp), &lp, sizeof(lp));
	if (error)
		return error;

	if (SCARG(uap, pid) != 0) {
		struct pcred *pc = cp->p_cred;

		if ((p = pfind(SCARG(uap, pid))) == NULL)
			return ESRCH;
		if (!(cp == p ||
		      pc->pc_ucred->cr_uid == 0 ||
		      pc->p_ruid == p->p_cred->p_ruid ||
		      pc->pc_ucred->cr_uid == p->p_cred->p_ruid ||
		      pc->p_ruid == p->p_ucred->cr_uid ||
		      pc->pc_ucred->cr_uid == p->p_ucred->cr_uid))
			return EPERM;
	}

	return 0;
}

int
linux_sys_sched_getparam(cl, v, retval)
	struct lwp *cl;
	void *v;
	register_t *retval;
{
	struct linux_sys_sched_getparam_args /* {
		syscallarg(linux_pid_t) pid;
		syscallarg(struct linux_sched_param *) sp;
	} */ *uap = v;
	struct proc *cp = cl->l_proc;
	struct proc *p;
	struct linux_sched_param lp;

	/*
	 * We only check for valid parameters and return a dummy
	 * priority afterwards.
	 */
	if (SCARG(uap, pid) < 0 || SCARG(uap, sp) == NULL)
		return EINVAL;

	if (SCARG(uap, pid) != 0) {
		struct pcred *pc = cp->p_cred;

		if ((p = pfind(SCARG(uap, pid))) == NULL)
			return ESRCH;
		if (!(cp == p ||
		      pc->pc_ucred->cr_uid == 0 ||
		      pc->p_ruid == p->p_cred->p_ruid ||
		      pc->pc_ucred->cr_uid == p->p_cred->p_ruid ||
		      pc->p_ruid == p->p_ucred->cr_uid ||
		      pc->pc_ucred->cr_uid == p->p_ucred->cr_uid))
			return EPERM;
	}

	lp.sched_priority = 0;
	return copyout(&lp, SCARG(uap, sp), sizeof(lp));
}

int
linux_sys_sched_setscheduler(cl, v, retval)
	struct lwp *cl;
	void *v;
	register_t *retval;
{
	struct linux_sys_sched_setscheduler_args /* {
		syscallarg(linux_pid_t) pid;
		syscallarg(int) policy;
		syscallarg(const struct linux_sched_param *) sp;
	} */ *uap = v;
	struct proc *cp = cl->l_proc;
	int error;
	struct linux_sched_param lp;
	struct proc *p;

	/*
	 * We only check for valid parameters and return afterwards.
	 */

	if (SCARG(uap, pid) < 0 || SCARG(uap, sp) == NULL)
		return EINVAL;

	error = copyin(SCARG(uap, sp), &lp, sizeof(lp));
	if (error)
		return error;

	if (SCARG(uap, pid) != 0) {
		struct pcred *pc = cp->p_cred;

		if ((p = pfind(SCARG(uap, pid))) == NULL)
			return ESRCH;
		if (!(cp == p ||
		      pc->pc_ucred->cr_uid == 0 ||
		      pc->p_ruid == p->p_cred->p_ruid ||
		      pc->pc_ucred->cr_uid == p->p_cred->p_ruid ||
		      pc->p_ruid == p->p_ucred->cr_uid ||
		      pc->pc_ucred->cr_uid == p->p_ucred->cr_uid))
			return EPERM;
	}

	/*
	 * We can't emulate anything but the default scheduling policy.
	 */
	if (SCARG(uap, policy) != LINUX_SCHED_OTHER || lp.sched_priority != 0)
		return EINVAL;

	return 0;
}

int
linux_sys_sched_getscheduler(cl, v, retval)
	struct lwp *cl;
	void *v;
	register_t *retval;
{
	struct linux_sys_sched_getscheduler_args /* {
		syscallarg(linux_pid_t) pid;
	} */ *uap = v;
	struct proc *cp = cl->l_proc;
	struct proc *p;

	*retval = -1;

	/*
	 * We only check for valid parameters and return afterwards.
	 */

	if (SCARG(uap, pid) != 0) {
		struct pcred *pc = cp->p_cred;

		if ((p = pfind(SCARG(uap, pid))) == NULL)
			return ESRCH;
		if (!(cp == p ||
		      pc->pc_ucred->cr_uid == 0 ||
		      pc->p_ruid == p->p_cred->p_ruid ||
		      pc->pc_ucred->cr_uid == p->p_cred->p_ruid ||
		      pc->p_ruid == p->p_ucred->cr_uid ||
		      pc->pc_ucred->cr_uid == p->p_ucred->cr_uid))
			return EPERM;
	}

	/*
	 * We can't emulate anything but the default scheduling policy.
	 */
	*retval = LINUX_SCHED_OTHER;
	return 0;
}

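/*
 * sched_yield(2) maps directly onto the native yield().
 */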
int
linux_sys_sched_yield(cl, v, retval)
	struct lwp *cl;
	void *v;
	register_t *retval;
{

	yield();
	return 0;
}

int
linux_sys_sched_get_priority_max(cl, v, retval)
	struct lwp *cl;
	void *v;
	register_t *retval;
{
	struct linux_sys_sched_get_priority_max_args /* {
		syscallarg(int) policy;
	} */ *uap = v;

	/*
	 * We can't emulate anything but the default scheduling policy.
	 */
	if (SCARG(uap, policy) != LINUX_SCHED_OTHER) {
		*retval = -1;
		return EINVAL;
	}

	*retval = 0;
	return 0;
}

int
linux_sys_sched_get_priority_min(cl, v, retval)
	struct lwp *cl;
	void *v;
	register_t *retval;
{
	struct linux_sys_sched_get_priority_min_args /* {
		syscallarg(int) policy;
	} */ *uap = v;

	/*
	 * We can't emulate anything but the default scheduling policy.
	 */
	if (SCARG(uap, policy) != LINUX_SCHED_OTHER) {
		*retval = -1;
		return EINVAL;
	}

	*retval = 0;
	return 0;
}

#ifndef __m68k__
/* Present on everything but m68k */
int
linux_sys_exit_group(l, v, retval)
	struct lwp *l;
	void *v;
	register_t *retval;
{
	struct linux_sys_exit_group_args /* {
		syscallarg(int) error_code;
	} */ *uap = v;

	/*
	 * XXX The calling thread is supposed to kill all threads
	 * in the same thread group (i.e. all threads created
	 * via clone(2) with the CLONE_THREAD flag set). This does
	 * not appear to be used yet, so the thread group handling
	 * is currently not implemented.
	 */

	exit1(l, W_EXITCODE(SCARG(uap, error_code), 0));
	/* NOTREACHED */
	return 0;
}
#endif /* !__m68k__ */
370