/*	$NetBSD: linux_sched.c,v 1.69 2017/04/21 15:10:34 christos Exp $	*/

/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center; by Matthias Scheler.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Linux compatibility module. Try to deal with scheduler related syscalls.
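 *
 * Covers clone(2) (including NPTL thread creation), the sched_*
 * priority, policy and affinity calls, exit/exit_group, and
 * gettid/set_tid_address.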
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_sched.c,v 1.69 2017/04/21 15:10:34 christos Exp $");

#include <sys/param.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/syscallargs.h>
#include <sys/wait.h>
#include <sys/kauth.h>
#include <sys/ptrace.h>
#include <sys/atomic.h>

#include <sys/cpu.h>

#include <compat/linux/common/linux_types.h>
#include <compat/linux/common/linux_signal.h>
#include <compat/linux/common/linux_emuldata.h>
#include <compat/linux/common/linux_ipc.h>
#include <compat/linux/common/linux_sem.h>
#include <compat/linux/common/linux_exec.h>
#include <compat/linux/common/linux_machdep.h>

#include <compat/linux/linux_syscallargs.h>

#include <compat/linux/common/linux_sched.h>

static int linux_clone_nptl(struct lwp *, const struct linux_sys_clone_args *,
    register_t *);

/* Unlike Linux, calculate the CPU mask size dynamically */
#define	LINUX_CPU_MASK_SIZE (sizeof(long) * ((ncpu + LONG_BIT - 1) / LONG_BIT))

#if DEBUG_LINUX
#define DPRINTF(x) uprintf x
#else
#define DPRINTF(x)
#endif

static void
linux_child_return(void *arg)
{
	struct lwp *l = arg;
	struct proc *p = l->l_proc;
	struct linux_emuldata *led = l->l_emuldata;
	void *ctp = led->led_child_tidptr;
	int error;

	if (ctp) {
		if ((error = copyout(&p->p_pid, ctp, sizeof(p->p_pid))) != 0)
			printf("%s: LINUX_CLONE_CHILD_SETTID "
			    "failed (child_tidptr = %p, tid = %d, error = %d)\n",
			    __func__, ctp, p->p_pid, error);
	}
	child_return(arg);
}

int
linux_sys_clone(struct lwp *l, const struct linux_sys_clone_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) flags;
		syscallarg(void *) stack;
		syscallarg(void *) parent_tidptr;
		syscallarg(void *) tls;
		syscallarg(void *) child_tidptr;
	} */
	struct proc *p;
	struct linux_emuldata *led;
	int flags, sig, error;

	/*
	 * We don't support the Linux CLONE_PID or CLONE_PTRACE flags.
	 */
	if (SCARG(uap, flags) & (LINUX_CLONE_PID|LINUX_CLONE_PTRACE))
		return EINVAL;

	/*
	 * Thread group implies shared signals. Shared signals
	 * imply shared VM. This matches what the Linux kernel does.
	 */
	if (SCARG(uap, flags) & LINUX_CLONE_THREAD
	    && (SCARG(uap, flags) & LINUX_CLONE_SIGHAND) == 0)
		return EINVAL;
	if (SCARG(uap, flags) & LINUX_CLONE_SIGHAND
	    && (SCARG(uap, flags) & LINUX_CLONE_VM) == 0)
		return EINVAL;

	/*
	 * The thread group flavor is implemented totally differently.
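	 *
	 * For reference, an NPTL pthread_create() typically passes
	 * CLONE_VM|CLONE_FS|CLONE_FILES|CLONE_SIGHAND|CLONE_THREAD|
	 * CLONE_SYSVSEM|CLONE_SETTLS|CLONE_PARENT_SETTID|
	 * CLONE_CHILD_CLEARTID, so thread creation takes the
	 * linux_clone_nptl() path below rather than fork1().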
	 */
	if (SCARG(uap, flags) & LINUX_CLONE_THREAD)
		return linux_clone_nptl(l, uap, retval);

	flags = 0;
	if (SCARG(uap, flags) & LINUX_CLONE_VM)
		flags |= FORK_SHAREVM;
	if (SCARG(uap, flags) & LINUX_CLONE_FS)
		flags |= FORK_SHARECWD;
	if (SCARG(uap, flags) & LINUX_CLONE_FILES)
		flags |= FORK_SHAREFILES;
	if (SCARG(uap, flags) & LINUX_CLONE_SIGHAND)
		flags |= FORK_SHARESIGS;
	if (SCARG(uap, flags) & LINUX_CLONE_VFORK)
		flags |= FORK_PPWAIT;

	sig = SCARG(uap, flags) & LINUX_CLONE_CSIGNAL;
	if (sig < 0 || sig >= LINUX__NSIG)
		return EINVAL;
	sig = linux_to_native_signo[sig];

	if (SCARG(uap, flags) & LINUX_CLONE_CHILD_SETTID) {
		led = l->l_emuldata;
		led->led_child_tidptr = SCARG(uap, child_tidptr);
	}

	/*
	 * Note that Linux does not provide a portable way of specifying
	 * the stack area: the caller must know whether the stack grows
	 * up or down.  We therefore pass a stack size of 0, so that the
	 * code that would adjust the stack pointer is a no-op.
	 */
	if ((error = fork1(l, flags, sig, SCARG(uap, stack), 0,
	    linux_child_return, NULL, retval, &p)) != 0) {
		DPRINTF(("%s: fork1: error %d\n", __func__, error));
		return error;
	}

	return 0;
}

static int
linux_clone_nptl(struct lwp *l, const struct linux_sys_clone_args *uap, register_t *retval)
{
	/* {
		syscallarg(int) flags;
		syscallarg(void *) stack;
		syscallarg(void *) parent_tidptr;
		syscallarg(void *) tls;
		syscallarg(void *) child_tidptr;
	} */
	struct proc *p;
	struct lwp *l2;
	struct linux_emuldata *led;
	void *parent_tidptr, *tls, *child_tidptr;
	struct schedstate_percpu *spc;
	vaddr_t uaddr;
	lwpid_t lid;
	int flags, tnprocs, error;

	p = l->l_proc;
	flags = SCARG(uap, flags);
	parent_tidptr = SCARG(uap, parent_tidptr);
	tls = SCARG(uap, tls);
	child_tidptr = SCARG(uap, child_tidptr);

	tnprocs = atomic_inc_uint_nv(&nprocs);
	if (__predict_false(tnprocs >= maxproc) ||
	    kauth_authorize_process(l->l_cred, KAUTH_PROCESS_FORK, p,
	    KAUTH_ARG(tnprocs), NULL, NULL) != 0) {
		atomic_dec_uint(&nprocs);
		return EAGAIN;
	}

	uaddr = uvm_uarea_alloc();
	if (__predict_false(uaddr == 0)) {
		atomic_dec_uint(&nprocs);
		return ENOMEM;
	}

	error = lwp_create(l, p, uaddr, LWP_DETACHED | LWP_PIDLID,
	    SCARG(uap, stack), 0, child_return, NULL, &l2, l->l_class,
	    &l->l_sigmask, &l->l_sigstk);
	if (__predict_false(error)) {
		DPRINTF(("%s: lwp_create error=%d\n", __func__, error));
		atomic_dec_uint(&nprocs);
		uvm_uarea_free(uaddr);
		return error;
	}
	lid = l2->l_lid;

	/* LINUX_CLONE_CHILD_CLEARTID: clear TID in child's memory on exit() */
	if (flags & LINUX_CLONE_CHILD_CLEARTID) {
		led = l2->l_emuldata;
		led->led_clear_tid = child_tidptr;
	}

	/* LINUX_CLONE_PARENT_SETTID: store child's TID in parent's memory */
	if (flags & LINUX_CLONE_PARENT_SETTID) {
		if ((error = copyout(&lid, parent_tidptr, sizeof(lid))) != 0)
			printf("%s: LINUX_CLONE_PARENT_SETTID "
			    "failed (parent_tidptr = %p, tid = %d, error = %d)\n",
			    __func__, parent_tidptr, lid, error);
	}

	/* LINUX_CLONE_CHILD_SETTID: store child's TID in child's memory */
	if (flags & LINUX_CLONE_CHILD_SETTID) {
		if ((error = copyout(&lid, child_tidptr, sizeof(lid))) != 0)
			printf("%s: LINUX_CLONE_CHILD_SETTID "
			    "failed (child_tidptr = %p, tid = %d, error = %d)\n",
			    __func__, child_tidptr, lid, error);
	}
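
	/*
	 * A failed TID copyout above is reported but deliberately not
	 * fatal to the clone itself; Linux likewise appears to ignore
	 * failures of the corresponding put_user() stores rather than
	 * failing clone(2), hence printf() instead of an error return.
	 */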

	if (flags & LINUX_CLONE_SETTLS) {
		error = LINUX_LWP_SETPRIVATE(l2, tls);
		if (error) {
			DPRINTF(("%s: LINUX_LWP_SETPRIVATE %d\n", __func__,
			    error));
			lwp_exit(l2);
			return error;
		}
	}

	/*
	 * Set the new LWP running, unless the process is stopping,
	 * in which case the LWP is created stopped.
	 */
	mutex_enter(p->p_lock);
	lwp_lock(l2);
	spc = &l2->l_cpu->ci_schedstate;
	if ((l->l_flag & (LW_WREBOOT | LW_WSUSPEND | LW_WEXIT)) == 0) {
		if (p->p_stat == SSTOP || (p->p_sflag & PS_STOPPING) != 0) {
			KASSERT(l2->l_wchan == NULL);
			l2->l_stat = LSSTOP;
			p->p_nrlwps--;
			lwp_unlock_to(l2, spc->spc_lwplock);
		} else {
			KASSERT(lwp_locked(l2, spc->spc_mutex));
			l2->l_stat = LSRUN;
			sched_enqueue(l2, false);
			lwp_unlock(l2);
		}
	} else {
		l2->l_stat = LSSUSPENDED;
		p->p_nrlwps--;
		lwp_unlock_to(l2, spc->spc_lwplock);
	}
	mutex_exit(p->p_lock);

	retval[0] = lid;
	retval[1] = 0;
	return 0;
}

/*
 * Linux realtime priority
 *
 * - SCHED_RR and SCHED_FIFO tasks have priorities [1,99].
 *
 * - SCHED_OTHER tasks don't have realtime priorities.
 *   In particular, sched_param::sched_priority is always 0.
 */

#define	LINUX_SCHED_RTPRIO_MIN	1
#define	LINUX_SCHED_RTPRIO_MAX	99

static int
sched_linux2native(int linux_policy, struct linux_sched_param *linux_params,
    int *native_policy, struct sched_param *native_params)
{

	switch (linux_policy) {
	case LINUX_SCHED_OTHER:
		if (native_policy != NULL) {
			*native_policy = SCHED_OTHER;
		}
		break;

	case LINUX_SCHED_FIFO:
		if (native_policy != NULL) {
			*native_policy = SCHED_FIFO;
		}
		break;

	case LINUX_SCHED_RR:
		if (native_policy != NULL) {
			*native_policy = SCHED_RR;
		}
		break;

	default:
		return EINVAL;
	}

	if (linux_params != NULL) {
		int prio = linux_params->sched_priority;

		KASSERT(native_params != NULL);

		if (linux_policy == LINUX_SCHED_OTHER) {
			if (prio != 0) {
				return EINVAL;
			}
			native_params->sched_priority = PRI_NONE; /* XXX */
		} else {
			if (prio < LINUX_SCHED_RTPRIO_MIN ||
			    prio > LINUX_SCHED_RTPRIO_MAX) {
				return EINVAL;
			}
			native_params->sched_priority =
			    (prio - LINUX_SCHED_RTPRIO_MIN)
			    * (SCHED_PRI_MAX - SCHED_PRI_MIN)
			    / (LINUX_SCHED_RTPRIO_MAX - LINUX_SCHED_RTPRIO_MIN)
			    + SCHED_PRI_MIN;
		}
	}

	return 0;
}

static int
sched_native2linux(int native_policy, struct sched_param *native_params,
    int *linux_policy, struct linux_sched_param *linux_params)
{

	switch (native_policy) {
	case SCHED_OTHER:
		if (linux_policy != NULL) {
			*linux_policy = LINUX_SCHED_OTHER;
		}
		break;

	case SCHED_FIFO:
		if (linux_policy != NULL) {
			*linux_policy = LINUX_SCHED_FIFO;
		}
		break;

	case SCHED_RR:
		if (linux_policy != NULL) {
			*linux_policy = LINUX_SCHED_RR;
		}
		break;

	default:
		panic("%s: unknown policy %d\n", __func__, native_policy);
	}

	if (native_params != NULL) {
		int prio = native_params->sched_priority;

		KASSERT(prio >= SCHED_PRI_MIN);
		KASSERT(prio <= SCHED_PRI_MAX);
		KASSERT(linux_params != NULL);

		DPRINTF(("%s: native: policy %d, priority %d\n",
		    __func__, native_policy, prio));

		if (native_policy == SCHED_OTHER) {
			linux_params->sched_priority = 0;
		} else {
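			/*
			 * Linear rescale from the native priority range
			 * onto the Linux rtprio range.  Worked example,
			 * assuming the usual SCHED_PRI_MIN == 0 and
			 * SCHED_PRI_MAX == 63: native 63 maps to
			 * 63 * 98 / 63 + 1 = 99 and native 0 maps to 1,
			 * so the extremes of both ranges coincide.
			 * Integer division truncates interior values
			 * downward.
			 */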
			linux_params->sched_priority =
			    (prio - SCHED_PRI_MIN)
			    * (LINUX_SCHED_RTPRIO_MAX - LINUX_SCHED_RTPRIO_MIN)
			    / (SCHED_PRI_MAX - SCHED_PRI_MIN)
			    + LINUX_SCHED_RTPRIO_MIN;
		}
		/* XXX: the converted Linux policy is not kept here, hence -1 */
		DPRINTF(("%s: linux: policy %d, priority %d\n",
		    __func__, -1, linux_params->sched_priority));
	}

	return 0;
}

int
linux_sys_sched_setparam(struct lwp *l, const struct linux_sys_sched_setparam_args *uap, register_t *retval)
{
	/* {
		syscallarg(linux_pid_t) pid;
		syscallarg(const struct linux_sched_param *) sp;
	} */
	int error, policy;
	struct linux_sched_param lp;
	struct sched_param sp;

	if (SCARG(uap, pid) < 0 || SCARG(uap, sp) == NULL) {
		error = EINVAL;
		goto out;
	}

	error = copyin(SCARG(uap, sp), &lp, sizeof(lp));
	if (error)
		goto out;

	/* We need the current policy in Linux terms. */
	error = do_sched_getparam(SCARG(uap, pid), 0, &policy, NULL);
	if (error)
		goto out;
	error = sched_native2linux(policy, NULL, &policy, NULL);
	if (error)
		goto out;

	error = sched_linux2native(policy, &lp, &policy, &sp);
	if (error)
		goto out;

	error = do_sched_setparam(SCARG(uap, pid), 0, policy, &sp);
	if (error)
		goto out;

out:
	return error;
}

int
linux_sys_sched_getparam(struct lwp *l, const struct linux_sys_sched_getparam_args *uap, register_t *retval)
{
	/* {
		syscallarg(linux_pid_t) pid;
		syscallarg(struct linux_sched_param *) sp;
	} */
	struct linux_sched_param lp;
	struct sched_param sp;
	int error, policy;

	if (SCARG(uap, pid) < 0 || SCARG(uap, sp) == NULL) {
		error = EINVAL;
		goto out;
	}

	error = do_sched_getparam(SCARG(uap, pid), 0, &policy, &sp);
	if (error)
		goto out;
	DPRINTF(("%s: native: policy %d, priority %d\n",
	    __func__, policy, sp.sched_priority));

	error = sched_native2linux(policy, &sp, NULL, &lp);
	if (error)
		goto out;
	DPRINTF(("%s: linux: policy %d, priority %d\n",
	    __func__, policy, lp.sched_priority));

	error = copyout(&lp, SCARG(uap, sp), sizeof(lp));
	if (error)
		goto out;

out:
	return error;
}

int
linux_sys_sched_setscheduler(struct lwp *l, const struct linux_sys_sched_setscheduler_args *uap, register_t *retval)
{
	/* {
		syscallarg(linux_pid_t) pid;
		syscallarg(int) policy;
		syscallarg(const struct linux_sched_param *) sp;
	} */
	int error, policy;
	struct linux_sched_param lp;
	struct sched_param sp;

	if (SCARG(uap, pid) < 0 || SCARG(uap, sp) == NULL) {
		error = EINVAL;
		goto out;
	}

	error = copyin(SCARG(uap, sp), &lp, sizeof(lp));
	if (error)
		goto out;
	DPRINTF(("%s: linux: policy %d, priority %d\n",
	    __func__, SCARG(uap, policy), lp.sched_priority));

	error = sched_linux2native(SCARG(uap, policy), &lp, &policy, &sp);
	if (error)
		goto out;
	DPRINTF(("%s: native: policy %d, priority %d\n",
	    __func__, policy, sp.sched_priority));

	error = do_sched_setparam(SCARG(uap, pid), 0, policy, &sp);
	if (error)
		goto out;

out:
	return error;
}
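
/*
 * Example of the path just above (hypothetical userland call): a Linux
 * binary doing
 *
 *	struct sched_param sp = { .sched_priority = 50 };
 *	sched_setscheduler(0, SCHED_FIFO, &sp);
 *
 * enters linux_sys_sched_setscheduler(), which rescales rtprio 50 into
 * the native priority range via sched_linux2native() before handing it
 * to do_sched_setparam().
 */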

int
linux_sys_sched_getscheduler(struct lwp *l, const struct linux_sys_sched_getscheduler_args *uap, register_t *retval)
{
	/* {
		syscallarg(linux_pid_t) pid;
	} */
	int error, policy;

	*retval = -1;

	error = do_sched_getparam(SCARG(uap, pid), 0, &policy, NULL);
	if (error)
		goto out;

	error = sched_native2linux(policy, NULL, &policy, NULL);
	if (error)
		goto out;

	*retval = policy;

out:
	return error;
}

int
linux_sys_sched_yield(struct lwp *l, const void *v, register_t *retval)
{

	yield();
	return 0;
}

int
linux_sys_sched_get_priority_max(struct lwp *l, const struct linux_sys_sched_get_priority_max_args *uap, register_t *retval)
{
	/* {
		syscallarg(int) policy;
	} */

	switch (SCARG(uap, policy)) {
	case LINUX_SCHED_OTHER:
		*retval = 0;
		break;
	case LINUX_SCHED_FIFO:
	case LINUX_SCHED_RR:
		*retval = LINUX_SCHED_RTPRIO_MAX;
		break;
	default:
		return EINVAL;
	}

	return 0;
}

int
linux_sys_sched_get_priority_min(struct lwp *l, const struct linux_sys_sched_get_priority_min_args *uap, register_t *retval)
{
	/* {
		syscallarg(int) policy;
	} */

	switch (SCARG(uap, policy)) {
	case LINUX_SCHED_OTHER:
		*retval = 0;
		break;
	case LINUX_SCHED_FIFO:
	case LINUX_SCHED_RR:
		*retval = LINUX_SCHED_RTPRIO_MIN;
		break;
	default:
		return EINVAL;
	}

	return 0;
}

int
linux_sys_exit(struct lwp *l, const struct linux_sys_exit_args *uap, register_t *retval)
{

	lwp_exit(l);
	return 0;
}

#ifndef __m68k__
/* Present on everything but m68k */
int
linux_sys_exit_group(struct lwp *l, const struct linux_sys_exit_group_args *uap, register_t *retval)
{

	return sys_exit(l, (const void *)uap, retval);
}
#endif /* !__m68k__ */

int
linux_sys_set_tid_address(struct lwp *l, const struct linux_sys_set_tid_address_args *uap, register_t *retval)
{
	/* {
		syscallarg(int *) tid;
	} */
	struct linux_emuldata *led;

	led = (struct linux_emuldata *)l->l_emuldata;
	led->led_clear_tid = SCARG(uap, tid);
	*retval = l->l_lid;

	return 0;
}

/* ARGSUSED */
int
linux_sys_gettid(struct lwp *l, const void *v, register_t *retval)
{

	*retval = l->l_lid;
	return 0;
}

/*
 * The affinity syscalls assume that the layout of our cpu kcpuset is
 * the same as Linux's: a linear bitmask.
 */
int
linux_sys_sched_getaffinity(struct lwp *l, const struct linux_sys_sched_getaffinity_args *uap, register_t *retval)
{
	/* {
		syscallarg(linux_pid_t) pid;
		syscallarg(unsigned int) len;
		syscallarg(unsigned long *) mask;
	} */
	struct lwp *t;
	kcpuset_t *kcset;
	size_t size;
	cpuid_t i;
	int error;

	size = LINUX_CPU_MASK_SIZE;
	if (SCARG(uap, len) < size)
		return EINVAL;

	/* Lock the LWP */
	t = lwp_find2(SCARG(uap, pid), l->l_lid);
	if (t == NULL)
		return ESRCH;

	/* Check the permission */
	if (kauth_authorize_process(l->l_cred,
	    KAUTH_PROCESS_SCHEDULER_GETAFFINITY, t->l_proc, NULL, NULL, NULL)) {
		mutex_exit(t->l_proc->p_lock);
		return EPERM;
	}

	kcpuset_create(&kcset, true);
	lwp_lock(t);
	if (t->l_affinity != NULL)
		kcpuset_copy(kcset, t->l_affinity);
	else {
		/*
		 * All available CPUs should be masked when affinity has not
		 * been set.
		 */
		kcpuset_zero(kcset);
		for (i = 0; i < ncpu; i++)
			kcpuset_set(kcset, i);
	}
	lwp_unlock(t);
	mutex_exit(t->l_proc->p_lock);
	error = kcpuset_copyout(kcset, (cpuset_t *)SCARG(uap, mask), size);
	kcpuset_unuse(kcset, NULL);
	*retval = size;
	return error;
}
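
/*
 * Note on sizing: LINUX_CPU_MASK_SIZE rounds ncpu up to whole longs,
 * so e.g. ncpu = 8 on an LP64 machine gives an 8-byte mask.  glibc's
 * sched_getaffinity()/sched_setaffinity() wrappers typically pass
 * sizeof(cpu_set_t) == 128 bytes, which comfortably satisfies the
 * len >= size checks in the two affinity syscalls.
 */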
676 */ 677 kcpuset_zero(kcset); 678 for (i = 0; i < ncpu; i++) 679 kcpuset_set(kcset, i); 680 } 681 lwp_unlock(t); 682 mutex_exit(t->l_proc->p_lock); 683 error = kcpuset_copyout(kcset, (cpuset_t *)SCARG(uap, mask), size); 684 kcpuset_unuse(kcset, NULL); 685 *retval = size; 686 return error; 687 } 688 689 int 690 linux_sys_sched_setaffinity(struct lwp *l, const struct linux_sys_sched_setaffinity_args *uap, register_t *retval) 691 { 692 /* { 693 syscallarg(linux_pid_t) pid; 694 syscallarg(unsigned int) len; 695 syscallarg(unsigned long *) mask; 696 } */ 697 struct sys__sched_setaffinity_args ssa; 698 size_t size; 699 700 size = LINUX_CPU_MASK_SIZE; 701 if (SCARG(uap, len) < size) 702 return EINVAL; 703 704 SCARG(&ssa, pid) = SCARG(uap, pid); 705 SCARG(&ssa, lid) = l->l_lid; 706 SCARG(&ssa, size) = size; 707 SCARG(&ssa, cpuset) = (cpuset_t *)SCARG(uap, mask); 708 709 return sys__sched_setaffinity(l, &ssa, retval); 710 } 711