/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2000 Marcel Moolenaar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_posix.h"

#include <sys/param.h>
#include <sys/imgact_aout.h>
#include <sys/fcntl.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/racct.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/syscallsubr.h>
#include <sys/sysproto.h>
#include <sys/vnode.h>

#include <security/audit/audit.h>
#include <security/mac/mac_framework.h>

#include <machine/frame.h>
#include <machine/pcb.h>        /* needed for pcb definition in linux_set_thread_area */
#include <machine/psl.h>
#include <machine/segments.h>
#include <machine/sysarch.h>

#include <vm/pmap.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_param.h>

#include <x86/reg.h>

#include <i386/linux/linux.h>
#include <i386/linux/linux_proto.h>
#include <compat/linux/linux_emul.h>
#include <compat/linux/linux_fork.h>
#include <compat/linux/linux_ipc.h>
#include <compat/linux/linux_misc.h>
#include <compat/linux/linux_mmap.h>
#include <compat/linux/linux_signal.h>
#include <compat/linux/linux_util.h>

struct l_descriptor {
        l_uint          entry_number;
        l_ulong         base_addr;
        l_uint          limit;
        l_uint          seg_32bit:1;
        l_uint          contents:2;
        l_uint          read_exec_only:1;
        l_uint          limit_in_pages:1;
        l_uint          seg_not_present:1;
        l_uint          useable:1;
};

struct l_old_select_argv {
        l_int           nfds;
        l_fd_set        *readfds;
        l_fd_set        *writefds;
        l_fd_set        *exceptfds;
        struct l_timeval *timeout;
};

struct l_ipc_kludge {
        struct l_msgbuf *msgp;
        l_long          msgtyp;
};

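/*
 * Demultiplex the Linux ipc(2) syscall into the individual System V IPC
 * operations, selected by the low 16 bits of args->what.
 */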
int
linux_ipc(struct thread *td, struct linux_ipc_args *args)
{

        switch (args->what & 0xFFFF) {
        case LINUX_SEMOP: {

                return (kern_semop(td, args->arg1, PTRIN(args->ptr),
                    args->arg2, NULL));
        }
        case LINUX_SEMGET: {
                struct linux_semget_args a;

                a.key = args->arg1;
                a.nsems = args->arg2;
                a.semflg = args->arg3;
                return (linux_semget(td, &a));
        }
        case LINUX_SEMCTL: {
                struct linux_semctl_args a;
                int error;

                a.semid = args->arg1;
                a.semnum = args->arg2;
                a.cmd = args->arg3;
                error = copyin(PTRIN(args->ptr), &a.arg, sizeof(a.arg));
                if (error)
                        return (error);
                return (linux_semctl(td, &a));
        }
        case LINUX_SEMTIMEDOP: {
                struct linux_semtimedop_args a;

                a.semid = args->arg1;
                a.tsops = PTRIN(args->ptr);
                a.nsops = args->arg2;
                a.timeout = PTRIN(args->arg5);
                return (linux_semtimedop(td, &a));
        }
        case LINUX_MSGSND: {
                struct linux_msgsnd_args a;

                a.msqid = args->arg1;
                a.msgp = PTRIN(args->ptr);
                a.msgsz = args->arg2;
                a.msgflg = args->arg3;
                return (linux_msgsnd(td, &a));
        }
        case LINUX_MSGRCV: {
                struct linux_msgrcv_args a;

                a.msqid = args->arg1;
                a.msgsz = args->arg2;
                a.msgflg = args->arg3;
                if ((args->what >> 16) == 0) {
                        struct l_ipc_kludge tmp;
                        int error;

                        if (args->ptr == 0)
                                return (EINVAL);
                        error = copyin(PTRIN(args->ptr), &tmp, sizeof(tmp));
                        if (error)
                                return (error);
                        a.msgp = PTRIN(tmp.msgp);
                        a.msgtyp = tmp.msgtyp;
                } else {
                        a.msgp = PTRIN(args->ptr);
                        a.msgtyp = args->arg5;
                }
                return (linux_msgrcv(td, &a));
        }
        case LINUX_MSGGET: {
                struct linux_msgget_args a;

                a.key = args->arg1;
                a.msgflg = args->arg2;
                return (linux_msgget(td, &a));
        }
        case LINUX_MSGCTL: {
                struct linux_msgctl_args a;

                a.msqid = args->arg1;
                a.cmd = args->arg2;
                a.buf = PTRIN(args->ptr);
                return (linux_msgctl(td, &a));
        }
        case LINUX_SHMAT: {
                struct linux_shmat_args a;
                l_uintptr_t addr;
                int error;

                a.shmid = args->arg1;
                a.shmaddr = PTRIN(args->ptr);
                a.shmflg = args->arg2;
                error = linux_shmat(td, &a);
                if (error != 0)
                        return (error);
                addr = td->td_retval[0];
                error = copyout(&addr, PTRIN(args->arg3), sizeof(addr));
                td->td_retval[0] = 0;
                return (error);
        }
        case LINUX_SHMDT: {
                struct linux_shmdt_args a;

                a.shmaddr = PTRIN(args->ptr);
                return (linux_shmdt(td, &a));
        }
        case LINUX_SHMGET: {
                struct linux_shmget_args a;

                a.key = args->arg1;
                a.size = args->arg2;
                a.shmflg = args->arg3;
                return (linux_shmget(td, &a));
        }
        case LINUX_SHMCTL: {
                struct linux_shmctl_args a;

                a.shmid = args->arg1;
                a.cmd = args->arg2;
                a.buf = PTRIN(args->ptr);
                return (linux_shmctl(td, &a));
        }
        default:
                break;
        }

        return (EINVAL);
}

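/*
 * Old Linux select(2): the five select arguments arrive packed in a
 * single user-space structure; copy them in and call linux_select().
 */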
int
linux_old_select(struct thread *td, struct linux_old_select_args *args)
{
        struct l_old_select_argv linux_args;
        struct linux_select_args newsel;
        int error;

        error = copyin(args->ptr, &linux_args, sizeof(linux_args));
        if (error)
                return (error);

        newsel.nfds = linux_args.nfds;
        newsel.readfds = linux_args.readfds;
        newsel.writefds = linux_args.writefds;
        newsel.exceptfds = linux_args.exceptfds;
        newsel.timeout = linux_args.timeout;
        return (linux_select(td, &newsel));
}

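/*
 * Load the TLS descriptor supplied by a newly cloned thread into its
 * pcb so that %gs points at the new segment when the thread runs.
 */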
int
linux_set_cloned_tls(struct thread *td, void *desc)
{
        struct segment_descriptor sd;
        struct l_user_desc info;
        int idx, error;
        int a[2];

        error = copyin(desc, &info, sizeof(struct l_user_desc));
        if (error) {
                linux_msg(td, "set_cloned_tls copyin failed!");
        } else {
                idx = info.entry_number;

                /*
                 * Looks like we're getting the idx we returned
                 * in the set_thread_area() syscall.
                 */
                if (idx != 6 && idx != 3) {
                        linux_msg(td, "set_cloned_tls resetting idx!");
                        idx = 3;
                }

                /* This doesn't happen in practice. */
                if (idx == 6) {
                        /* We might copy out the entry_number as 3. */
                        info.entry_number = 3;
                        error = copyout(&info, desc, sizeof(struct l_user_desc));
                        if (error)
                                linux_msg(td, "set_cloned_tls copyout failed!");
                }

                a[0] = LINUX_LDT_entry_a(&info);
                a[1] = LINUX_LDT_entry_b(&info);

                memcpy(&sd, &a, sizeof(a));
                /* set %gs */
                td->td_pcb->pcb_gsd = sd;
                td->td_pcb->pcb_gs = GSEL(GUGS_SEL, SEL_UPL);
        }

        return (error);
}

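/*
 * Adjust the trap frame of a newly created Linux thread: switch to the
 * requested stack (if any) and make the syscall return 0 in the child.
 */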
int
linux_set_upcall(struct thread *td, register_t stack)
{

        if (stack)
                td->td_frame->tf_esp = stack;

        /*
         * The newly created Linux thread returns
         * to user space by the same path its parent does.
         */
        td->td_frame->tf_eax = 0;
        return (0);
}

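/*
 * Old-style Linux mmap(2): the mmap arguments arrive packed in a
 * user-space structure rather than as individual syscall arguments.
 */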
int
linux_mmap(struct thread *td, struct linux_mmap_args *args)
{
        int error;
        struct l_mmap_argv linux_args;

        error = copyin(args->ptr, &linux_args, sizeof(linux_args));
        if (error)
                return (error);

        return (linux_mmap_common(td, linux_args.addr, linux_args.len,
            linux_args.prot, linux_args.flags, linux_args.fd,
            (uint32_t)linux_args.pgoff));
}

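/* Linux ioperm(2), implemented on top of i386_set_ioperm(). */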
int
linux_ioperm(struct thread *td, struct linux_ioperm_args *args)
{
        int error;
        struct i386_ioperm_args iia;

        iia.start = args->start;
        iia.length = args->length;
        iia.enable = args->enable;
        error = i386_set_ioperm(td, &iia);
        return (error);
}

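/*
 * Linux iopl(2): set the I/O privilege level in EFLAGS after the usual
 * privilege and securelevel checks.
 */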
int
linux_iopl(struct thread *td, struct linux_iopl_args *args)
{
        int error;

        if (args->level < 0 || args->level > 3)
                return (EINVAL);
        if ((error = priv_check(td, PRIV_IO)) != 0)
                return (error);
        if ((error = securelevel_gt(td->td_ucred, 0)) != 0)
                return (error);
        td->td_frame->tf_eflags = (td->td_frame->tf_eflags & ~PSL_IOPL) |
            (args->level * (PSL_IOPL / 3));
        return (0);
}

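/*
 * Linux modify_ldt(2): read or write LDT entries, translating between
 * the Linux descriptor layout and the native i386 one.
 */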
int
linux_modify_ldt(struct thread *td, struct linux_modify_ldt_args *uap)
{
        int error;
        struct i386_ldt_args ldt;
        struct l_descriptor ld;
        union descriptor desc;
        int size, written;

        switch (uap->func) {
        case 0x00: /* read_ldt */
                ldt.start = 0;
                ldt.descs = uap->ptr;
                ldt.num = uap->bytecount / sizeof(union descriptor);
                error = i386_get_ldt(td, &ldt);
                td->td_retval[0] *= sizeof(union descriptor);
                break;
        case 0x02: /* read_default_ldt = 0 */
                size = 5 * sizeof(struct l_desc_struct);
                if (size > uap->bytecount)
                        size = uap->bytecount;
                for (written = error = 0; written < size && error == 0; written++)
                        error = subyte((char *)uap->ptr + written, 0);
                td->td_retval[0] = written;
                break;
        case 0x01: /* write_ldt */
        case 0x11: /* write_ldt */
                if (uap->bytecount != sizeof(ld))
                        return (EINVAL);

                error = copyin(uap->ptr, &ld, sizeof(ld));
                if (error)
                        return (error);

                ldt.start = ld.entry_number;
                ldt.descs = &desc;
                ldt.num = 1;
                desc.sd.sd_lolimit = (ld.limit & 0x0000ffff);
                desc.sd.sd_hilimit = (ld.limit & 0x000f0000) >> 16;
                desc.sd.sd_lobase = (ld.base_addr & 0x00ffffff);
                desc.sd.sd_hibase = (ld.base_addr & 0xff000000) >> 24;
                desc.sd.sd_type = SDT_MEMRO | ((ld.read_exec_only ^ 1) << 1) |
                    (ld.contents << 2);
                desc.sd.sd_dpl = 3;
                desc.sd.sd_p = (ld.seg_not_present ^ 1);
                desc.sd.sd_xx = 0;
                desc.sd.sd_def32 = ld.seg_32bit;
                desc.sd.sd_gran = ld.limit_in_pages;
                error = i386_set_ldt(td, &ldt, &desc);
                break;
        default:
                error = ENOSYS;
                break;
        }

        if (error == EOPNOTSUPP) {
                linux_msg(td, "modify_ldt needs kernel option USER_LDT");
                error = ENOSYS;
        }

        return (error);
}

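/*
 * Old-style Linux sigaction(2) using the small l_osigaction_t; convert
 * to and from the full l_sigaction_t around linux_do_sigaction().
 */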
int
linux_sigaction(struct thread *td, struct linux_sigaction_args *args)
{
        l_osigaction_t osa;
        l_sigaction_t act, oact;
        int error;

        if (args->nsa != NULL) {
                error = copyin(args->nsa, &osa, sizeof(l_osigaction_t));
                if (error)
                        return (error);
                act.lsa_handler = osa.lsa_handler;
                act.lsa_flags = osa.lsa_flags;
                act.lsa_restorer = osa.lsa_restorer;
                LINUX_SIGEMPTYSET(act.lsa_mask);
                act.lsa_mask.__mask = osa.lsa_mask;
        }

        error = linux_do_sigaction(td, args->sig, args->nsa ? &act : NULL,
            args->osa ? &oact : NULL);

        if (args->osa != NULL && !error) {
                osa.lsa_handler = oact.lsa_handler;
                osa.lsa_flags = oact.lsa_flags;
                osa.lsa_restorer = oact.lsa_restorer;
                osa.lsa_mask = oact.lsa_mask.__mask;
                error = copyout(&osa, args->osa, sizeof(l_osigaction_t));
        }

        return (error);
}

/*
 * Linux has two extra args, restart and oldmask.  We don't use these,
 * but it seems that "restart" is actually a context pointer that
 * enables the signal to happen with a different register set.
 */
int
linux_sigsuspend(struct thread *td, struct linux_sigsuspend_args *args)
{
        sigset_t sigmask;
        l_sigset_t mask;

        LINUX_SIGEMPTYSET(mask);
        mask.__mask = args->mask;
        linux_to_bsd_sigset(&mask, &sigmask);
        return (kern_sigsuspend(td, sigmask));
}

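/*
 * Linux pause(2): wait for a signal using the thread's current signal
 * mask.
 */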
int
linux_pause(struct thread *td, struct linux_pause_args *args)
{
        struct proc *p = td->td_proc;
        sigset_t sigmask;

        PROC_LOCK(p);
        sigmask = td->td_sigmask;
        PROC_UNLOCK(p);
        return (kern_sigsuspend(td, sigmask));
}

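/*
 * Linux set_thread_area(2): install a TLS descriptor for the calling
 * thread.  See the comment below on how the Linux semantics are mapped
 * onto the single %gs-based TLS slot used here.
 */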
int
linux_set_thread_area(struct thread *td, struct linux_set_thread_area_args *args)
{
        struct l_user_desc info;
        int error;
        int idx;
        int a[2];
        struct segment_descriptor sd;

        error = copyin(args->desc, &info, sizeof(struct l_user_desc));
        if (error)
                return (error);

        idx = info.entry_number;
        /*
         * Semantics of the Linux version: every thread in the system has
         * an array of 3 TLS descriptors.  The 1st is the GLIBC TLS, the
         * 2nd is WINE, the 3rd is unknown.  This syscall loads one of the
         * selected TLS descriptors with a value and also loads GDT
         * descriptors 6, 7 and 8 with the content of the per-thread
         * descriptors.
         *
         * Semantics of the FreeBSD version: I think we can ignore that
         * Linux has 3 per-thread descriptors and use just the 1st one.
         * The tls_array[] is used only in the set/get_thread_area()
         * syscalls and for loading the GDT descriptors.  In FreeBSD we
         * use just one GDT descriptor for TLS, so we will load just one.
         *
         * XXX: this doesn't work when a user space process tries to use
         * more than 1 TLS segment.  A comment in the Linux sources says
         * wine might do this.
         */

        /*
         * We support just the GLIBC TLS now.  We should let 3 proceed as
         * well, because we use this segment, so if code does two
         * subsequent calls it should succeed.
         */
        if (idx != 6 && idx != -1 && idx != 3)
                return (EINVAL);

        /*
         * We have to copy out the GDT entry we use.  FreeBSD uses GDT
         * entry #3 for storing %gs, so load that.
         *
         * XXX: what if a user space program doesn't check this value and
         * tries to use 6, 7 or 8?
         */
        idx = info.entry_number = 3;
        error = copyout(&info, args->desc, sizeof(struct l_user_desc));
        if (error)
                return (error);

        if (LINUX_LDT_empty(&info)) {
                a[0] = 0;
                a[1] = 0;
        } else {
                a[0] = LINUX_LDT_entry_a(&info);
                a[1] = LINUX_LDT_entry_b(&info);
        }

        memcpy(&sd, &a, sizeof(a));
        /* This is taken from the i386 version of cpu_set_user_tls(). */
        critical_enter();
        /* set %gs */
        td->td_pcb->pcb_gsd = sd;
        PCPU_GET(fsgs_gdt)[1] = sd;
        load_gs(GSEL(GUGS_SEL, SEL_UPL));
        critical_exit();

        return (0);
}

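/*
 * Linux get_thread_area(2): return the currently loaded TLS descriptor
 * in Linux user_desc format.
 */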
int
linux_get_thread_area(struct thread *td, struct linux_get_thread_area_args *args)
{
        struct l_user_desc info;
        int error;
        int idx;
        struct l_desc_struct desc;
        struct segment_descriptor sd;

        error = copyin(args->desc, &info, sizeof(struct l_user_desc));
        if (error)
                return (error);

        idx = info.entry_number;
        /* XXX: I am not sure if we want 3 to be allowed too. */
        if (idx != 6 && idx != 3)
                return (EINVAL);

        idx = 3;

        memset(&info, 0, sizeof(info));

        sd = PCPU_GET(fsgs_gdt)[1];

        memcpy(&desc, &sd, sizeof(desc));

        info.entry_number = idx;
        info.base_addr = LINUX_GET_BASE(&desc);
        info.limit = LINUX_GET_LIMIT(&desc);
        info.seg_32bit = LINUX_GET_32BIT(&desc);
        info.contents = LINUX_GET_CONTENTS(&desc);
        info.read_exec_only = !LINUX_GET_WRITABLE(&desc);
        info.limit_in_pages = LINUX_GET_LIMIT_PAGES(&desc);
        info.seg_not_present = !LINUX_GET_PRESENT(&desc);
        info.useable = LINUX_GET_USEABLE(&desc);

        error = copyout(&info, args->desc, sizeof(struct l_user_desc));
        if (error)
                return (EFAULT);

        return (0);
}

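/*
 * Convert a native struct reg into the Linux pt_regs register set
 * layout.
 */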
void
bsd_to_linux_regset(const struct reg *b_reg,
    struct linux_pt_regset *l_regset)
{

        l_regset->ebx = b_reg->r_ebx;
        l_regset->ecx = b_reg->r_ecx;
        l_regset->edx = b_reg->r_edx;
        l_regset->esi = b_reg->r_esi;
        l_regset->edi = b_reg->r_edi;
        l_regset->ebp = b_reg->r_ebp;
        l_regset->eax = b_reg->r_eax;
        l_regset->ds = b_reg->r_ds;
        l_regset->es = b_reg->r_es;
        l_regset->fs = b_reg->r_fs;
        l_regset->gs = b_reg->r_gs;
        l_regset->orig_eax = b_reg->r_eax;
        l_regset->eip = b_reg->r_eip;
        l_regset->cs = b_reg->r_cs;
        l_regset->eflags = b_reg->r_eflags;
        l_regset->esp = b_reg->r_esp;
        l_regset->ss = b_reg->r_ss;
}

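/*
 * Linux uselib(2): map an a.out format shared library into the calling
 * process's address space.
 */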
int
linux_uselib(struct thread *td, struct linux_uselib_args *args)
{
        struct nameidata ni;
        struct vnode *vp;
        struct exec *a_out;
        vm_map_t map;
        vm_map_entry_t entry;
        struct vattr attr;
        vm_offset_t vmaddr;
        unsigned long file_offset;
        unsigned long bss_size;
        ssize_t aresid;
        int error;
        bool locked, opened, textset;

        a_out = NULL;
        vp = NULL;
        locked = false;
        textset = false;
        opened = false;

        NDINIT(&ni, LOOKUP, ISOPEN | FOLLOW | LOCKLEAF | AUDITVNODE1,
            UIO_USERSPACE, args->library);
        error = namei(&ni);
        if (error)
                goto cleanup;

        vp = ni.ni_vp;
        NDFREE_PNBUF(&ni);

        /*
         * From here on down, we have a locked vnode that must be unlocked.
         * XXX: The code below largely duplicates exec_check_permissions().
         */
        locked = true;

        /* Executable? */
        error = VOP_GETATTR(vp, &attr, td->td_ucred);
        if (error)
                goto cleanup;

        if ((vp->v_mount->mnt_flag & MNT_NOEXEC) ||
            ((attr.va_mode & 0111) == 0) || (attr.va_type != VREG)) {
                /* EACCES is what exec(2) returns. */
                error = ENOEXEC;
                goto cleanup;
        }

        /* Sensible size? */
        if (attr.va_size == 0) {
                error = ENOEXEC;
                goto cleanup;
        }

        /* Can we access it? */
        error = VOP_ACCESS(vp, VEXEC, td->td_ucred, td);
        if (error)
                goto cleanup;

        /*
         * XXX: This should use vn_open() so that it is properly authorized,
         * and to reduce code redundancy all over the place here.
         * XXX: Not really, it duplicates far more of exec_check_permissions()
         * than vn_open().
         */
#ifdef MAC
        error = mac_vnode_check_open(td->td_ucred, vp, VREAD);
        if (error)
                goto cleanup;
#endif
        error = VOP_OPEN(vp, FREAD, td->td_ucred, td, NULL);
        if (error)
                goto cleanup;
        opened = true;

        /* Pull in executable header into exec_map */
        error = vm_mmap(exec_map, (vm_offset_t *)&a_out, PAGE_SIZE,
            VM_PROT_READ, VM_PROT_READ, 0, OBJT_VNODE, vp, 0);
        if (error)
                goto cleanup;

        /* Is it a Linux binary? */
        if (((a_out->a_magic >> 16) & 0xff) != 0x64) {
                error = ENOEXEC;
                goto cleanup;
        }

        /*
         * While we are here, we should REALLY do some more checks.
         */

        /* Set file/virtual offset based on a.out variant. */
        switch ((int)(a_out->a_magic & 0xffff)) {
        case 0413:      /* ZMAGIC */
                file_offset = 1024;
                break;
        case 0314:      /* QMAGIC */
                file_offset = 0;
                break;
        default:
                error = ENOEXEC;
                goto cleanup;
        }

        bss_size = round_page(a_out->a_bss);

        /* Check various fields in header for validity/bounds. */
        if (a_out->a_text & PAGE_MASK || a_out->a_data & PAGE_MASK) {
                error = ENOEXEC;
                goto cleanup;
        }

        /* text + data can't exceed file size */
        if (a_out->a_data + a_out->a_text > attr.va_size) {
                error = EFAULT;
                goto cleanup;
        }

        /*
         * text/data/bss must not exceed limits
         * XXX - this is not complete.  It should check current usage PLUS
         * the resources needed by this library.
         */
        PROC_LOCK(td->td_proc);
        if (a_out->a_text > maxtsiz ||
            a_out->a_data + bss_size > lim_cur_proc(td->td_proc, RLIMIT_DATA) ||
            racct_set(td->td_proc, RACCT_DATA, a_out->a_data +
            bss_size) != 0) {
                PROC_UNLOCK(td->td_proc);
                error = ENOMEM;
                goto cleanup;
        }
        PROC_UNLOCK(td->td_proc);

        /*
         * Prevent more writers.
         */
        error = VOP_SET_TEXT(vp);
        if (error != 0)
                goto cleanup;
        textset = true;

        /*
         * Lock no longer needed.
         */
        locked = false;
        VOP_UNLOCK(vp);

        /*
         * Check if file_offset is page aligned.  Currently we cannot handle
         * misaligned file offsets, and so we read in the entire image
         * (what a waste).
         */
        if (file_offset & PAGE_MASK) {
                /* Map text+data read/write/execute */

                /* a_entry is the load address and is page aligned */
                vmaddr = trunc_page(a_out->a_entry);

                /* get anon user mapping, read+write+execute */
                error = vm_map_find(&td->td_proc->p_vmspace->vm_map, NULL, 0,
                    &vmaddr, a_out->a_text + a_out->a_data, 0, VMFS_NO_SPACE,
                    VM_PROT_ALL, VM_PROT_ALL, 0);
                if (error)
                        goto cleanup;

                error = vn_rdwr(UIO_READ, vp, (void *)vmaddr, file_offset,
                    a_out->a_text + a_out->a_data, UIO_USERSPACE, 0,
                    td->td_ucred, NOCRED, &aresid, td);
                if (error != 0)
                        goto cleanup;
                if (aresid != 0) {
                        error = ENOEXEC;
                        goto cleanup;
                }
        } else {
                /*
                 * for QMAGIC, a_entry is 20 bytes beyond the load address
                 * to skip the executable header
                 */
                vmaddr = trunc_page(a_out->a_entry);

                /*
                 * Map it all into the process's space as a single
                 * copy-on-write "data" segment.
                 */
                map = &td->td_proc->p_vmspace->vm_map;
                error = vm_mmap(map, &vmaddr,
                    a_out->a_text + a_out->a_data, VM_PROT_ALL, VM_PROT_ALL,
                    MAP_PRIVATE | MAP_FIXED, OBJT_VNODE, vp, file_offset);
                if (error)
                        goto cleanup;
                vm_map_lock(map);
                if (!vm_map_lookup_entry(map, vmaddr, &entry)) {
                        vm_map_unlock(map);
                        error = EDOOFUS;
                        goto cleanup;
                }
                entry->eflags |= MAP_ENTRY_VN_EXEC;
                vm_map_unlock(map);
                textset = false;
        }

        if (bss_size != 0) {
                /* Calculate BSS start address */
                vmaddr = trunc_page(a_out->a_entry) + a_out->a_text +
                    a_out->a_data;

                /* allocate some 'anon' space */
                error = vm_map_find(&td->td_proc->p_vmspace->vm_map, NULL, 0,
                    &vmaddr, bss_size, 0, VMFS_NO_SPACE, VM_PROT_ALL,
                    VM_PROT_ALL, 0);
                if (error)
                        goto cleanup;
        }

cleanup:
        if (opened) {
                if (locked)
                        VOP_UNLOCK(vp);
                locked = false;
                VOP_CLOSE(vp, FREAD, td->td_ucred, td);
        }
        if (textset) {
                if (!locked) {
                        locked = true;
                        VOP_LOCK(vp, LK_SHARED | LK_RETRY);
                }
                VOP_UNSET_TEXT_CHECKED(vp);
        }
        if (locked)
                VOP_UNLOCK(vp);

        /* Release the temporary mapping. */
        if (a_out)
                kmap_free_wakeup(exec_map, (vm_offset_t)a_out, PAGE_SIZE);

        return (error);
}
859