1 /* $NetBSD: exec_elf.c,v 1.98 2019/06/07 23:35:52 christos Exp $ */ 2 3 /*- 4 * Copyright (c) 1994, 2000, 2005, 2015 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Christos Zoulas and Maxime Villard. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 30 */ 31 32 /* 33 * Copyright (c) 1996 Christopher G. Demetriou 34 * All rights reserved. 35 * 36 * Redistribution and use in source and binary forms, with or without 37 * modification, are permitted provided that the following conditions 38 * are met: 39 * 1. Redistributions of source code must retain the above copyright 40 * notice, this list of conditions and the following disclaimer. 41 * 2. Redistributions in binary form must reproduce the above copyright 42 * notice, this list of conditions and the following disclaimer in the 43 * documentation and/or other materials provided with the distribution. 44 * 3. The name of the author may not be used to endorse or promote products 45 * derived from this software without specific prior written permission 46 * 47 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 48 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 49 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 50 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 51 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 52 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 53 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 54 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 55 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 56 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 57 */ 58 59 #include <sys/cdefs.h> 60 __KERNEL_RCSID(1, "$NetBSD: exec_elf.c,v 1.98 2019/06/07 23:35:52 christos Exp $"); 61 62 #ifdef _KERNEL_OPT 63 #include "opt_pax.h" 64 #endif /* _KERNEL_OPT */ 65 66 #include <sys/param.h> 67 #include <sys/proc.h> 68 #include <sys/kmem.h> 69 #include <sys/namei.h> 70 #include <sys/vnode.h> 71 #include <sys/exec.h> 72 #include <sys/exec_elf.h> 73 #include <sys/syscall.h> 74 #include <sys/signalvar.h> 75 #include <sys/mount.h> 76 #include <sys/stat.h> 77 #include <sys/kauth.h> 78 #include <sys/bitops.h> 79 80 #include <sys/cpu.h> 81 #include <machine/reg.h> 82 83 #include <compat/common/compat_util.h> 84 85 #include <sys/pax.h> 86 #include <uvm/uvm_param.h> 87 88 extern struct emul emul_netbsd; 89 90 #define elf_check_header ELFNAME(check_header) 91 #define elf_copyargs ELFNAME(copyargs) 92 #define elf_populate_auxv ELFNAME(populate_auxv) 93 #define elf_load_interp ELFNAME(load_interp) 94 #define elf_load_psection ELFNAME(load_psection) 95 #define exec_elf_makecmds ELFNAME2(exec,makecmds) 96 #define netbsd_elf_signature ELFNAME2(netbsd,signature) 97 #define netbsd_elf_note ELFNAME2(netbsd,note) 98 #define netbsd_elf_probe ELFNAME2(netbsd,probe) 99 #define coredump ELFNAMEEND(coredump) 100 #define elf_free_emul_arg ELFNAME(free_emul_arg) 101 102 static int 103 elf_load_interp(struct lwp *, struct exec_package *, char *, 104 struct exec_vmcmd_set *, u_long *, Elf_Addr *); 105 static int 106 elf_load_psection(struct exec_vmcmd_set *, struct vnode *, const Elf_Phdr *, 107 Elf_Addr *, u_long *, int); 108 109 int netbsd_elf_signature(struct lwp *, struct exec_package *, Elf_Ehdr *); 110 int netbsd_elf_note(struct exec_package *, const Elf_Nhdr *, const char *, 111 const char *); 112 int netbsd_elf_probe(struct lwp *, struct exec_package *, void *, char *, 113 vaddr_t *); 114 115 static void elf_free_emul_arg(void *); 116 117 #ifdef DEBUG_ELF 118 #define DPRINTF(a, ...) printf("%s: " a "\n", __func__, ##__VA_ARGS__) 119 #else 120 #define DPRINTF(a, ...) 121 #endif 122 123 /* round up and down to page boundaries. */ 124 #define ELF_ROUND(a, b) (((a) + (b) - 1) & ~((b) - 1)) 125 #define ELF_TRUNC(a, b) ((a) & ~((b) - 1)) 126 127 static int 128 elf_placedynexec(struct exec_package *epp, Elf_Ehdr *eh, Elf_Phdr *ph) 129 { 130 Elf_Addr align, offset; 131 int i; 132 133 for (align = 1, i = 0; i < eh->e_phnum; i++) 134 if (ph[i].p_type == PT_LOAD && ph[i].p_align > align) 135 align = ph[i].p_align; 136 137 offset = (Elf_Addr)pax_aslr_exec_offset(epp, align); 138 if (offset < epp->ep_vm_minaddr) 139 offset = roundup(epp->ep_vm_minaddr, align); 140 if ((offset & (align - 1)) != 0) { 141 DPRINTF("bad offset=%#jx align=%#jx", 142 (uintmax_t)offset, (uintmax_t)align); 143 return EINVAL; 144 } 145 146 for (i = 0; i < eh->e_phnum; i++) 147 ph[i].p_vaddr += offset; 148 epp->ep_entryoffset = offset; 149 eh->e_entry += offset; 150 return 0; 151 } 152 153 154 int 155 elf_populate_auxv(struct lwp *l, struct exec_package *pack, char **stackp) 156 { 157 size_t len, vlen; 158 AuxInfo ai[ELF_AUX_ENTRIES], *a, *execname; 159 struct elf_args *ap; 160 int error; 161 162 a = ai; 163 164 memset(ai, 0, sizeof(ai)); 165 166 /* 167 * Push extra arguments on the stack needed by dynamically 168 * linked binaries 169 */ 170 if ((ap = (struct elf_args *)pack->ep_emul_arg)) { 171 struct vattr *vap = pack->ep_vap; 172 173 a->a_type = AT_PHDR; 174 a->a_v = ap->arg_phaddr; 175 a++; 176 177 a->a_type = AT_PHENT; 178 a->a_v = ap->arg_phentsize; 179 a++; 180 181 a->a_type = AT_PHNUM; 182 a->a_v = ap->arg_phnum; 183 a++; 184 185 a->a_type = AT_PAGESZ; 186 a->a_v = PAGE_SIZE; 187 a++; 188 189 a->a_type = AT_BASE; 190 a->a_v = ap->arg_interp; 191 a++; 192 193 a->a_type = AT_FLAGS; 194 a->a_v = 0; 195 a++; 196 197 a->a_type = AT_ENTRY; 198 a->a_v = ap->arg_entry; 199 a++; 200 201 a->a_type = AT_EUID; 202 if (vap->va_mode & S_ISUID) 203 a->a_v = vap->va_uid; 204 else 205 a->a_v = kauth_cred_geteuid(l->l_cred); 206 a++; 207 208 a->a_type = AT_RUID; 209 a->a_v = kauth_cred_getuid(l->l_cred); 210 a++; 211 212 a->a_type = AT_EGID; 213 if (vap->va_mode & S_ISGID) 214 a->a_v = vap->va_gid; 215 else 216 a->a_v = kauth_cred_getegid(l->l_cred); 217 a++; 218 219 a->a_type = AT_RGID; 220 a->a_v = kauth_cred_getgid(l->l_cred); 221 a++; 222 223 a->a_type = AT_STACKBASE; 224 a->a_v = l->l_proc->p_stackbase; 225 a++; 226 227 execname = a; 228 a->a_type = AT_SUN_EXECNAME; 229 a++; 230 231 exec_free_emul_arg(pack); 232 } else { 233 execname = NULL; 234 } 235 236 a->a_type = AT_NULL; 237 a->a_v = 0; 238 a++; 239 240 vlen = (a - ai) * sizeof(ai[0]); 241 242 KASSERT(vlen <= sizeof(ai)); 243 244 if (execname) { 245 char *path = l->l_proc->p_path; 246 execname->a_v = (uintptr_t)(*stackp + vlen); 247 len = strlen(path) + 1; 248 if ((error = copyout(path, (*stackp + vlen), len)) != 0) 249 return error; 250 len = ALIGN(len); 251 } else { 252 len = 0; 253 } 254 255 if ((error = copyout(ai, *stackp, vlen)) != 0) 256 return error; 257 *stackp += vlen + len; 258 259 return 0; 260 } 261 262 /* 263 * Copy arguments onto the stack in the normal way, but add some 264 * extra information in case of dynamic binding. 265 */ 266 int 267 elf_copyargs(struct lwp *l, struct exec_package *pack, 268 struct ps_strings *arginfo, char **stackp, void *argp) 269 { 270 int error; 271 272 if ((error = copyargs(l, pack, arginfo, stackp, argp)) != 0) 273 return error; 274 275 return elf_populate_auxv(l, pack, stackp); 276 } 277 278 /* 279 * elf_check_header(): 280 * 281 * Check header for validity; return 0 if ok, ENOEXEC if error 282 */ 283 int 284 elf_check_header(Elf_Ehdr *eh) 285 { 286 287 if (memcmp(eh->e_ident, ELFMAG, SELFMAG) != 0 || 288 eh->e_ident[EI_CLASS] != ELFCLASS) { 289 DPRINTF("bad magic e_ident[EI_MAG0,EI_MAG3] %#x%x%x%x, " 290 "e_ident[EI_CLASS] %#x", eh->e_ident[EI_MAG0], 291 eh->e_ident[EI_MAG1], eh->e_ident[EI_MAG2], 292 eh->e_ident[EI_MAG3], eh->e_ident[EI_CLASS]); 293 return ENOEXEC; 294 } 295 296 switch (eh->e_machine) { 297 298 ELFDEFNNAME(MACHDEP_ID_CASES) 299 300 default: 301 DPRINTF("bad machine %#x", eh->e_machine); 302 return ENOEXEC; 303 } 304 305 if (ELF_EHDR_FLAGS_OK(eh) == 0) { 306 DPRINTF("bad flags %#x", eh->e_flags); 307 return ENOEXEC; 308 } 309 310 if (eh->e_shnum > ELF_MAXSHNUM || eh->e_phnum > ELF_MAXPHNUM) { 311 DPRINTF("bad shnum/phnum %#x/%#x", eh->e_shnum, eh->e_phnum); 312 return ENOEXEC; 313 } 314 315 return 0; 316 } 317 318 /* 319 * elf_load_psection(): 320 * 321 * Load a psection at the appropriate address 322 */ 323 static int 324 elf_load_psection(struct exec_vmcmd_set *vcset, struct vnode *vp, 325 const Elf_Phdr *ph, Elf_Addr *addr, u_long *size, int flags) 326 { 327 u_long msize, psize, rm, rf; 328 long diff, offset; 329 int vmprot = 0; 330 331 /* 332 * If the user specified an address, then we load there. 333 */ 334 if (*addr == ELFDEFNNAME(NO_ADDR)) 335 *addr = ph->p_vaddr; 336 337 if (ph->p_align > 1) { 338 /* 339 * Make sure we are virtually aligned as we are supposed to be. 340 */ 341 diff = ph->p_vaddr - ELF_TRUNC(ph->p_vaddr, ph->p_align); 342 if (*addr - diff != ELF_TRUNC(*addr, ph->p_align)) { 343 DPRINTF("bad alignment %#jx != %#jx\n", 344 (uintptr_t)(*addr - diff), 345 (uintptr_t)ELF_TRUNC(*addr, ph->p_align)); 346 return EINVAL; 347 } 348 /* 349 * But make sure to not map any pages before the start of the 350 * psection by limiting the difference to within a page. 351 */ 352 diff &= PAGE_MASK; 353 } else 354 diff = 0; 355 356 vmprot |= (ph->p_flags & PF_R) ? VM_PROT_READ : 0; 357 vmprot |= (ph->p_flags & PF_W) ? VM_PROT_WRITE : 0; 358 vmprot |= (ph->p_flags & PF_X) ? VM_PROT_EXECUTE : 0; 359 360 /* 361 * Adjust everything so it all starts on a page boundary. 362 */ 363 *addr -= diff; 364 offset = ph->p_offset - diff; 365 *size = ph->p_filesz + diff; 366 msize = ph->p_memsz + diff; 367 368 if (ph->p_align >= PAGE_SIZE) { 369 if ((ph->p_flags & PF_W) != 0) { 370 /* 371 * Because the pagedvn pager can't handle zero fill 372 * of the last data page if it's not page aligned we 373 * map the last page readvn. 374 */ 375 psize = trunc_page(*size); 376 } else { 377 psize = round_page(*size); 378 } 379 } else { 380 psize = *size; 381 } 382 383 if (psize > 0) { 384 NEW_VMCMD2(vcset, ph->p_align < PAGE_SIZE ? 385 vmcmd_map_readvn : vmcmd_map_pagedvn, psize, *addr, vp, 386 offset, vmprot, flags); 387 flags &= VMCMD_RELATIVE; 388 } 389 if (psize < *size) { 390 NEW_VMCMD2(vcset, vmcmd_map_readvn, *size - psize, 391 *addr + psize, vp, offset + psize, vmprot, flags); 392 } 393 394 /* 395 * Check if we need to extend the size of the segment (does 396 * bss extend page the next page boundary)? 397 */ 398 rm = round_page(*addr + msize); 399 rf = round_page(*addr + *size); 400 401 if (rm != rf) { 402 NEW_VMCMD2(vcset, vmcmd_map_zero, rm - rf, rf, NULLVP, 403 0, vmprot, flags & VMCMD_RELATIVE); 404 *size = msize; 405 } 406 return 0; 407 } 408 409 /* 410 * elf_load_interp(): 411 * 412 * Load an interpreter pointed to by path. 413 */ 414 static int 415 elf_load_interp(struct lwp *l, struct exec_package *epp, char *path, 416 struct exec_vmcmd_set *vcset, u_long *entryoff, Elf_Addr *last) 417 { 418 int error, i; 419 struct vnode *vp; 420 struct vattr attr; 421 Elf_Ehdr eh; 422 Elf_Phdr *ph = NULL; 423 const Elf_Phdr *base_ph; 424 const Elf_Phdr *last_ph; 425 u_long phsize; 426 Elf_Addr addr = *last; 427 struct proc *p; 428 bool use_topdown; 429 430 p = l->l_proc; 431 432 KASSERT(p->p_vmspace); 433 KASSERT(p->p_vmspace != proc0.p_vmspace); 434 435 #ifdef __USE_TOPDOWN_VM 436 use_topdown = epp->ep_flags & EXEC_TOPDOWN_VM; 437 #else 438 use_topdown = false; 439 #endif 440 441 /* 442 * 1. open file 443 * 2. read filehdr 444 * 3. map text, data, and bss out of it using VM_* 445 */ 446 vp = epp->ep_interp; 447 if (vp == NULL) { 448 error = emul_find_interp(l, epp, path); 449 if (error != 0) 450 return error; 451 vp = epp->ep_interp; 452 } 453 /* We'll tidy this ourselves - otherwise we have locking issues */ 454 epp->ep_interp = NULL; 455 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 456 457 /* 458 * Similarly, if it's not marked as executable, or it's not a regular 459 * file, we don't allow it to be used. 460 */ 461 if (vp->v_type != VREG) { 462 error = EACCES; 463 goto badunlock; 464 } 465 if ((error = VOP_ACCESS(vp, VEXEC, l->l_cred)) != 0) 466 goto badunlock; 467 468 /* get attributes */ 469 if ((error = VOP_GETATTR(vp, &attr, l->l_cred)) != 0) 470 goto badunlock; 471 472 /* 473 * Check mount point. Though we're not trying to exec this binary, 474 * we will be executing code from it, so if the mount point 475 * disallows execution or set-id-ness, we punt or kill the set-id. 476 */ 477 if (vp->v_mount->mnt_flag & MNT_NOEXEC) { 478 error = EACCES; 479 goto badunlock; 480 } 481 if (vp->v_mount->mnt_flag & MNT_NOSUID) 482 epp->ep_vap->va_mode &= ~(S_ISUID | S_ISGID); 483 484 error = vn_marktext(vp); 485 if (error) 486 goto badunlock; 487 488 VOP_UNLOCK(vp); 489 490 if ((error = exec_read_from(l, vp, 0, &eh, sizeof(eh))) != 0) 491 goto bad; 492 493 if ((error = elf_check_header(&eh)) != 0) 494 goto bad; 495 if (eh.e_type != ET_DYN || eh.e_phnum == 0) { 496 DPRINTF("bad interpreter type %#x", eh.e_type); 497 error = ENOEXEC; 498 goto bad; 499 } 500 501 phsize = eh.e_phnum * sizeof(Elf_Phdr); 502 ph = kmem_alloc(phsize, KM_SLEEP); 503 504 if ((error = exec_read_from(l, vp, eh.e_phoff, ph, phsize)) != 0) 505 goto bad; 506 507 #ifdef ELF_INTERP_NON_RELOCATABLE 508 /* 509 * Evil hack: Only MIPS should be non-relocatable, and the 510 * psections should have a high address (typically 0x5ffe0000). 511 * If it's now relocatable, it should be linked at 0 and the 512 * psections should have zeros in the upper part of the address. 513 * Otherwise, force the load at the linked address. 514 */ 515 if (*last == ELF_LINK_ADDR && (ph->p_vaddr & 0xffff0000) == 0) 516 *last = ELFDEFNNAME(NO_ADDR); 517 #endif 518 519 /* 520 * If no position to load the interpreter was set by a probe 521 * function, pick the same address that a non-fixed mmap(0, ..) 522 * would (i.e. something safely out of the way). 523 */ 524 if (*last == ELFDEFNNAME(NO_ADDR)) { 525 u_long limit = 0; 526 /* 527 * Find the start and ending addresses of the psections to 528 * be loaded. This will give us the size. 529 */ 530 for (i = 0, base_ph = NULL; i < eh.e_phnum; i++) { 531 if (ph[i].p_type == PT_LOAD) { 532 u_long psize = ph[i].p_vaddr + ph[i].p_memsz; 533 if (base_ph == NULL) 534 base_ph = &ph[i]; 535 if (psize > limit) 536 limit = psize; 537 } 538 } 539 540 if (base_ph == NULL) { 541 DPRINTF("no interpreter loadable sections"); 542 error = ENOEXEC; 543 goto bad; 544 } 545 546 /* 547 * Now compute the size and load address. 548 */ 549 addr = (*epp->ep_esch->es_emul->e_vm_default_addr)(p, 550 epp->ep_daddr, 551 round_page(limit) - trunc_page(base_ph->p_vaddr), 552 use_topdown); 553 addr += (Elf_Addr)pax_aslr_rtld_offset(epp, base_ph->p_align, 554 use_topdown); 555 } else { 556 addr = *last; /* may be ELF_LINK_ADDR */ 557 } 558 559 /* 560 * Load all the necessary sections 561 */ 562 for (i = 0, base_ph = NULL, last_ph = NULL; i < eh.e_phnum; i++) { 563 switch (ph[i].p_type) { 564 case PT_LOAD: { 565 u_long size; 566 int flags; 567 568 if (base_ph == NULL) { 569 /* 570 * First encountered psection is always the 571 * base psection. Make sure it's aligned 572 * properly (align down for topdown and align 573 * upwards for not topdown). 574 */ 575 base_ph = &ph[i]; 576 flags = VMCMD_BASE; 577 if (addr == ELF_LINK_ADDR) 578 addr = ph[i].p_vaddr; 579 if (use_topdown) 580 addr = ELF_TRUNC(addr, ph[i].p_align); 581 else 582 addr = ELF_ROUND(addr, ph[i].p_align); 583 } else { 584 u_long limit = round_page(last_ph->p_vaddr 585 + last_ph->p_memsz); 586 u_long base = trunc_page(ph[i].p_vaddr); 587 588 /* 589 * If there is a gap in between the psections, 590 * map it as inaccessible so nothing else 591 * mmap'ed will be placed there. 592 */ 593 if (limit != base) { 594 NEW_VMCMD2(vcset, vmcmd_map_zero, 595 base - limit, 596 limit - base_ph->p_vaddr, NULLVP, 597 0, VM_PROT_NONE, VMCMD_RELATIVE); 598 } 599 600 addr = ph[i].p_vaddr - base_ph->p_vaddr; 601 flags = VMCMD_RELATIVE; 602 } 603 last_ph = &ph[i]; 604 if ((error = elf_load_psection(vcset, vp, &ph[i], &addr, 605 &size, flags)) != 0) 606 goto bad; 607 /* 608 * If entry is within this psection then this 609 * must contain the .text section. *entryoff is 610 * relative to the base psection. 611 */ 612 if (eh.e_entry >= ph[i].p_vaddr && 613 eh.e_entry < (ph[i].p_vaddr + size)) { 614 *entryoff = eh.e_entry - base_ph->p_vaddr; 615 } 616 addr += size; 617 break; 618 } 619 620 default: 621 break; 622 } 623 } 624 625 kmem_free(ph, phsize); 626 /* 627 * This value is ignored if TOPDOWN. 628 */ 629 *last = addr; 630 vrele(vp); 631 return 0; 632 633 badunlock: 634 VOP_UNLOCK(vp); 635 636 bad: 637 if (ph != NULL) 638 kmem_free(ph, phsize); 639 vrele(vp); 640 return error; 641 } 642 643 /* 644 * exec_elf_makecmds(): Prepare an Elf binary's exec package 645 * 646 * First, set of the various offsets/lengths in the exec package. 647 * 648 * Then, mark the text image busy (so it can be demand paged) or error 649 * out if this is not possible. Finally, set up vmcmds for the 650 * text, data, bss, and stack segments. 651 */ 652 int 653 exec_elf_makecmds(struct lwp *l, struct exec_package *epp) 654 { 655 Elf_Ehdr *eh = epp->ep_hdr; 656 Elf_Phdr *ph, *pp; 657 Elf_Addr phdr = 0, computed_phdr = 0, pos = 0, end_text = 0; 658 int error, i; 659 char *interp = NULL; 660 u_long phsize; 661 struct elf_args *ap; 662 bool is_dyn = false; 663 664 if (epp->ep_hdrvalid < sizeof(Elf_Ehdr)) { 665 DPRINTF("small header %#x", epp->ep_hdrvalid); 666 return ENOEXEC; 667 } 668 if ((error = elf_check_header(eh)) != 0) 669 return error; 670 671 if (eh->e_type == ET_DYN) 672 /* PIE, and some libs have an entry point */ 673 is_dyn = true; 674 else if (eh->e_type != ET_EXEC) { 675 DPRINTF("bad type %#x", eh->e_type); 676 return ENOEXEC; 677 } 678 679 if (eh->e_phnum == 0) { 680 DPRINTF("no program headers"); 681 return ENOEXEC; 682 } 683 684 error = vn_marktext(epp->ep_vp); 685 if (error) 686 return error; 687 688 /* 689 * Allocate space to hold all the program headers, and read them 690 * from the file 691 */ 692 phsize = eh->e_phnum * sizeof(Elf_Phdr); 693 ph = kmem_alloc(phsize, KM_SLEEP); 694 695 if ((error = exec_read_from(l, epp->ep_vp, eh->e_phoff, ph, phsize)) != 696 0) 697 goto bad; 698 699 epp->ep_taddr = epp->ep_tsize = ELFDEFNNAME(NO_ADDR); 700 epp->ep_daddr = epp->ep_dsize = ELFDEFNNAME(NO_ADDR); 701 702 for (i = 0; i < eh->e_phnum; i++) { 703 pp = &ph[i]; 704 if (pp->p_type == PT_INTERP) { 705 if (pp->p_filesz < 2 || pp->p_filesz > MAXPATHLEN) { 706 DPRINTF("bad interpreter namelen %#jx", 707 (uintmax_t)pp->p_filesz); 708 error = ENOEXEC; 709 goto bad; 710 } 711 interp = PNBUF_GET(); 712 if ((error = exec_read_from(l, epp->ep_vp, 713 pp->p_offset, interp, pp->p_filesz)) != 0) 714 goto bad; 715 /* Ensure interp is NUL-terminated and of the expected length */ 716 if (strnlen(interp, pp->p_filesz) != pp->p_filesz - 1) { 717 DPRINTF("bad interpreter name"); 718 error = ENOEXEC; 719 goto bad; 720 } 721 break; 722 } 723 } 724 725 /* 726 * On the same architecture, we may be emulating different systems. 727 * See which one will accept this executable. 728 * 729 * Probe functions would normally see if the interpreter (if any) 730 * exists. Emulation packages may possibly replace the interpreter in 731 * interp with a changed path (/emul/xxx/<path>). 732 */ 733 pos = ELFDEFNNAME(NO_ADDR); 734 if (epp->ep_esch->u.elf_probe_func) { 735 vaddr_t startp = (vaddr_t)pos; 736 737 error = (*epp->ep_esch->u.elf_probe_func)(l, epp, eh, interp, 738 &startp); 739 if (error) 740 goto bad; 741 pos = (Elf_Addr)startp; 742 } 743 744 if (is_dyn && (error = elf_placedynexec(epp, eh, ph)) != 0) 745 goto bad; 746 747 /* 748 * Load all the necessary sections 749 */ 750 for (i = 0; i < eh->e_phnum; i++) { 751 Elf_Addr addr = ELFDEFNNAME(NO_ADDR); 752 u_long size = 0; 753 754 switch (ph[i].p_type) { 755 case PT_LOAD: 756 if ((error = elf_load_psection(&epp->ep_vmcmds, 757 epp->ep_vp, &ph[i], &addr, &size, VMCMD_FIXED)) 758 != 0) 759 goto bad; 760 761 /* 762 * Consider this as text segment, if it is executable. 763 * If there is more than one text segment, pick the 764 * largest. 765 */ 766 if (ph[i].p_flags & PF_X) { 767 if (epp->ep_taddr == ELFDEFNNAME(NO_ADDR) || 768 size > epp->ep_tsize) { 769 epp->ep_taddr = addr; 770 epp->ep_tsize = size; 771 } 772 end_text = addr + size; 773 } else { 774 epp->ep_daddr = addr; 775 epp->ep_dsize = size; 776 } 777 if (ph[i].p_offset == 0) { 778 computed_phdr = ph[i].p_vaddr + eh->e_phoff; 779 } 780 break; 781 782 case PT_SHLIB: 783 /* SCO has these sections. */ 784 case PT_INTERP: 785 /* Already did this one. */ 786 case PT_DYNAMIC: 787 case PT_NOTE: 788 break; 789 case PT_PHDR: 790 /* Note address of program headers (in text segment) */ 791 phdr = ph[i].p_vaddr; 792 break; 793 794 default: 795 /* 796 * Not fatal; we don't need to understand everything. 797 */ 798 break; 799 } 800 } 801 802 if (epp->ep_vmcmds.evs_used == 0) { 803 /* No VMCMD; there was no PT_LOAD section, or those 804 * sections were empty */ 805 DPRINTF("no vmcommands"); 806 error = ENOEXEC; 807 goto bad; 808 } 809 810 if (epp->ep_daddr == ELFDEFNNAME(NO_ADDR)) { 811 epp->ep_daddr = round_page(end_text); 812 epp->ep_dsize = 0; 813 } 814 815 /* 816 * Check if we found a dynamically linked binary and arrange to load 817 * its interpreter 818 */ 819 if (interp) { 820 u_int nused = epp->ep_vmcmds.evs_used; 821 u_long interp_offset = 0; 822 823 if ((error = elf_load_interp(l, epp, interp, 824 &epp->ep_vmcmds, &interp_offset, &pos)) != 0) { 825 goto bad; 826 } 827 if (epp->ep_vmcmds.evs_used == nused) { 828 /* elf_load_interp() has not set up any new VMCMD */ 829 DPRINTF("no vmcommands for interpreter"); 830 error = ENOEXEC; 831 goto bad; 832 } 833 834 ap = kmem_alloc(sizeof(*ap), KM_SLEEP); 835 ap->arg_interp = epp->ep_vmcmds.evs_cmds[nused].ev_addr; 836 epp->ep_entryoffset = interp_offset; 837 epp->ep_entry = ap->arg_interp + interp_offset; 838 PNBUF_PUT(interp); 839 interp = NULL; 840 } else { 841 epp->ep_entry = eh->e_entry; 842 if (epp->ep_flags & EXEC_FORCEAUX) { 843 ap = kmem_zalloc(sizeof(*ap), KM_SLEEP); 844 ap->arg_interp = (vaddr_t)NULL; 845 } else { 846 ap = NULL; 847 } 848 } 849 850 if (ap) { 851 ap->arg_phaddr = phdr ? phdr : computed_phdr; 852 ap->arg_phentsize = eh->e_phentsize; 853 ap->arg_phnum = eh->e_phnum; 854 ap->arg_entry = eh->e_entry; 855 epp->ep_emul_arg = ap; 856 epp->ep_emul_arg_free = elf_free_emul_arg; 857 } 858 859 #ifdef ELF_MAP_PAGE_ZERO 860 /* Dell SVR4 maps page zero, yeuch! */ 861 NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_readvn, PAGE_SIZE, 0, 862 epp->ep_vp, 0, VM_PROT_READ); 863 #endif 864 865 error = (*epp->ep_esch->es_setup_stack)(l, epp); 866 if (error) 867 goto bad; 868 869 kmem_free(ph, phsize); 870 return 0; 871 872 bad: 873 if (interp) 874 PNBUF_PUT(interp); 875 exec_free_emul_arg(epp); 876 kmem_free(ph, phsize); 877 kill_vmcmds(&epp->ep_vmcmds); 878 return error; 879 } 880 881 int 882 netbsd_elf_signature(struct lwp *l, struct exec_package *epp, 883 Elf_Ehdr *eh) 884 { 885 size_t i; 886 Elf_Phdr *ph; 887 size_t phsize; 888 char *nbuf; 889 int error; 890 int isnetbsd = 0; 891 892 epp->ep_pax_flags = 0; 893 894 if (eh->e_phnum > ELF_MAXPHNUM || eh->e_phnum == 0) { 895 DPRINTF("no signature %#x", eh->e_phnum); 896 return ENOEXEC; 897 } 898 899 phsize = eh->e_phnum * sizeof(Elf_Phdr); 900 ph = kmem_alloc(phsize, KM_SLEEP); 901 error = exec_read_from(l, epp->ep_vp, eh->e_phoff, ph, phsize); 902 if (error) 903 goto out; 904 905 nbuf = kmem_alloc(ELF_MAXNOTESIZE, KM_SLEEP); 906 for (i = 0; i < eh->e_phnum; i++) { 907 const char *nptr; 908 size_t nlen; 909 910 if (ph[i].p_type != PT_NOTE || 911 ph[i].p_filesz > ELF_MAXNOTESIZE) 912 continue; 913 914 nlen = ph[i].p_filesz; 915 error = exec_read_from(l, epp->ep_vp, ph[i].p_offset, 916 nbuf, nlen); 917 if (error) 918 continue; 919 920 nptr = nbuf; 921 while (nlen > 0) { 922 const Elf_Nhdr *np; 923 const char *ndata, *ndesc; 924 925 /* note header */ 926 np = (const Elf_Nhdr *)nptr; 927 if (nlen < sizeof(*np)) { 928 break; 929 } 930 nptr += sizeof(*np); 931 nlen -= sizeof(*np); 932 933 /* note name */ 934 ndata = nptr; 935 if (nlen < roundup(np->n_namesz, 4)) { 936 break; 937 } 938 nptr += roundup(np->n_namesz, 4); 939 nlen -= roundup(np->n_namesz, 4); 940 941 /* note description */ 942 ndesc = nptr; 943 if (nlen < roundup(np->n_descsz, 4)) { 944 break; 945 } 946 nptr += roundup(np->n_descsz, 4); 947 nlen -= roundup(np->n_descsz, 4); 948 949 isnetbsd |= netbsd_elf_note(epp, np, ndata, ndesc); 950 } 951 } 952 kmem_free(nbuf, ELF_MAXNOTESIZE); 953 954 error = isnetbsd ? 0 : ENOEXEC; 955 #ifdef DEBUG_ELF 956 if (error) 957 DPRINTF("not netbsd"); 958 #endif 959 out: 960 kmem_free(ph, phsize); 961 return error; 962 } 963 964 int 965 netbsd_elf_note(struct exec_package *epp, 966 const Elf_Nhdr *np, const char *ndata, const char *ndesc) 967 { 968 int isnetbsd = 0; 969 970 #ifdef DIAGNOSTIC 971 const char *badnote; 972 #define BADNOTE(n) badnote = (n) 973 #else 974 #define BADNOTE(n) 975 #endif 976 977 switch (np->n_type) { 978 case ELF_NOTE_TYPE_NETBSD_TAG: 979 /* It is us */ 980 if (np->n_namesz == ELF_NOTE_NETBSD_NAMESZ && 981 np->n_descsz == ELF_NOTE_NETBSD_DESCSZ && 982 memcmp(ndata, ELF_NOTE_NETBSD_NAME, 983 ELF_NOTE_NETBSD_NAMESZ) == 0) { 984 memcpy(&epp->ep_osversion, ndesc, 985 ELF_NOTE_NETBSD_DESCSZ); 986 isnetbsd = 1; 987 break; 988 } 989 990 /* 991 * Ignore SuSE tags; SuSE's n_type is the same the 992 * NetBSD one. 993 */ 994 if (np->n_namesz == ELF_NOTE_SUSE_NAMESZ && 995 memcmp(ndata, ELF_NOTE_SUSE_NAME, 996 ELF_NOTE_SUSE_NAMESZ) == 0) 997 break; 998 /* 999 * Ignore old GCC 1000 */ 1001 if (np->n_namesz == ELF_NOTE_OGCC_NAMESZ && 1002 memcmp(ndata, ELF_NOTE_OGCC_NAME, 1003 ELF_NOTE_OGCC_NAMESZ) == 0) 1004 break; 1005 BADNOTE("NetBSD tag"); 1006 goto bad; 1007 1008 case ELF_NOTE_TYPE_PAX_TAG: 1009 if (np->n_namesz == ELF_NOTE_PAX_NAMESZ && 1010 np->n_descsz == ELF_NOTE_PAX_DESCSZ && 1011 memcmp(ndata, ELF_NOTE_PAX_NAME, 1012 ELF_NOTE_PAX_NAMESZ) == 0) { 1013 uint32_t flags; 1014 memcpy(&flags, ndesc, sizeof(flags)); 1015 /* Convert the flags and insert them into 1016 * the exec package. */ 1017 pax_setup_elf_flags(epp, flags); 1018 break; 1019 } 1020 BADNOTE("PaX tag"); 1021 goto bad; 1022 1023 case ELF_NOTE_TYPE_MARCH_TAG: 1024 /* Copy the machine arch into the package. */ 1025 if (np->n_namesz == ELF_NOTE_MARCH_NAMESZ 1026 && memcmp(ndata, ELF_NOTE_MARCH_NAME, 1027 ELF_NOTE_MARCH_NAMESZ) == 0) { 1028 /* Do not truncate the buffer */ 1029 if (np->n_descsz > sizeof(epp->ep_machine_arch)) { 1030 BADNOTE("description size limit"); 1031 goto bad; 1032 } 1033 /* 1034 * Ensure ndesc is NUL-terminated and of the 1035 * expected length. 1036 */ 1037 if (strnlen(ndesc, np->n_descsz) + 1 != 1038 np->n_descsz) { 1039 BADNOTE("description size"); 1040 goto bad; 1041 } 1042 strlcpy(epp->ep_machine_arch, ndesc, 1043 sizeof(epp->ep_machine_arch)); 1044 break; 1045 } 1046 BADNOTE("march tag"); 1047 goto bad; 1048 1049 case ELF_NOTE_TYPE_MCMODEL_TAG: 1050 /* arch specific check for code model */ 1051 #ifdef ELF_MD_MCMODEL_CHECK 1052 if (np->n_namesz == ELF_NOTE_MCMODEL_NAMESZ 1053 && memcmp(ndata, ELF_NOTE_MCMODEL_NAME, 1054 ELF_NOTE_MCMODEL_NAMESZ) == 0) { 1055 ELF_MD_MCMODEL_CHECK(epp, ndesc, np->n_descsz); 1056 break; 1057 } 1058 BADNOTE("mcmodel tag"); 1059 goto bad; 1060 #endif 1061 break; 1062 1063 case ELF_NOTE_TYPE_SUSE_VERSION_TAG: 1064 break; 1065 1066 case ELF_NOTE_TYPE_GO_BUILDID_TAG: 1067 break; 1068 1069 case ELF_NOTE_TYPE_NETBSD_EMUL_TAG: 1070 /* Ancient NetBSD version tag */ 1071 break; 1072 1073 default: 1074 BADNOTE("unknown tag"); 1075 bad: 1076 #ifdef DIAGNOSTIC 1077 /* Ignore GNU tags */ 1078 if (np->n_namesz == ELF_NOTE_GNU_NAMESZ && 1079 memcmp(ndata, ELF_NOTE_GNU_NAME, 1080 ELF_NOTE_GNU_NAMESZ) == 0) 1081 break; 1082 1083 int ns = (int)np->n_namesz; 1084 printf("%s: Unknown elf note type %d (%s): " 1085 "[namesz=%d, descsz=%d name=%-*.*s]\n", 1086 epp->ep_kname, np->n_type, badnote, np->n_namesz, 1087 np->n_descsz, ns, ns, ndata); 1088 #endif 1089 break; 1090 } 1091 1092 return isnetbsd; 1093 } 1094 1095 int 1096 netbsd_elf_probe(struct lwp *l, struct exec_package *epp, void *eh, char *itp, 1097 vaddr_t *pos) 1098 { 1099 int error; 1100 1101 if ((error = netbsd_elf_signature(l, epp, eh)) != 0) 1102 return error; 1103 #ifdef ELF_MD_PROBE_FUNC 1104 if ((error = ELF_MD_PROBE_FUNC(l, epp, eh, itp, pos)) != 0) 1105 return error; 1106 #elif defined(ELF_INTERP_NON_RELOCATABLE) 1107 *pos = ELF_LINK_ADDR; 1108 #endif 1109 epp->ep_flags |= EXEC_FORCEAUX; 1110 return 0; 1111 } 1112 1113 void 1114 elf_free_emul_arg(void *arg) 1115 { 1116 struct elf_args *ap = arg; 1117 KASSERT(ap != NULL); 1118 kmem_free(ap, sizeof(*ap)); 1119 } 1120