/*	$NetBSD: exec_elf.c,v 1.87 2016/09/15 18:40:34 christos Exp $	*/

/*-
 * Copyright (c) 1994, 2000, 2005, 2015 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Christos Zoulas and Maxime Villard.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1996 Christopher G. Demetriou
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(1, "$NetBSD: exec_elf.c,v 1.87 2016/09/15 18:40:34 christos Exp $");

#ifdef _KERNEL_OPT
#include "opt_pax.h"
#endif /* _KERNEL_OPT */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/kmem.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/exec.h>
#include <sys/exec_elf.h>
#include <sys/syscall.h>
#include <sys/signalvar.h>
#include <sys/mount.h>
#include <sys/stat.h>
#include <sys/kauth.h>
#include <sys/bitops.h>

#include <sys/cpu.h>
#include <machine/reg.h>

#include <compat/common/compat_util.h>

#include <sys/pax.h>
#include <uvm/uvm_param.h>

extern struct emul emul_netbsd;

#define elf_check_header	ELFNAME(check_header)
#define elf_copyargs		ELFNAME(copyargs)
#define elf_load_interp		ELFNAME(load_interp)
#define elf_load_psection	ELFNAME(load_psection)
#define exec_elf_makecmds	ELFNAME2(exec,makecmds)
#define netbsd_elf_signature	ELFNAME2(netbsd,signature)
#define netbsd_elf_probe	ELFNAME2(netbsd,probe)
#define coredump		ELFNAMEEND(coredump)
#define elf_free_emul_arg	ELFNAME(free_emul_arg)

static int
elf_load_interp(struct lwp *, struct exec_package *, char *,
    struct exec_vmcmd_set *, u_long *, Elf_Addr *);
static void
elf_load_psection(struct exec_vmcmd_set *, struct vnode *, const Elf_Phdr *,
    Elf_Addr *, u_long *, int);

int	netbsd_elf_signature(struct lwp *, struct exec_package *, Elf_Ehdr *);
int	netbsd_elf_probe(struct lwp *, struct exec_package *, void *, char *,
    vaddr_t *);

static void	elf_free_emul_arg(void *);

#ifdef DEBUG_ELF
#define DPRINTF(a, ...)	printf("%s: " a "\n", __func__, ##__VA_ARGS__)
#else
#define DPRINTF(a, ...)
#endif

/* round up and down to page boundaries. */
#define ELF_ROUND(a, b)		(((a) + (b) - 1) & ~((b) - 1))
#define ELF_TRUNC(a, b)		((a) & ~((b) - 1))

static void
elf_placedynexec(struct exec_package *epp, Elf_Ehdr *eh, Elf_Phdr *ph)
{
	Elf_Addr align, offset;
	int i;

	for (align = i = 0; i < eh->e_phnum; i++)
		if (ph[i].p_type == PT_LOAD && ph[i].p_align > align)
			align = ph[i].p_align;

	offset = (Elf_Addr)pax_aslr_exec_offset(epp, align);
	offset += epp->ep_vm_minaddr;

	for (i = 0; i < eh->e_phnum; i++)
		ph[i].p_vaddr += offset;
	epp->ep_entryoffset = offset;
	eh->e_entry += offset;
}

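/*
 * Rough sketch of what elf_copyargs() below leaves above the argument
 * and environment data for a dynamically linked binary (exact packing
 * of the strings themselves is done by copyargs()):
 *
 *	AuxInfo ai[]:	AT_PHDR, AT_PHENT, AT_PHNUM, AT_PAGESZ, AT_BASE,
 *			AT_FLAGS, AT_ENTRY, AT_EUID, AT_RUID, AT_EGID,
 *			AT_RGID, AT_STACKBASE, [AT_SUN_EXECNAME], AT_NULL
 *	pathname string pointed to by AT_SUN_EXECNAME, if present
 *
 * The runtime linker walks this vector to find the program headers,
 * its own base address and the program entry point.
 */
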
/*
 * Copy arguments onto the stack in the normal way, but add some
 * extra information in case of dynamic binding.
 */
int
elf_copyargs(struct lwp *l, struct exec_package *pack,
    struct ps_strings *arginfo, char **stackp, void *argp)
{
	size_t len, vlen;
	AuxInfo ai[ELF_AUX_ENTRIES], *a, *execname;
	struct elf_args *ap;
	int error;

	if ((error = copyargs(l, pack, arginfo, stackp, argp)) != 0)
		return error;

	a = ai;
	execname = NULL;

	memset(ai, 0, sizeof(ai));

	/*
	 * Push extra arguments on the stack needed by dynamically
	 * linked binaries
	 */
	if ((ap = (struct elf_args *)pack->ep_emul_arg)) {
		struct vattr *vap = pack->ep_vap;

		a->a_type = AT_PHDR;
		a->a_v = ap->arg_phaddr;
		a++;

		a->a_type = AT_PHENT;
		a->a_v = ap->arg_phentsize;
		a++;

		a->a_type = AT_PHNUM;
		a->a_v = ap->arg_phnum;
		a++;

		a->a_type = AT_PAGESZ;
		a->a_v = PAGE_SIZE;
		a++;

		a->a_type = AT_BASE;
		a->a_v = ap->arg_interp;
		a++;

		a->a_type = AT_FLAGS;
		a->a_v = 0;
		a++;

		a->a_type = AT_ENTRY;
		a->a_v = ap->arg_entry;
		a++;

		a->a_type = AT_EUID;
		if (vap->va_mode & S_ISUID)
			a->a_v = vap->va_uid;
		else
			a->a_v = kauth_cred_geteuid(l->l_cred);
		a++;

		a->a_type = AT_RUID;
		a->a_v = kauth_cred_getuid(l->l_cred);
		a++;

		a->a_type = AT_EGID;
		if (vap->va_mode & S_ISGID)
			a->a_v = vap->va_gid;
		else
			a->a_v = kauth_cred_getegid(l->l_cred);
		a++;

		a->a_type = AT_RGID;
		a->a_v = kauth_cred_getgid(l->l_cred);
		a++;

		a->a_type = AT_STACKBASE;
		a->a_v = l->l_proc->p_stackbase;
		a++;

		if (pack->ep_path) {
			execname = a;
			a->a_type = AT_SUN_EXECNAME;
			a++;
		}

		exec_free_emul_arg(pack);
	}

	a->a_type = AT_NULL;
	a->a_v = 0;
	a++;

	vlen = (a - ai) * sizeof(ai[0]);

	KASSERT(vlen <= sizeof(ai));

	if (execname) {
		char *path = pack->ep_path;
		execname->a_v = (uintptr_t)(*stackp + vlen);
		len = strlen(path) + 1;
		if ((error = copyout(path, (*stackp + vlen), len)) != 0)
			return error;
		len = ALIGN(len);
	} else
		len = 0;

	if ((error = copyout(ai, *stackp, vlen)) != 0)
		return error;
	*stackp += vlen + len;

	return 0;
}

/*
 * elf_check_header():
 *
 * Check header for validity; return 0 if ok, ENOEXEC if error
 */
int
elf_check_header(Elf_Ehdr *eh)
{

	if (memcmp(eh->e_ident, ELFMAG, SELFMAG) != 0 ||
	    eh->e_ident[EI_CLASS] != ELFCLASS) {
		DPRINTF("bad magic %#x%x%x", eh->e_ident[0], eh->e_ident[1],
		    eh->e_ident[2]);
		return ENOEXEC;
	}

	switch (eh->e_machine) {

	ELFDEFNNAME(MACHDEP_ID_CASES)

	default:
		DPRINTF("bad machine %#x", eh->e_machine);
		return ENOEXEC;
	}

	if (ELF_EHDR_FLAGS_OK(eh) == 0) {
		DPRINTF("bad flags %#x", eh->e_flags);
		return ENOEXEC;
	}

	if (eh->e_shnum > ELF_MAXSHNUM || eh->e_phnum > ELF_MAXPHNUM) {
		DPRINTF("bad shnum/phnum %#x/%#x", eh->e_shnum, eh->e_phnum);
		return ENOEXEC;
	}

	return 0;
}

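/*
 * A small worked example of the alignment handling done by
 * elf_load_psection() below (the numbers are purely illustrative):
 * with p_vaddr = 0x24720, p_offset = 0x4720, p_align = 0x10000 and
 * PAGE_SIZE = 0x1000, diff = p_vaddr - ELF_TRUNC(p_vaddr, p_align)
 * = 0x4720, which is then clamped by PAGE_MASK to 0x720, so the
 * psection gets mapped at 0x24000 from file offset 0x4000 -- both
 * page aligned -- without pulling in pages that precede the psection.
 */
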
/*
 * elf_load_psection():
 *
 * Load a psection at the appropriate address
 */
static void
elf_load_psection(struct exec_vmcmd_set *vcset, struct vnode *vp,
    const Elf_Phdr *ph, Elf_Addr *addr, u_long *size, int flags)
{
	u_long msize, psize, rm, rf;
	long diff, offset;
	int vmprot = 0;

	/*
	 * If the user specified an address, then we load there.
	 */
	if (*addr == ELFDEFNNAME(NO_ADDR))
		*addr = ph->p_vaddr;

	if (ph->p_align > 1) {
		/*
		 * Make sure we are virtually aligned as we are supposed to be.
		 */
		diff = ph->p_vaddr - ELF_TRUNC(ph->p_vaddr, ph->p_align);
		KASSERT(*addr - diff == ELF_TRUNC(*addr, ph->p_align));
		/*
		 * But make sure to not map any pages before the start of the
		 * psection by limiting the difference to within a page.
		 */
		diff &= PAGE_MASK;
	} else
		diff = 0;

	vmprot |= (ph->p_flags & PF_R) ? VM_PROT_READ : 0;
	vmprot |= (ph->p_flags & PF_W) ? VM_PROT_WRITE : 0;
	vmprot |= (ph->p_flags & PF_X) ? VM_PROT_EXECUTE : 0;

	/*
	 * Adjust everything so it all starts on a page boundary.
	 */
	*addr -= diff;
	offset = ph->p_offset - diff;
	*size = ph->p_filesz + diff;
	msize = ph->p_memsz + diff;

	if (ph->p_align >= PAGE_SIZE) {
		if ((ph->p_flags & PF_W) != 0) {
			/*
			 * Because the pagedvn pager can't handle zero fill
			 * of the last data page if it's not page aligned we
			 * map the last page readvn.
			 */
			psize = trunc_page(*size);
		} else {
			psize = round_page(*size);
		}
	} else {
		psize = *size;
	}

	if (psize > 0) {
		NEW_VMCMD2(vcset, ph->p_align < PAGE_SIZE ?
		    vmcmd_map_readvn : vmcmd_map_pagedvn, psize, *addr, vp,
		    offset, vmprot, flags);
		flags &= VMCMD_RELATIVE;
	}
	if (psize < *size) {
		NEW_VMCMD2(vcset, vmcmd_map_readvn, *size - psize,
		    *addr + psize, vp, offset + psize, vmprot, flags);
	}

	/*
	 * Check if we need to extend the size of the segment (does
	 * the bss extend past the next page boundary)?
	 */
	rm = round_page(*addr + msize);
	rf = round_page(*addr + *size);

	if (rm != rf) {
		NEW_VMCMD2(vcset, vmcmd_map_zero, rm - rf, rf, NULLVP,
		    0, vmprot, flags & VMCMD_RELATIVE);
		*size = msize;
	}
}

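/*
 * Roughly, the interpreter named by PT_INTERP (normally the shared
 * object loader) is mapped much like any other ET_DYN object: the
 * first PT_LOAD psection is placed with VMCMD_BASE at an address
 * chosen by e_vm_default_addr() plus a PaX ASLR offset, and every
 * later psection is mapped VMCMD_RELATIVE to that base, so the whole
 * interpreter can be slid as one unit.
 */
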
/*
 * elf_load_interp():
 *
 * Load an interpreter pointed to by path.
 */
static int
elf_load_interp(struct lwp *l, struct exec_package *epp, char *path,
    struct exec_vmcmd_set *vcset, u_long *entryoff, Elf_Addr *last)
{
	int error, i;
	struct vnode *vp;
	struct vattr attr;
	Elf_Ehdr eh;
	Elf_Phdr *ph = NULL;
	const Elf_Phdr *base_ph;
	const Elf_Phdr *last_ph;
	u_long phsize;
	Elf_Addr addr = *last;
	struct proc *p;
	bool use_topdown;

	p = l->l_proc;

	KASSERT(p->p_vmspace);
	KASSERT(p->p_vmspace != proc0.p_vmspace);

#ifdef __USE_TOPDOWN_VM
	use_topdown = epp->ep_flags & EXEC_TOPDOWN_VM;
#else
	use_topdown = false;
#endif

	/*
	 * 1. open file
	 * 2. read filehdr
	 * 3. map text, data, and bss out of it using VM_*
	 */
	vp = epp->ep_interp;
	if (vp == NULL) {
		error = emul_find_interp(l, epp, path);
		if (error != 0)
			return error;
		vp = epp->ep_interp;
	}
	/* We'll tidy this ourselves - otherwise we have locking issues */
	epp->ep_interp = NULL;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	/*
	 * Similarly, if it's not marked as executable, or it's not a regular
	 * file, we don't allow it to be used.
	 */
	if (vp->v_type != VREG) {
		error = EACCES;
		goto badunlock;
	}
	if ((error = VOP_ACCESS(vp, VEXEC, l->l_cred)) != 0)
		goto badunlock;

	/* get attributes */
	if ((error = VOP_GETATTR(vp, &attr, l->l_cred)) != 0)
		goto badunlock;

	/*
	 * Check mount point.  Though we're not trying to exec this binary,
	 * we will be executing code from it, so if the mount point
	 * disallows execution or set-id-ness, we punt or kill the set-id.
	 */
	if (vp->v_mount->mnt_flag & MNT_NOEXEC) {
		error = EACCES;
		goto badunlock;
	}
	if (vp->v_mount->mnt_flag & MNT_NOSUID)
		epp->ep_vap->va_mode &= ~(S_ISUID | S_ISGID);

	error = vn_marktext(vp);
	if (error)
		goto badunlock;

	VOP_UNLOCK(vp);

	if ((error = exec_read_from(l, vp, 0, &eh, sizeof(eh))) != 0)
		goto bad;

	if ((error = elf_check_header(&eh)) != 0)
		goto bad;
	if (eh.e_type != ET_DYN || eh.e_phnum == 0) {
		DPRINTF("bad interpreter type %#x", eh.e_type);
		error = ENOEXEC;
		goto bad;
	}

	phsize = eh.e_phnum * sizeof(Elf_Phdr);
	ph = kmem_alloc(phsize, KM_SLEEP);

	if ((error = exec_read_from(l, vp, eh.e_phoff, ph, phsize)) != 0)
		goto bad;

#ifdef ELF_INTERP_NON_RELOCATABLE
	/*
	 * Evil hack:  Only MIPS should be non-relocatable, and the
	 * psections should have a high address (typically 0x5ffe0000).
	 * If it's now relocatable, it should be linked at 0 and the
	 * psections should have zeros in the upper part of the address.
	 * Otherwise, force the load at the linked address.
	 */
	if (*last == ELF_LINK_ADDR && (ph->p_vaddr & 0xffff0000) == 0)
		*last = ELFDEFNNAME(NO_ADDR);
#endif

	/*
	 * If no position to load the interpreter was set by a probe
	 * function, pick the same address that a non-fixed mmap(0, ..)
	 * would (i.e. something safely out of the way).
	 */
	if (*last == ELFDEFNNAME(NO_ADDR)) {
		u_long limit = 0;
		/*
		 * Find the start and ending addresses of the psections to
		 * be loaded.  This will give us the size.
		 */
		for (i = 0, base_ph = NULL; i < eh.e_phnum; i++) {
			if (ph[i].p_type == PT_LOAD) {
				u_long psize = ph[i].p_vaddr + ph[i].p_memsz;
				if (base_ph == NULL)
					base_ph = &ph[i];
				if (psize > limit)
					limit = psize;
			}
		}

		if (base_ph == NULL) {
			DPRINTF("no interpreter loadable sections");
			error = ENOEXEC;
			goto bad;
		}

		/*
		 * Now compute the size and load address.
		 */
		addr = (*epp->ep_esch->es_emul->e_vm_default_addr)(p,
		    epp->ep_daddr,
		    round_page(limit) - trunc_page(base_ph->p_vaddr),
		    use_topdown);
		addr += (Elf_Addr)pax_aslr_rtld_offset(epp, base_ph->p_align,
		    use_topdown);
	} else {
		addr = *last; /* may be ELF_LINK_ADDR */
	}

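	/*
	 * From here on, addresses are kept relative to the first (base)
	 * psection.  For example (values illustrative only): if
	 * base_ph->p_vaddr is 0 and a later psection has p_vaddr 0x21000,
	 * it lands at base + 0x21000 wherever the base was placed above,
	 * with any hole in between mapped VM_PROT_NONE.
	 */
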
	/*
	 * Load all the necessary sections
	 */
	for (i = 0, base_ph = NULL, last_ph = NULL; i < eh.e_phnum; i++) {
		switch (ph[i].p_type) {
		case PT_LOAD: {
			u_long size;
			int flags;

			if (base_ph == NULL) {
				/*
				 * First encountered psection is always the
				 * base psection.  Make sure it's aligned
				 * properly (align down for topdown and align
				 * upwards for not topdown).
				 */
				base_ph = &ph[i];
				flags = VMCMD_BASE;
				if (addr == ELF_LINK_ADDR)
					addr = ph[i].p_vaddr;
				if (use_topdown)
					addr = ELF_TRUNC(addr, ph[i].p_align);
				else
					addr = ELF_ROUND(addr, ph[i].p_align);
			} else {
				u_long limit = round_page(last_ph->p_vaddr
				    + last_ph->p_memsz);
				u_long base = trunc_page(ph[i].p_vaddr);

				/*
				 * If there is a gap in between the psections,
				 * map it as inaccessible so nothing else
				 * mmap'ed will be placed there.
				 */
				if (limit != base) {
					NEW_VMCMD2(vcset, vmcmd_map_zero,
					    base - limit,
					    limit - base_ph->p_vaddr, NULLVP,
					    0, VM_PROT_NONE, VMCMD_RELATIVE);
				}

				addr = ph[i].p_vaddr - base_ph->p_vaddr;
				flags = VMCMD_RELATIVE;
			}
			last_ph = &ph[i];
			elf_load_psection(vcset, vp, &ph[i], &addr,
			    &size, flags);
			/*
			 * If entry is within this psection then this
			 * must contain the .text section.  *entryoff is
			 * relative to the base psection.
			 */
			if (eh.e_entry >= ph[i].p_vaddr &&
			    eh.e_entry < (ph[i].p_vaddr + size)) {
				*entryoff = eh.e_entry - base_ph->p_vaddr;
			}
			addr += size;
			break;
		}

		default:
			break;
		}
	}

	kmem_free(ph, phsize);
	/*
	 * This value is ignored if TOPDOWN.
	 */
	*last = addr;
	vrele(vp);
	return 0;

badunlock:
	VOP_UNLOCK(vp);

bad:
	if (ph != NULL)
		kmem_free(ph, phsize);
	vrele(vp);
	return error;
}

/*
 * exec_elf_makecmds(): Prepare an Elf binary's exec package
 *
 * First, set up the various offsets/lengths in the exec package.
 *
 * Then, mark the text image busy (so it can be demand paged) or error
 * out if this is not possible.  Finally, set up vmcmds for the
 * text, data, bss, and stack segments.
 */
int
exec_elf_makecmds(struct lwp *l, struct exec_package *epp)
{
	Elf_Ehdr *eh = epp->ep_hdr;
	Elf_Phdr *ph, *pp;
	Elf_Addr phdr = 0, computed_phdr = 0, pos = 0, end_text = 0;
	int error, i;
	char *interp = NULL;
	u_long phsize;
	struct elf_args *ap;
	bool is_dyn = false;

	if (epp->ep_hdrvalid < sizeof(Elf_Ehdr)) {
		DPRINTF("small header %#x", epp->ep_hdrvalid);
		return ENOEXEC;
	}
	if ((error = elf_check_header(eh)) != 0)
		return error;

	if (eh->e_type == ET_DYN)
		/* PIE, and some libs have an entry point */
		is_dyn = true;
	else if (eh->e_type != ET_EXEC) {
		DPRINTF("bad type %#x", eh->e_type);
		return ENOEXEC;
	}

	if (eh->e_phnum == 0) {
		DPRINTF("no program headers");
		return ENOEXEC;
	}

	error = vn_marktext(epp->ep_vp);
	if (error)
		return error;

	/*
	 * Allocate space to hold all the program headers, and read them
	 * from the file
	 */
	phsize = eh->e_phnum * sizeof(Elf_Phdr);
	ph = kmem_alloc(phsize, KM_SLEEP);

	if ((error = exec_read_from(l, epp->ep_vp, eh->e_phoff, ph, phsize)) !=
	    0)
		goto bad;

	epp->ep_taddr = epp->ep_tsize = ELFDEFNNAME(NO_ADDR);
	epp->ep_daddr = epp->ep_dsize = ELFDEFNNAME(NO_ADDR);

	for (i = 0; i < eh->e_phnum; i++) {
		pp = &ph[i];
		if (pp->p_type == PT_INTERP) {
			if (pp->p_filesz < 2 || pp->p_filesz > MAXPATHLEN) {
				DPRINTF("bad interpreter namelen %#jx",
				    (uintmax_t)pp->p_filesz);
				error = ENOEXEC;
				goto bad;
			}
			interp = PNBUF_GET();
			if ((error = exec_read_from(l, epp->ep_vp,
			    pp->p_offset, interp, pp->p_filesz)) != 0)
				goto bad;
			/* Ensure interp is NUL-terminated and of the expected length */
			if (strnlen(interp, pp->p_filesz) != pp->p_filesz - 1) {
				DPRINTF("bad interpreter name");
				error = ENOEXEC;
				goto bad;
			}
			break;
		}
	}

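	/*
	 * At this point interp, if set, holds the NUL-terminated PT_INTERP
	 * path read out of the binary; for native NetBSD binaries this is
	 * typically /usr/libexec/ld.elf_so.
	 */
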
	/*
	 * On the same architecture, we may be emulating different systems.
	 * See which one will accept this executable.
	 *
	 * Probe functions would normally see if the interpreter (if any)
	 * exists.  Emulation packages may possibly replace the interpreter in
	 * interp with a changed path (/emul/xxx/<path>).
	 */
	pos = ELFDEFNNAME(NO_ADDR);
	if (epp->ep_esch->u.elf_probe_func) {
		vaddr_t startp = (vaddr_t)pos;

		error = (*epp->ep_esch->u.elf_probe_func)(l, epp, eh, interp,
		    &startp);
		if (error)
			goto bad;
		pos = (Elf_Addr)startp;
	}

	if (is_dyn)
		elf_placedynexec(epp, eh, ph);

	/*
	 * Load all the necessary sections
	 */
	for (i = 0; i < eh->e_phnum; i++) {
		Elf_Addr addr = ELFDEFNNAME(NO_ADDR);
		u_long size = 0;

		switch (ph[i].p_type) {
		case PT_LOAD:
			elf_load_psection(&epp->ep_vmcmds, epp->ep_vp,
			    &ph[i], &addr, &size, VMCMD_FIXED);

			/*
			 * Consider this as a text segment if it is
			 * executable.  If there is more than one text
			 * segment, pick the largest.
			 */
			if (ph[i].p_flags & PF_X) {
				if (epp->ep_taddr == ELFDEFNNAME(NO_ADDR) ||
				    size > epp->ep_tsize) {
					epp->ep_taddr = addr;
					epp->ep_tsize = size;
				}
				end_text = addr + size;
			} else {
				epp->ep_daddr = addr;
				epp->ep_dsize = size;
			}
			if (ph[i].p_offset == 0) {
				computed_phdr = ph[i].p_vaddr + eh->e_phoff;
			}
			break;

		case PT_SHLIB:
			/* SCO has these sections. */
		case PT_INTERP:
			/* Already did this one. */
		case PT_DYNAMIC:
		case PT_NOTE:
			break;
		case PT_PHDR:
			/* Note address of program headers (in text segment) */
			phdr = ph[i].p_vaddr;
			break;

		default:
			/*
			 * Not fatal; we don't need to understand everything.
			 */
			break;
		}
	}

	if (epp->ep_vmcmds.evs_used == 0) {
		/* No VMCMD; there was no PT_LOAD section, or those
		 * sections were empty */
		DPRINTF("no vmcommands");
		error = ENOEXEC;
		goto bad;
	}

	if (epp->ep_daddr == ELFDEFNNAME(NO_ADDR)) {
		epp->ep_daddr = round_page(end_text);
		epp->ep_dsize = 0;
	}

	/*
	 * Check if we found a dynamically linked binary and arrange to load
	 * its interpreter
	 */
	if (interp) {
		u_int nused = epp->ep_vmcmds.evs_used;
		u_long interp_offset = 0;

		if ((error = elf_load_interp(l, epp, interp,
		    &epp->ep_vmcmds, &interp_offset, &pos)) != 0) {
			goto bad;
		}
		if (epp->ep_vmcmds.evs_used == nused) {
			/* elf_load_interp() has not set up any new VMCMD */
			DPRINTF("no vmcommands for interpreter");
			error = ENOEXEC;
			goto bad;
		}

		ap = kmem_alloc(sizeof(*ap), KM_SLEEP);
		ap->arg_interp = epp->ep_vmcmds.evs_cmds[nused].ev_addr;
		epp->ep_entryoffset = interp_offset;
		epp->ep_entry = ap->arg_interp + interp_offset;
		PNBUF_PUT(interp);
		interp = NULL;
	} else {
		epp->ep_entry = eh->e_entry;
		if (epp->ep_flags & EXEC_FORCEAUX) {
			ap = kmem_alloc(sizeof(*ap), KM_SLEEP);
			ap->arg_interp = (vaddr_t)NULL;
		} else
			ap = NULL;
	}

	if (ap) {
		ap->arg_phaddr = phdr ? phdr : computed_phdr;
		ap->arg_phentsize = eh->e_phentsize;
		ap->arg_phnum = eh->e_phnum;
		ap->arg_entry = eh->e_entry;
		epp->ep_emul_arg = ap;
		epp->ep_emul_arg_free = elf_free_emul_arg;
	}

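	/*
	 * The elf_args structure filled in above is carried in ep_emul_arg
	 * and later turned into the AT_PHDR/AT_PHENT/AT_PHNUM/AT_BASE/
	 * AT_ENTRY aux entries by elf_copyargs(), which then releases it
	 * through ep_emul_arg_free (elf_free_emul_arg).
	 */
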
#ifdef ELF_MAP_PAGE_ZERO
	/* Dell SVR4 maps page zero, yeuch! */
	NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_readvn, PAGE_SIZE, 0,
	    epp->ep_vp, 0, VM_PROT_READ);
#endif

	error = (*epp->ep_esch->es_setup_stack)(l, epp);
	if (error)
		goto bad;

	kmem_free(ph, phsize);
	return 0;

bad:
	if (interp)
		PNBUF_PUT(interp);
	exec_free_emul_arg(epp);
	kmem_free(ph, phsize);
	kill_vmcmds(&epp->ep_vmcmds);
	return error;
}

int
netbsd_elf_signature(struct lwp *l, struct exec_package *epp,
    Elf_Ehdr *eh)
{
	size_t i;
	Elf_Shdr *sh;
	Elf_Nhdr *np;
	size_t shsize, nsize;
	int error;
	int isnetbsd = 0;
	char *ndata, *ndesc;

#ifdef DIAGNOSTIC
	const char *badnote;
#define BADNOTE(n) badnote = (n)
#else
#define BADNOTE(n)
#endif

	epp->ep_pax_flags = 0;
	if (eh->e_shnum > ELF_MAXSHNUM || eh->e_shnum == 0) {
		DPRINTF("no signature %#x", eh->e_shnum);
		return ENOEXEC;
	}

	shsize = eh->e_shnum * sizeof(Elf_Shdr);
	sh = kmem_alloc(shsize, KM_SLEEP);
	error = exec_read_from(l, epp->ep_vp, eh->e_shoff, sh, shsize);
	if (error)
		goto out;

	np = kmem_alloc(ELF_MAXNOTESIZE, KM_SLEEP);
	for (i = 0; i < eh->e_shnum; i++) {
		Elf_Shdr *shp = &sh[i];

		if (shp->sh_type != SHT_NOTE ||
		    shp->sh_size > ELF_MAXNOTESIZE ||
		    shp->sh_size < sizeof(Elf_Nhdr) + ELF_NOTE_NETBSD_NAMESZ)
			continue;

		error = exec_read_from(l, epp->ep_vp, shp->sh_offset, np,
		    shp->sh_size);
		if (error)
			continue;

		/* Point to the note, skip the header */
		ndata = (char *)(np + 1);

		/*
		 * Padding is present if necessary to ensure 4-byte alignment.
		 * The actual section size is therefore:
		 *    header size + 4-byte aligned name + 4-byte aligned desc
		 * Ensure this size is consistent with what is indicated
		 * in sh_size.  The first check avoids integer overflows.
		 *
		 * Binaries from before NetBSD 1.6 have two notes in the same
		 * note section.  The second note was never used, so as long as
		 * the section is at least as big as it should be, it's ok.
		 * These binaries also have a second note section with a note of
		 * type ELF_NOTE_TYPE_NETBSD_TAG, which can be ignored as well.
		 */
		if (np->n_namesz > shp->sh_size || np->n_descsz > shp->sh_size) {
			BADNOTE("note size limit");
			goto bad;
		}
		nsize = sizeof(*np) + roundup(np->n_namesz, 4) +
		    roundup(np->n_descsz, 4);
		if (nsize > shp->sh_size) {
			BADNOTE("note size");
			goto bad;
		}
		ndesc = ndata + roundup(np->n_namesz, 4);

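		/*
		 * Sketch of the note record examined below, as laid out in
		 * the file (each field padded to 4-byte alignment):
		 *
		 *	Elf_Nhdr { n_namesz, n_descsz, n_type }
		 *	name ("NetBSD", "PaX", ...)   roundup(n_namesz, 4)
		 *	desc (version, flags, ...)    roundup(n_descsz, 4)
		 *
		 * ndata points at the name and ndesc at the descriptor.
		 */
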
		switch (np->n_type) {
		case ELF_NOTE_TYPE_NETBSD_TAG:
			/* It is us */
			if (np->n_namesz == ELF_NOTE_NETBSD_NAMESZ &&
			    np->n_descsz == ELF_NOTE_NETBSD_DESCSZ &&
			    memcmp(ndata, ELF_NOTE_NETBSD_NAME,
			    ELF_NOTE_NETBSD_NAMESZ) == 0) {
				memcpy(&epp->ep_osversion, ndesc,
				    ELF_NOTE_NETBSD_DESCSZ);
				isnetbsd = 1;
				break;
			}

			/*
			 * Ignore SuSE tags; SuSE's n_type is the same as the
			 * NetBSD one.
			 */
			if (np->n_namesz == ELF_NOTE_SUSE_NAMESZ &&
			    memcmp(ndata, ELF_NOTE_SUSE_NAME,
			    ELF_NOTE_SUSE_NAMESZ) == 0)
				break;
			/*
			 * Ignore old GCC
			 */
			if (np->n_namesz == ELF_NOTE_OGCC_NAMESZ &&
			    memcmp(ndata, ELF_NOTE_OGCC_NAME,
			    ELF_NOTE_OGCC_NAMESZ) == 0)
				break;
			BADNOTE("NetBSD tag");
			goto bad;

		case ELF_NOTE_TYPE_PAX_TAG:
			if (np->n_namesz == ELF_NOTE_PAX_NAMESZ &&
			    np->n_descsz == ELF_NOTE_PAX_DESCSZ &&
			    memcmp(ndata, ELF_NOTE_PAX_NAME,
			    ELF_NOTE_PAX_NAMESZ) == 0) {
				uint32_t flags;
				memcpy(&flags, ndesc, sizeof(flags));
				/* Convert the flags and insert them into
				 * the exec package. */
				pax_setup_elf_flags(epp, flags);
				break;
			}
			BADNOTE("PaX tag");
			goto bad;

		case ELF_NOTE_TYPE_MARCH_TAG:
			/* Copy the machine arch into the package. */
			if (np->n_namesz == ELF_NOTE_MARCH_NAMESZ
			    && memcmp(ndata, ELF_NOTE_MARCH_NAME,
			    ELF_NOTE_MARCH_NAMESZ) == 0) {
				/* Do not truncate the buffer */
				if (np->n_descsz > sizeof(epp->ep_machine_arch)) {
					BADNOTE("description size limit");
					goto bad;
				}
				/*
				 * Ensure ndesc is NUL-terminated and of the
				 * expected length.
				 */
				if (strnlen(ndesc, np->n_descsz) + 1 !=
				    np->n_descsz) {
					BADNOTE("description size");
					goto bad;
				}
				strlcpy(epp->ep_machine_arch, ndesc,
				    sizeof(epp->ep_machine_arch));
				break;
			}
			BADNOTE("march tag");
			goto bad;

		case ELF_NOTE_TYPE_MCMODEL_TAG:
			/* arch specific check for code model */
#ifdef ELF_MD_MCMODEL_CHECK
			if (np->n_namesz == ELF_NOTE_MCMODEL_NAMESZ
			    && memcmp(ndata, ELF_NOTE_MCMODEL_NAME,
			    ELF_NOTE_MCMODEL_NAMESZ) == 0) {
				ELF_MD_MCMODEL_CHECK(epp, ndesc, np->n_descsz);
				break;
			}
			BADNOTE("mcmodel tag");
			goto bad;
#endif
			break;

		case ELF_NOTE_TYPE_SUSE_VERSION_TAG:
			break;

		case ELF_NOTE_TYPE_GO_BUILDID_TAG:
			break;

		default:
			BADNOTE("unknown tag");
bad:
#ifdef DIAGNOSTIC
			/* Ignore GNU tags */
			if (np->n_namesz == ELF_NOTE_GNU_NAMESZ &&
			    memcmp(ndata, ELF_NOTE_GNU_NAME,
			    ELF_NOTE_GNU_NAMESZ) == 0)
				break;

			int ns = MIN(np->n_namesz, shp->sh_size - sizeof(*np));
			printf("%s: Unknown elf note type %d (%s): "
			    "[namesz=%d, descsz=%d name=%-*.*s]\n",
			    epp->ep_kname, np->n_type, badnote, np->n_namesz,
			    np->n_descsz, ns, ns, ndata);
#endif
			break;
		}
	}
	kmem_free(np, ELF_MAXNOTESIZE);

	error = isnetbsd ? 0 : ENOEXEC;
#ifdef DEBUG_ELF
	if (error)
		DPRINTF("not netbsd");
#endif
out:
	kmem_free(sh, shsize);
	return error;
}

int
netbsd_elf_probe(struct lwp *l, struct exec_package *epp, void *eh, char *itp,
    vaddr_t *pos)
{
	int error;

	if ((error = netbsd_elf_signature(l, epp, eh)) != 0)
		return error;
#ifdef ELF_MD_PROBE_FUNC
	if ((error = ELF_MD_PROBE_FUNC(l, epp, eh, itp, pos)) != 0)
		return error;
#elif defined(ELF_INTERP_NON_RELOCATABLE)
	*pos = ELF_LINK_ADDR;
#endif
	epp->ep_flags |= EXEC_FORCEAUX;
	return 0;
}

void
elf_free_emul_arg(void *arg)
{
	struct elf_args *ap = arg;
	KASSERT(ap != NULL);
	kmem_free(ap, sizeof(*ap));
}