/*	$OpenBSD: exec_elf.c,v 1.169 2022/10/21 18:10:56 deraadt Exp $	*/

/*
 * Copyright (c) 1996 Per Fogelstrom
 * All rights reserved.
 *
 * Copyright (c) 1994 Christos Zoulas
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

/*
 * Copyright (c) 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/core.h>
#include <sys/exec.h>
#include <sys/exec_elf.h>
#include <sys/fcntl.h>
#include <sys/ptrace.h>
#include <sys/signalvar.h>
#include <sys/pledge.h>

#include <sys/mman.h>

#include <uvm/uvm_extern.h>

#include <machine/reg.h>
#include <machine/exec.h>

int	elf_load_file(struct proc *, char *, struct exec_package *,
	    struct elf_args *);
int	elf_check_header(Elf_Ehdr *);
int	elf_read_from(struct proc *, struct vnode *, u_long, void *, int);
void	elf_load_psection(struct exec_vmcmd_set *, struct vnode *,
	    Elf_Phdr *, Elf_Addr *, Elf_Addr *, int *, int);
int	elf_os_pt_note_name(Elf_Note *);
int	elf_os_pt_note(struct proc *, struct exec_package *, Elf_Ehdr *, int *);

/* round up and down to alignment boundaries. */
#define ELF_ROUND(a, b)		(((a) + (b) - 1) & ~((b) - 1))
#define ELF_TRUNC(a, b)		((a) & ~((b) - 1))
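
/*
 * Quick illustration (not part of the original source) of how the two
 * macros behave for a power-of-two boundary, e.g. b == 0x1000:
 *	ELF_ROUND(0x1234, 0x1000) == 0x2000	(round up)
 *	ELF_ROUND(0x2000, 0x1000) == 0x2000	(already aligned)
 *	ELF_TRUNC(0x1234, 0x1000) == 0x1000	(round down)
 * Both assume "b" is a power of two, which holds for the p_align and
 * page-size values they are used with below.
 */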

/*
 * We limit the number of program headers to 32; this should
 * be a reasonable limit for ELF, the most we have seen so far is 12.
 */
#define ELF_MAX_VALID_PHDR 32

#define ELF_NOTE_NAME_OPENBSD	0x01

struct elf_note_name {
	char *name;
	int id;
} elf_note_names[] = {
	{ "OpenBSD",	ELF_NOTE_NAME_OPENBSD },
};

#define	ELFROUNDSIZE	sizeof(Elf_Word)
#define	elfround(x)	roundup((x), ELFROUNDSIZE)


/*
 * Check header for validity; return 0 for ok, ENOEXEC if error
 */
int
elf_check_header(Elf_Ehdr *ehdr)
{
	/*
	 * We need to check magic, class size, endianness, and version before
	 * we look at the rest of the Elf_Ehdr structure. These few elements
	 * are represented in a machine independent fashion.
	 */
	if (!IS_ELF(*ehdr) ||
	    ehdr->e_ident[EI_CLASS] != ELF_TARG_CLASS ||
	    ehdr->e_ident[EI_DATA] != ELF_TARG_DATA ||
	    ehdr->e_ident[EI_VERSION] != ELF_TARG_VER)
		return (ENOEXEC);

	/* Now check the machine dependent header */
	if (ehdr->e_machine != ELF_TARG_MACH ||
	    ehdr->e_version != ELF_TARG_VER)
		return (ENOEXEC);

	/* Don't allow an insane number of program headers. */
	if (ehdr->e_phnum > ELF_MAX_VALID_PHDR)
		return (ENOEXEC);

	return (0);
}
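
/*
 * For reference (an illustrative aside, not taken from this file): on a
 * 64-bit little-endian platform the machine-independent fields checked
 * above would be ELFCLASS64 for EI_CLASS and ELFDATA2LSB for EI_DATA,
 * with EI_VERSION and e_version equal to EV_CURRENT; the ELF_TARG_*
 * constants are expected to come from <machine/exec.h> and expand to
 * the matching native values.
 */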

/*
 * Load a psection at the appropriate address
 */
void
elf_load_psection(struct exec_vmcmd_set *vcset, struct vnode *vp,
    Elf_Phdr *ph, Elf_Addr *addr, Elf_Addr *size, int *prot, int flags)
{
	u_long msize, lsize, psize, rm, rf;
	long diff, offset, bdiff;
	Elf_Addr base;

	/*
	 * If the user specified an address, then we load there.
	 */
	if (*addr != ELF_NO_ADDR) {
		if (ph->p_align > 1) {
			*addr = ELF_TRUNC(*addr, ph->p_align);
			diff = ph->p_vaddr - ELF_TRUNC(ph->p_vaddr, ph->p_align);
			/* page align vaddr */
			base = *addr + trunc_page(ph->p_vaddr)
			    - ELF_TRUNC(ph->p_vaddr, ph->p_align);
		} else {
			diff = 0;
			base = *addr + trunc_page(ph->p_vaddr) - ph->p_vaddr;
		}
	} else {
		*addr = ph->p_vaddr;
		if (ph->p_align > 1)
			*addr = ELF_TRUNC(*addr, ph->p_align);
		base = trunc_page(ph->p_vaddr);
		diff = ph->p_vaddr - *addr;
	}
	bdiff = ph->p_vaddr - trunc_page(ph->p_vaddr);

	/*
	 * Enforce W^X and map W|X segments without X permission
	 * initially.  The dynamic linker will make these read-only
	 * and add back X permission after relocation processing.
	 * Static executables with W|X segments will probably crash.
	 * Apply immutability as much as possible, but not for RELRO
	 * or PT_OPENBSD_MUTABLE sections, or LOADS marked
	 * PF_OPENBSD_MUTABLE, or LOADS which violate W^X.  Userland
	 * (meaning crt0 or ld.so) will repair those regions.
	 */
	*prot |= (ph->p_flags & PF_R) ? PROT_READ : 0;
	*prot |= (ph->p_flags & PF_W) ? PROT_WRITE : 0;
	if ((ph->p_flags & PF_W) == 0)
		*prot |= (ph->p_flags & PF_X) ? PROT_EXEC : 0;
	if ((ph->p_flags & (PF_X | PF_W)) != (PF_X | PF_W) &&
	    (ph->p_flags & PF_OPENBSD_MUTABLE) == 0)
		flags |= VMCMD_IMMUTABLE;

	msize = ph->p_memsz + diff;
	offset = ph->p_offset - bdiff;
	lsize = ph->p_filesz + bdiff;
	psize = round_page(lsize);

	/*
	 * Because the pagedvn pager can't handle zero fill of the last
	 * data page if it's not page aligned, we map the last page readvn.
	 */
	if (ph->p_flags & PF_W) {
		psize = trunc_page(lsize);
		if (psize > 0)
			NEW_VMCMD2(vcset, vmcmd_map_pagedvn, psize, base, vp,
			    offset, *prot, flags);
		if (psize != lsize) {
			NEW_VMCMD2(vcset, vmcmd_map_readvn, lsize - psize,
			    base + psize, vp, offset + psize, *prot, flags);
		}
	} else {
		NEW_VMCMD2(vcset, vmcmd_map_pagedvn, psize, base, vp, offset,
		    *prot, flags);
	}

	/*
	 * Check if we need to extend the size of the segment
	 */
	rm = round_page(*addr + ph->p_memsz + diff);
	rf = round_page(*addr + ph->p_filesz + diff);

	if (rm != rf) {
		NEW_VMCMD2(vcset, vmcmd_map_zero, rm - rf, rf, NULLVP, 0,
		    *prot, flags);
	}
	*size = msize;
}
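
/*
 * Worked example of the split above (illustrative only; assumes a 4 KiB
 * PAGE_SIZE and a writable segment): with p_vaddr = 0x5800 and
 * p_filesz = 0x900, bdiff = 0x800, so the mapping starts at the page
 * boundary 0x5000 with lsize = 0x1100 bytes of file data.  The first
 * trunc_page(0x1100) = 0x1000 bytes are mapped with vmcmd_map_pagedvn,
 * and the remaining 0x100 bytes go through vmcmd_map_readvn so the rest
 * of that final page can be zero-filled.
 */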

/*
 * Read from vnode into buffer at offset.
 */
int
elf_read_from(struct proc *p, struct vnode *vp, u_long off, void *buf,
    int size)
{
	int error;
	size_t resid;

	if ((error = vn_rdwr(UIO_READ, vp, buf, size, off, UIO_SYSSPACE,
	    0, p->p_ucred, &resid, p)) != 0)
		return error;
	/*
	 * See if we got all of it
	 */
	if (resid != 0)
		return (ENOEXEC);
	return (0);
}

/*
 * Load a file (interpreter/library) pointed to by path [stolen from
 * coff_load_shlib()]. Made slightly generic so it might be used externally.
 */
int
elf_load_file(struct proc *p, char *path, struct exec_package *epp,
    struct elf_args *ap)
{
	int error, i;
	struct nameidata nd;
	Elf_Ehdr eh;
	Elf_Phdr *ph = NULL;
	u_long phsize = 0;
	Elf_Addr addr;
	struct vnode *vp;
	Elf_Phdr *base_ph = NULL;
	struct interp_ld_sec {
		Elf_Addr vaddr;
		u_long memsz;
	} loadmap[ELF_MAX_VALID_PHDR];
	int nload, idx = 0;
	Elf_Addr pos;
	int file_align;
	int loop;
	size_t randomizequota = ELF_RANDOMIZE_LIMIT;

	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, path, p);
	nd.ni_pledge = PLEDGE_RPATH;
	nd.ni_unveil = UNVEIL_READ;
	if ((error = namei(&nd)) != 0) {
		return (error);
	}
	vp = nd.ni_vp;
	if (vp->v_type != VREG) {
		error = EACCES;
		goto bad;
	}
	if ((error = VOP_GETATTR(vp, epp->ep_vap, p->p_ucred, p)) != 0)
		goto bad;
	if (vp->v_mount->mnt_flag & MNT_NOEXEC) {
		error = EACCES;
		goto bad;
	}
	if ((error = VOP_ACCESS(vp, VREAD, p->p_ucred, p)) != 0)
		goto bad1;
	if ((error = elf_read_from(p, nd.ni_vp, 0, &eh, sizeof(eh))) != 0)
		goto bad1;

	if (elf_check_header(&eh) || eh.e_type != ET_DYN) {
		error = ENOEXEC;
		goto bad1;
	}

	ph = mallocarray(eh.e_phnum, sizeof(Elf_Phdr), M_TEMP, M_WAITOK);
	phsize = eh.e_phnum * sizeof(Elf_Phdr);

	if ((error = elf_read_from(p, nd.ni_vp, eh.e_phoff, ph, phsize)) != 0)
		goto bad1;

	for (i = 0; i < eh.e_phnum; i++) {
		if (ph[i].p_type == PT_LOAD) {
			if (ph[i].p_filesz > ph[i].p_memsz ||
			    ph[i].p_memsz == 0) {
				error = EINVAL;
				goto bad1;
			}
			loadmap[idx].vaddr = trunc_page(ph[i].p_vaddr);
			loadmap[idx].memsz = round_page(ph[i].p_vaddr +
			    ph[i].p_memsz - loadmap[idx].vaddr);
			file_align = ph[i].p_align;
			idx++;
		}
	}
	nload = idx;

	/*
	 * Load the interpreter where a non-fixed mmap(NULL, ...)
	 * would (i.e. something safely out of the way).
	 */
	pos = uvm_map_hint(p->p_vmspace, PROT_EXEC, VM_MIN_ADDRESS,
	    VM_MAXUSER_ADDRESS);
	pos = ELF_ROUND(pos, file_align);

	loop = 0;
	for (i = 0; i < nload;/**/) {
		vaddr_t addr;
		struct uvm_object *uobj;
		off_t uoff;
		size_t size;

#ifdef this_needs_fixing
		if (i == 0) {
			uobj = &vp->v_uvm.u_obj;
			/* need to fix uoff */
		} else {
#endif
			uobj = NULL;
			uoff = 0;
#ifdef this_needs_fixing
		}
#endif

		addr = trunc_page(pos + loadmap[i].vaddr);
		size = round_page(addr + loadmap[i].memsz) - addr;

		/* CRAP - map_findspace does not avoid daddr+BRKSIZ */
		if ((addr + size > (vaddr_t)p->p_vmspace->vm_daddr) &&
		    (addr < (vaddr_t)p->p_vmspace->vm_daddr + BRKSIZ))
			addr = round_page((vaddr_t)p->p_vmspace->vm_daddr +
			    BRKSIZ);

		if (uvm_map_mquery(&p->p_vmspace->vm_map, &addr, size,
		    (i == 0 ? uoff : UVM_UNKNOWN_OFFSET), 0) != 0) {
			if (loop == 0) {
				loop = 1;
				i = 0;
				pos = 0;
				continue;
			}
			error = ENOMEM;
			goto bad1;
		}
		if (addr != pos + loadmap[i].vaddr) {
			/* base changed. */
			pos = addr - trunc_page(loadmap[i].vaddr);
			pos = ELF_ROUND(pos, file_align);
			i = 0;
			continue;
		}

		i++;
	}

	/*
	 * Load all the necessary sections
	 */
	for (i = 0; i < eh.e_phnum; i++) {
		Elf_Addr size = 0;
		int prot = 0;
		int flags;

		switch (ph[i].p_type) {
		case PT_LOAD:
			if (base_ph == NULL) {
				flags = VMCMD_BASE;
				addr = pos;
				base_ph = &ph[i];
			} else {
				flags = VMCMD_RELATIVE;
				addr = ph[i].p_vaddr - base_ph->p_vaddr;
			}
			elf_load_psection(&epp->ep_vmcmds, nd.ni_vp,
			    &ph[i], &addr, &size, &prot, flags | VMCMD_SYSCALL);
			/* If entry is within this section it must be text */
			if (eh.e_entry >= ph[i].p_vaddr &&
			    eh.e_entry < (ph[i].p_vaddr + size)) {
				epp->ep_entry = addr + eh.e_entry -
				    ELF_TRUNC(ph[i].p_vaddr, ph[i].p_align);
				if (flags == VMCMD_RELATIVE)
					epp->ep_entry += pos;
				ap->arg_interp = pos;
			}
			addr += size;
			break;

		case PT_DYNAMIC:
		case PT_PHDR:
		case PT_NOTE:
			break;

		case PT_OPENBSD_RANDOMIZE:
			if (ph[i].p_memsz > randomizequota) {
				error = ENOMEM;
				goto bad1;
			}
			randomizequota -= ph[i].p_memsz;
			NEW_VMCMD(&epp->ep_vmcmds, vmcmd_randomize,
			    ph[i].p_memsz, ph[i].p_vaddr + pos, NULLVP, 0, 0);
			break;

		case PT_GNU_RELRO:
		case PT_OPENBSD_MUTABLE:
			NEW_VMCMD(&epp->ep_vmcmds, vmcmd_mutable,
			    ph[i].p_memsz, ph[i].p_vaddr + pos, NULLVP, 0, 0);
			break;

		default:
			break;
		}
	}

	vn_marktext(nd.ni_vp);

bad1:
	VOP_CLOSE(nd.ni_vp, FREAD, p->p_ucred, p);
bad:
	free(ph, M_TEMP, phsize);

	vput(nd.ni_vp);
	return (error);
}

/*
 * Prepare an Elf binary's exec package
 *
 * First, set up the various offsets/lengths in the exec package.
 *
 * Then, mark the text image busy (so it can be demand paged) or error out if
 * this is not possible.  Finally, set up vmcmds for the text, data, bss, and
 * stack segments.
 */
int
exec_elf_makecmds(struct proc *p, struct exec_package *epp)
{
	Elf_Ehdr *eh = epp->ep_hdr;
	Elf_Phdr *ph, *pp, *base_ph = NULL;
	Elf_Addr phdr = 0, exe_base = 0;
	int error, i, has_phdr = 0, names = 0;
	char *interp = NULL;
	u_long phsize;
	size_t randomizequota = ELF_RANDOMIZE_LIMIT;

	if (epp->ep_hdrvalid < sizeof(Elf_Ehdr))
		return (ENOEXEC);

	if (elf_check_header(eh) ||
	    (eh->e_type != ET_EXEC && eh->e_type != ET_DYN))
		return (ENOEXEC);

	/*
	 * Check if the vnode is open for writing, because we want to
	 * demand-page out of it.  If it is, don't do it, for various reasons.
	 */
	if (epp->ep_vp->v_writecount != 0) {
#ifdef DIAGNOSTIC
		if (epp->ep_vp->v_flag & VTEXT)
			panic("exec: a VTEXT vnode has writecount != 0");
#endif
		return (ETXTBSY);
	}
	/*
	 * Allocate space to hold all the program headers, and read them
	 * from the file
	 */
	ph = mallocarray(eh->e_phnum, sizeof(Elf_Phdr), M_TEMP, M_WAITOK);
	phsize = eh->e_phnum * sizeof(Elf_Phdr);

	if ((error = elf_read_from(p, epp->ep_vp, eh->e_phoff, ph,
	    phsize)) != 0)
		goto bad;

	epp->ep_tsize = ELF_NO_ADDR;
	epp->ep_dsize = ELF_NO_ADDR;

	for (i = 0, pp = ph; i < eh->e_phnum; i++, pp++) {
		if (pp->p_type == PT_INTERP && !interp) {
			if (pp->p_filesz < 2 || pp->p_filesz > MAXPATHLEN)
				goto bad;
			interp = pool_get(&namei_pool, PR_WAITOK);
			if ((error = elf_read_from(p, epp->ep_vp,
			    pp->p_offset, interp, pp->p_filesz)) != 0) {
				goto bad;
			}
			if (interp[pp->p_filesz - 1] != '\0')
				goto bad;
		} else if (pp->p_type == PT_LOAD) {
			if (pp->p_filesz > pp->p_memsz ||
			    pp->p_memsz == 0) {
				error = EINVAL;
				goto bad;
			}
			if (base_ph == NULL)
				base_ph = pp;
		} else if (pp->p_type == PT_PHDR) {
			has_phdr = 1;
		}
	}

	if (eh->e_type == ET_DYN) {
		/* need phdr and load sections for PIE */
		if (!has_phdr || base_ph == NULL) {
			error = EINVAL;
			goto bad;
		}
		/* randomize exe_base for PIE */
		exe_base = uvm_map_pie(base_ph->p_align);
	}

	/*
	 * Verify this is an OpenBSD executable.  If it's marked that way
	 * via a PT_NOTE then also check for a PT_OPENBSD_WXNEEDED segment.
	 */
	if ((error = elf_os_pt_note(p, epp, epp->ep_hdr, &names)) != 0)
		goto bad;
	if (eh->e_ident[EI_OSABI] == ELFOSABI_OPENBSD)
		names |= ELF_NOTE_NAME_OPENBSD;

	/*
	 * Load all the necessary sections
	 */
	for (i = 0, pp = ph; i < eh->e_phnum; i++, pp++) {
		Elf_Addr addr, size = 0;
		int prot = 0;
		int flags = 0;

		switch (pp->p_type) {
		case PT_LOAD:
			if (exe_base != 0) {
				if (pp == base_ph) {
					flags = VMCMD_BASE;
					addr = exe_base;
				} else {
					flags = VMCMD_RELATIVE;
					addr = pp->p_vaddr - base_ph->p_vaddr;
				}
			} else
				addr = ELF_NO_ADDR;

			/* Permit system calls in specific main-programs */
			if (interp == NULL) {
				/* statics. Also block the ld.so syscall-grant */
				flags |= VMCMD_SYSCALL;
				p->p_vmspace->vm_map.flags |= VM_MAP_SYSCALL_ONCE;
			}

			/*
			 * Calculate the size of the text and data segments
			 * by starting at the first and going to the end of
			 * the last.  'rwx' sections are treated as data.
			 * This is correct for BSS_PLT, but may not be
			 * for DATA_PLT; it is fine for TEXT_PLT.
			 */
			elf_load_psection(&epp->ep_vmcmds, epp->ep_vp,
			    pp, &addr, &size, &prot, flags);

			/*
			 * Update exe_base in case alignment was off.
			 * For PIE, addr is relative to exe_base so
			 * adjust it (non-PIE exe_base is 0 so no change).
			 */
			if (flags == VMCMD_BASE)
				exe_base = addr;
			else
				addr += exe_base;

			/*
			 * Decide whether it's text or data by looking
			 * at the protection of the section
			 */
			if (prot & PROT_WRITE) {
				/* data section */
				if (epp->ep_dsize == ELF_NO_ADDR) {
					epp->ep_daddr = addr;
					epp->ep_dsize = size;
				} else {
					if (addr < epp->ep_daddr) {
						epp->ep_dsize =
						    epp->ep_dsize +
						    epp->ep_daddr -
						    addr;
						epp->ep_daddr = addr;
					} else
						epp->ep_dsize = addr+size -
						    epp->ep_daddr;
				}
			} else if (prot & PROT_EXEC) {
				/* text section */
				if (epp->ep_tsize == ELF_NO_ADDR) {
					epp->ep_taddr = addr;
					epp->ep_tsize = size;
				} else {
					if (addr < epp->ep_taddr) {
						epp->ep_tsize =
						    epp->ep_tsize +
						    epp->ep_taddr -
						    addr;
						epp->ep_taddr = addr;
					} else
						epp->ep_tsize = addr+size -
						    epp->ep_taddr;
				}
			}
			break;

		case PT_SHLIB:
			error = ENOEXEC;
			goto bad;

		case PT_INTERP:
			/* Already did this one */
		case PT_DYNAMIC:
		case PT_NOTE:
			break;

		case PT_PHDR:
			/* Note address of program headers (in text segment) */
			phdr = pp->p_vaddr;
			break;

		case PT_OPENBSD_RANDOMIZE:
			if (ph[i].p_memsz > randomizequota) {
				error = ENOMEM;
				goto bad;
			}
			randomizequota -= ph[i].p_memsz;
			NEW_VMCMD(&epp->ep_vmcmds, vmcmd_randomize,
			    ph[i].p_memsz, ph[i].p_vaddr + exe_base, NULLVP, 0, 0);
			break;

		case PT_GNU_RELRO:
		case PT_OPENBSD_MUTABLE:
			NEW_VMCMD(&epp->ep_vmcmds, vmcmd_mutable,
			    ph[i].p_memsz, ph[i].p_vaddr + exe_base, NULLVP, 0, 0);
			break;

		default:
			/*
			 * Not fatal, we don't need to understand everything
			 * :-)
			 */
			break;
		}
	}

	phdr += exe_base;

	/*
	 * Strangely, some Linux programs may have all load sections marked
	 * writeable; in this case, textsize is not -1, but rather 0.
	 */
	if (epp->ep_tsize == ELF_NO_ADDR)
		epp->ep_tsize = 0;
	/*
	 * Another possibility is that it has all load sections marked
	 * read-only.  Fake a zero-sized data segment right after the
	 * text segment.
	 */
	if (epp->ep_dsize == ELF_NO_ADDR) {
		epp->ep_daddr = round_page(epp->ep_taddr + epp->ep_tsize);
		epp->ep_dsize = 0;
	}

	epp->ep_interp = interp;
	epp->ep_entry = eh->e_entry + exe_base;

	/*
	 * Check if we found a dynamically linked binary and arrange to load
	 * its interpreter when the exec file is released.
	 */
	if (interp || eh->e_type == ET_DYN) {
		struct elf_args *ap;

		ap = malloc(sizeof(*ap), M_TEMP, M_WAITOK);

		ap->arg_phaddr = phdr;
		ap->arg_phentsize = eh->e_phentsize;
		ap->arg_phnum = eh->e_phnum;
		ap->arg_entry = eh->e_entry + exe_base;
		ap->arg_interp = exe_base;

		epp->ep_args = ap;
	}

	free(ph, M_TEMP, phsize);
	vn_marktext(epp->ep_vp);
	return (exec_setup_stack(p, epp));

bad:
	if (interp)
		pool_put(&namei_pool, interp);
	free(ph, M_TEMP, phsize);
	kill_vmcmds(&epp->ep_vmcmds);
	if (error == 0)
		return (ENOEXEC);
	return (error);
}

/*
 * Phase II of load.  It is now safe to load the interpreter.  Info collected
 * when loading the program is available for setup of the interpreter.
 */
int
exec_elf_fixup(struct proc *p, struct exec_package *epp)
{
	char *interp;
	int error = 0;
	struct elf_args *ap;
	AuxInfo ai[ELF_AUX_ENTRIES], *a;

	ap = epp->ep_args;
	if (ap == NULL) {
		return (0);
	}

	interp = epp->ep_interp;

	if (interp &&
	    (error = elf_load_file(p, interp, epp, ap)) != 0) {
		uprintf("execve: cannot load %s\n", interp);
		free(ap, M_TEMP, sizeof *ap);
		pool_put(&namei_pool, interp);
		kill_vmcmds(&epp->ep_vmcmds);
		return (error);
	}
	/*
	 * We have to do this ourselves...
	 */
	error = exec_process_vmcmds(p, epp);

	/*
	 * Push extra arguments on the stack needed by dynamically
	 * linked binaries
	 */
	if (error == 0) {
		memset(&ai, 0, sizeof ai);
		a = ai;

		a->au_id = AUX_phdr;
		a->au_v = ap->arg_phaddr;
		a++;

		a->au_id = AUX_phent;
		a->au_v = ap->arg_phentsize;
		a++;

		a->au_id = AUX_phnum;
		a->au_v = ap->arg_phnum;
		a++;

		a->au_id = AUX_pagesz;
		a->au_v = PAGE_SIZE;
		a++;

		a->au_id = AUX_base;
		a->au_v = ap->arg_interp;
		a++;

		a->au_id = AUX_flags;
		a->au_v = 0;
		a++;

		a->au_id = AUX_entry;
		a->au_v = ap->arg_entry;
		a++;

		a->au_id = AUX_openbsd_timekeep;
		a->au_v = p->p_p->ps_timekeep;
		a++;

		a->au_id = AUX_null;
		a->au_v = 0;
		a++;

		error = copyout(ai, epp->ep_auxinfo, sizeof ai);
	}
	free(ap, M_TEMP, sizeof *ap);
	if (interp)
		pool_put(&namei_pool, interp);
	return (error);
}

int
elf_os_pt_note_name(Elf_Note *np)
{
	int i, j;

	for (i = 0; i < nitems(elf_note_names); i++) {
		size_t namlen = strlen(elf_note_names[i].name);
		if (np->namesz < namlen)
			continue;
		/* verify name padding (after the NUL) is NUL */
		for (j = namlen + 1; j < elfround(np->namesz); j++)
			if (((char *)(np + 1))[j] != '\0')
				continue;
		/* verify desc padding is NUL */
		for (j = np->descsz; j < elfround(np->descsz); j++)
			if (((char *)(np + 1) + elfround(np->namesz))[j] != '\0')
				continue;
		if (strcmp((char *)(np + 1), elf_note_names[i].name) == 0)
			return elf_note_names[i].id;
	}
	return (0);
}

int
elf_os_pt_note(struct proc *p, struct exec_package *epp, Elf_Ehdr *eh, int *namesp)
{
	Elf_Phdr *hph, *ph;
	Elf_Note *np = NULL;
	size_t phsize, offset, pfilesz = 0, total;
	int error, names = 0;

	hph = mallocarray(eh->e_phnum, sizeof(Elf_Phdr), M_TEMP, M_WAITOK);
	phsize = eh->e_phnum * sizeof(Elf_Phdr);
	if ((error = elf_read_from(p, epp->ep_vp, eh->e_phoff,
	    hph, phsize)) != 0)
		goto out1;

	for (ph = hph; ph < &hph[eh->e_phnum]; ph++) {
		if (ph->p_type == PT_OPENBSD_WXNEEDED) {
			epp->ep_flags |= EXEC_WXNEEDED;
			continue;
		}

		if (ph->p_type != PT_NOTE || ph->p_filesz > 1024)
			continue;

		if (np && ph->p_filesz != pfilesz) {
			free(np, M_TEMP, pfilesz);
			np = NULL;
		}
		if (!np)
			np = malloc(ph->p_filesz, M_TEMP, M_WAITOK);
		pfilesz = ph->p_filesz;
		if ((error = elf_read_from(p, epp->ep_vp, ph->p_offset,
		    np, ph->p_filesz)) != 0)
			goto out2;

		for (offset = 0; offset < ph->p_filesz; offset += total) {
			Elf_Note *np2 = (Elf_Note *)((char *)np + offset);

			if (offset + sizeof(Elf_Note) > ph->p_filesz)
				break;
			total = sizeof(Elf_Note) + elfround(np2->namesz) +
			    elfround(np2->descsz);
			if (offset + total > ph->p_filesz)
				break;
			names |= elf_os_pt_note_name(np2);
		}
	}

out2:
	free(np, M_TEMP, pfilesz);
out1:
	free(hph, M_TEMP, phsize);
	*namesp = names;
	return ((names & ELF_NOTE_NAME_OPENBSD) ? 0 : ENOEXEC);
}
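
/*
 * Sketch of the note records walked above (layout per the ELF spec; the
 * concrete example is illustrative, not taken from a particular binary):
 * each record is an Elf_Note header { namesz, descsz, type } followed by
 * namesz bytes of name and descsz bytes of descriptor, each padded up to
 * ELFROUNDSIZE, so a record occupies
 * sizeof(Elf_Note) + elfround(namesz) + elfround(descsz) bytes.
 * A native ident note with the name "OpenBSD\0" would thus have
 * namesz == 8 and need no name padding.
 */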
So in this case we lay 1018 * out the core file like this: 1019 * [ELF Header] [Section Headers] [.shstrtab] [Program headers] \ 1020 * [Notes] [data for PT_LOAD segments] 1021 * 1022 * The 'shstrtab' structure below is data for the second of the two 1023 * section headers, plus the .shstrtab itself, in one const buffer. 1024 */ 1025 static const struct { 1026 Elf_Shdr shdr; 1027 char shstrtab[sizeof(ELF_SHSTRTAB) + 1]; 1028 } shstrtab = { 1029 .shdr = { 1030 .sh_name = 1, /* offset in .shstrtab below */ 1031 .sh_type = SHT_STRTAB, 1032 .sh_offset = sizeof(Elf_Ehdr) + 2*sizeof(Elf_Shdr), 1033 .sh_size = sizeof(ELF_SHSTRTAB) + 1, 1034 .sh_addralign = 1, 1035 }, 1036 .shstrtab = "\0" ELF_SHSTRTAB, 1037 }; 1038 1039 int 1040 coredump_setup_elf(int segment_count, void *cookie) 1041 { 1042 Elf_Ehdr ehdr; 1043 struct writesegs_state *ws = cookie; 1044 Elf_Phdr *note; 1045 int error; 1046 1047 /* Get the count of segments, plus one for the PT_NOTE */ 1048 ws->npsections = segment_count + 1; 1049 1050 /* Get the size of the notes. */ 1051 error = coredump_notes_elf(ws->p, NULL, &ws->notesize); 1052 if (error) 1053 return error; 1054 1055 /* Setup the ELF header */ 1056 memset(&ehdr, 0, sizeof(ehdr)); 1057 memcpy(ehdr.e_ident, ELFMAG, SELFMAG); 1058 ehdr.e_ident[EI_CLASS] = ELF_TARG_CLASS; 1059 ehdr.e_ident[EI_DATA] = ELF_TARG_DATA; 1060 ehdr.e_ident[EI_VERSION] = EV_CURRENT; 1061 /* XXX Should be the OSABI/ABI version of the executable. */ 1062 ehdr.e_ident[EI_OSABI] = ELFOSABI_SYSV; 1063 ehdr.e_ident[EI_ABIVERSION] = 0; 1064 ehdr.e_type = ET_CORE; 1065 /* XXX This should be the e_machine of the executable. */ 1066 ehdr.e_machine = ELF_TARG_MACH; 1067 ehdr.e_version = EV_CURRENT; 1068 ehdr.e_entry = 0; 1069 ehdr.e_flags = 0; 1070 ehdr.e_ehsize = sizeof(ehdr); 1071 ehdr.e_phentsize = sizeof(Elf_Phdr); 1072 1073 if (ws->npsections < PN_XNUM) { 1074 ehdr.e_phoff = sizeof(ehdr); 1075 ehdr.e_shoff = 0; 1076 ehdr.e_phnum = ws->npsections; 1077 ehdr.e_shentsize = 0; 1078 ehdr.e_shnum = 0; 1079 ehdr.e_shstrndx = 0; 1080 } else { 1081 /* too many segments, use extension setup */ 1082 ehdr.e_shoff = sizeof(ehdr); 1083 ehdr.e_phnum = PN_XNUM; 1084 ehdr.e_shentsize = sizeof(Elf_Shdr); 1085 ehdr.e_shnum = 2; 1086 ehdr.e_shstrndx = 1; 1087 ehdr.e_phoff = shstrtab.shdr.sh_offset + shstrtab.shdr.sh_size; 1088 } 1089 1090 /* Write out the ELF header. */ 1091 error = coredump_write(ws->iocookie, UIO_SYSSPACE, &ehdr, sizeof(ehdr)); 1092 if (error) 1093 return error; 1094 1095 /* 1096 * If an section header is needed to store extension info, write 1097 * it out after the ELF header and before the program header. 

int
coredump_setup_elf(int segment_count, void *cookie)
{
	Elf_Ehdr ehdr;
	struct writesegs_state *ws = cookie;
	Elf_Phdr *note;
	int error;

	/* Get the count of segments, plus one for the PT_NOTE */
	ws->npsections = segment_count + 1;

	/* Get the size of the notes. */
	error = coredump_notes_elf(ws->p, NULL, &ws->notesize);
	if (error)
		return error;

	/* Setup the ELF header */
	memset(&ehdr, 0, sizeof(ehdr));
	memcpy(ehdr.e_ident, ELFMAG, SELFMAG);
	ehdr.e_ident[EI_CLASS] = ELF_TARG_CLASS;
	ehdr.e_ident[EI_DATA] = ELF_TARG_DATA;
	ehdr.e_ident[EI_VERSION] = EV_CURRENT;
	/* XXX Should be the OSABI/ABI version of the executable. */
	ehdr.e_ident[EI_OSABI] = ELFOSABI_SYSV;
	ehdr.e_ident[EI_ABIVERSION] = 0;
	ehdr.e_type = ET_CORE;
	/* XXX This should be the e_machine of the executable. */
	ehdr.e_machine = ELF_TARG_MACH;
	ehdr.e_version = EV_CURRENT;
	ehdr.e_entry = 0;
	ehdr.e_flags = 0;
	ehdr.e_ehsize = sizeof(ehdr);
	ehdr.e_phentsize = sizeof(Elf_Phdr);

	if (ws->npsections < PN_XNUM) {
		ehdr.e_phoff = sizeof(ehdr);
		ehdr.e_shoff = 0;
		ehdr.e_phnum = ws->npsections;
		ehdr.e_shentsize = 0;
		ehdr.e_shnum = 0;
		ehdr.e_shstrndx = 0;
	} else {
		/* too many segments, use extension setup */
		ehdr.e_shoff = sizeof(ehdr);
		ehdr.e_phnum = PN_XNUM;
		ehdr.e_shentsize = sizeof(Elf_Shdr);
		ehdr.e_shnum = 2;
		ehdr.e_shstrndx = 1;
		ehdr.e_phoff = shstrtab.shdr.sh_offset + shstrtab.shdr.sh_size;
	}

	/* Write out the ELF header. */
	error = coredump_write(ws->iocookie, UIO_SYSSPACE, &ehdr, sizeof(ehdr));
	if (error)
		return error;

	/*
	 * If a section header is needed to store extension info, write
	 * it out after the ELF header and before the program header.
	 */
	if (ehdr.e_shnum != 0) {
		Elf_Shdr shdr = { .sh_info = ws->npsections };
		error = coredump_write(ws->iocookie, UIO_SYSSPACE, &shdr,
		    sizeof shdr);
		if (error)
			return error;
		error = coredump_write(ws->iocookie, UIO_SYSSPACE, &shstrtab,
		    sizeof(shstrtab.shdr) + sizeof(shstrtab.shstrtab));
		if (error)
			return error;
	}

	/*
	 * Allocate the segment header array and setup to collect
	 * the section sizes and offsets
	 */
	ws->psections = mallocarray(ws->npsections, sizeof(Elf_Phdr),
	    M_TEMP, M_WAITOK|M_CANFAIL|M_ZERO);
	if (ws->psections == NULL)
		return ENOMEM;
	ws->psectionslen = ws->npsections * sizeof(Elf_Phdr);

	ws->notestart = ehdr.e_phoff + ws->psectionslen;
	ws->secstart = ws->notestart + ws->notesize;
	ws->secoff = ws->secstart;

	/* Fill in the PT_NOTE segment header in the last slot */
	note = &ws->psections[ws->npsections - 1];
	note->p_type = PT_NOTE;
	note->p_offset = ws->notestart;
	note->p_vaddr = 0;
	note->p_paddr = 0;
	note->p_filesz = ws->notesize;
	note->p_memsz = 0;
	note->p_flags = PF_R;
	note->p_align = ELFROUNDSIZE;

	return (0);
}

int
coredump_walk_elf(vaddr_t start, vaddr_t realend, vaddr_t end, vm_prot_t prot,
    int nsegment, void *cookie)
{
	struct writesegs_state *ws = cookie;
	Elf_Phdr phdr;
	vsize_t size, realsize;

	size = end - start;
	realsize = realend - start;

	phdr.p_type = PT_LOAD;
	phdr.p_offset = ws->secoff;
	phdr.p_vaddr = start;
	phdr.p_paddr = 0;
	phdr.p_filesz = realsize;
	phdr.p_memsz = size;
	phdr.p_flags = 0;
	if (prot & PROT_READ)
		phdr.p_flags |= PF_R;
	if (prot & PROT_WRITE)
		phdr.p_flags |= PF_W;
	if (prot & PROT_EXEC)
		phdr.p_flags |= PF_X;
	phdr.p_align = PAGE_SIZE;

	ws->secoff += phdr.p_filesz;
	ws->psections[nsegment] = phdr;

	return (0);
}
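
/*
 * Illustrative aside (not in the original): when a region's pages past
 * "realend" are not dumped, p_filesz (realend - start) ends up smaller
 * than p_memsz (end - start); per standard ELF semantics, readers of
 * the core file treat the missing tail of such a segment as zero-filled.
 */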

int
coredump_notes_elf(struct proc *p, void *iocookie, size_t *sizep)
{
	struct ps_strings pss;
	struct iovec iov;
	struct uio uio;
	struct elfcore_procinfo cpi;
	Elf_Note nhdr;
	struct process *pr = p->p_p;
	struct proc *q;
	size_t size, notesize;
	int error;

	KASSERT(!P_HASSIBLING(p) || pr->ps_single != NULL);
	size = 0;

	/* First, write an elfcore_procinfo. */
	notesize = sizeof(nhdr) + elfround(sizeof("OpenBSD")) +
	    elfround(sizeof(cpi));
	if (iocookie) {
		memset(&cpi, 0, sizeof(cpi));

		cpi.cpi_version = ELFCORE_PROCINFO_VERSION;
		cpi.cpi_cpisize = sizeof(cpi);
		cpi.cpi_signo = p->p_sisig;
		cpi.cpi_sigcode = p->p_sicode;

		cpi.cpi_sigpend = p->p_siglist | pr->ps_siglist;
		cpi.cpi_sigmask = p->p_sigmask;
		cpi.cpi_sigignore = pr->ps_sigacts->ps_sigignore;
		cpi.cpi_sigcatch = pr->ps_sigacts->ps_sigcatch;

		cpi.cpi_pid = pr->ps_pid;
		cpi.cpi_ppid = pr->ps_ppid;
		cpi.cpi_pgrp = pr->ps_pgid;
		if (pr->ps_session->s_leader)
			cpi.cpi_sid = pr->ps_session->s_leader->ps_pid;
		else
			cpi.cpi_sid = 0;

		cpi.cpi_ruid = p->p_ucred->cr_ruid;
		cpi.cpi_euid = p->p_ucred->cr_uid;
		cpi.cpi_svuid = p->p_ucred->cr_svuid;

		cpi.cpi_rgid = p->p_ucred->cr_rgid;
		cpi.cpi_egid = p->p_ucred->cr_gid;
		cpi.cpi_svgid = p->p_ucred->cr_svgid;

		(void)strlcpy(cpi.cpi_name, pr->ps_comm, sizeof(cpi.cpi_name));

		nhdr.namesz = sizeof("OpenBSD");
		nhdr.descsz = sizeof(cpi);
		nhdr.type = NT_OPENBSD_PROCINFO;

		error = coredump_writenote_elf(p, iocookie, &nhdr,
		    "OpenBSD", &cpi);
		if (error)
			return (error);
	}
	size += notesize;

	/* Second, write an NT_OPENBSD_AUXV note. */
	notesize = sizeof(nhdr) + elfround(sizeof("OpenBSD")) +
	    elfround(ELF_AUX_WORDS * sizeof(char *));
	if (iocookie) {
		iov.iov_base = &pss;
		iov.iov_len = sizeof(pss);
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = (off_t)pr->ps_strings;
		uio.uio_resid = sizeof(pss);
		uio.uio_segflg = UIO_SYSSPACE;
		uio.uio_rw = UIO_READ;
		uio.uio_procp = NULL;

		error = uvm_io(&p->p_vmspace->vm_map, &uio, 0);
		if (error)
			return (error);

		if (pss.ps_envstr == NULL)
			return (EIO);

		nhdr.namesz = sizeof("OpenBSD");
		nhdr.descsz = ELF_AUX_WORDS * sizeof(char *);
		nhdr.type = NT_OPENBSD_AUXV;

		error = coredump_write(iocookie, UIO_SYSSPACE,
		    &nhdr, sizeof(nhdr));
		if (error)
			return (error);

		error = coredump_write(iocookie, UIO_SYSSPACE,
		    "OpenBSD", elfround(nhdr.namesz));
		if (error)
			return (error);

		error = coredump_write(iocookie, UIO_USERSPACE,
		    pss.ps_envstr + pss.ps_nenvstr + 1, nhdr.descsz);
		if (error)
			return (error);
	}
	size += notesize;

#ifdef PT_WCOOKIE
	notesize = sizeof(nhdr) + elfround(sizeof("OpenBSD")) +
	    elfround(sizeof(register_t));
	if (iocookie) {
		register_t wcookie;

		nhdr.namesz = sizeof("OpenBSD");
		nhdr.descsz = sizeof(register_t);
		nhdr.type = NT_OPENBSD_WCOOKIE;

		wcookie = process_get_wcookie(p);
		error = coredump_writenote_elf(p, iocookie, &nhdr,
		    "OpenBSD", &wcookie);
		if (error)
			return (error);
	}
	size += notesize;
#endif

	/*
	 * Now write the register info for the thread that caused the
	 * coredump.
	 */
	error = coredump_note_elf(p, iocookie, &notesize);
	if (error)
		return (error);
	size += notesize;

	/*
	 * Now, for each thread, write the register info and any other
	 * per-thread notes.  Since we're dumping core, all the other
	 * threads in the process have been stopped and the list can't
	 * change.
	 */
	TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link) {
		if (q == p)		/* we've taken care of this thread */
			continue;
		error = coredump_note_elf(q, iocookie, &notesize);
		if (error)
			return (error);
		size += notesize;
	}

	*sizep = size;
	return (0);
}

int
coredump_note_elf(struct proc *p, void *iocookie, size_t *sizep)
{
	Elf_Note nhdr;
	int size, notesize, error;
	int namesize;
	char name[64+ELFROUNDSIZE];
	struct reg intreg;
#ifdef PT_GETFPREGS
	struct fpreg freg;
#endif

	size = 0;

	snprintf(name, sizeof(name)-ELFROUNDSIZE, "%s@%d",
	    "OpenBSD", p->p_tid + THREAD_PID_OFFSET);
	namesize = strlen(name) + 1;
	memset(name + namesize, 0, elfround(namesize) - namesize);

	notesize = sizeof(nhdr) + elfround(namesize) + elfround(sizeof(intreg));
	if (iocookie) {
		error = process_read_regs(p, &intreg);
		if (error)
			return (error);

		nhdr.namesz = namesize;
		nhdr.descsz = sizeof(intreg);
		nhdr.type = NT_OPENBSD_REGS;

		error = coredump_writenote_elf(p, iocookie, &nhdr,
		    name, &intreg);
		if (error)
			return (error);

	}
	size += notesize;

#ifdef PT_GETFPREGS
	notesize = sizeof(nhdr) + elfround(namesize) + elfround(sizeof(freg));
	if (iocookie) {
		error = process_read_fpregs(p, &freg);
		if (error)
			return (error);

		nhdr.namesz = namesize;
		nhdr.descsz = sizeof(freg);
		nhdr.type = NT_OPENBSD_FPREGS;

		error = coredump_writenote_elf(p, iocookie, &nhdr, name, &freg);
		if (error)
			return (error);
	}
	size += notesize;
#endif

	*sizep = size;
	/* XXX Add hook for machdep per-LWP notes. */
	return (0);
}

int
coredump_writenote_elf(struct proc *p, void *cookie, Elf_Note *nhdr,
    const char *name, void *data)
{
	int error;

	error = coredump_write(cookie, UIO_SYSSPACE, nhdr, sizeof(*nhdr));
	if (error)
		return error;

	error = coredump_write(cookie, UIO_SYSSPACE, name,
	    elfround(nhdr->namesz));
	if (error)
		return error;

	return coredump_write(cookie, UIO_SYSSPACE, data, nhdr->descsz);
}
#endif /* !SMALL_KERNEL */