/*	$OpenBSD: exec_elf.c,v 1.174 2022/11/05 10:31:16 deraadt Exp $	*/

/*
 * Copyright (c) 1996 Per Fogelstrom
 * All rights reserved.
 *
 * Copyright (c) 1994 Christos Zoulas
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

/*
 * Copyright (c) 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/core.h>
#include <sys/exec.h>
#include <sys/exec_elf.h>
#include <sys/fcntl.h>
#include <sys/ptrace.h>
#include <sys/signalvar.h>
#include <sys/pledge.h>

#include <sys/mman.h>

#include <uvm/uvm_extern.h>

#include <machine/reg.h>
#include <machine/exec.h>

int	elf_load_file(struct proc *, char *, struct exec_package *,
	    struct elf_args *);
int	elf_check_header(Elf_Ehdr *);
int	elf_read_from(struct proc *, struct vnode *, u_long, void *, int);
void	elf_load_psection(struct exec_vmcmd_set *, struct vnode *,
	    Elf_Phdr *, Elf_Addr *, Elf_Addr *, int *, int);
int	elf_os_pt_note_name(Elf_Note *);
int	elf_os_pt_note(struct proc *, struct exec_package *, Elf_Ehdr *, int *);

/* round up and down to page boundaries. */
#define	ELF_ROUND(a, b)		(((a) + (b) - 1) & ~((b) - 1))
#define	ELF_TRUNC(a, b)		((a) & ~((b) - 1))

/*
 * We limit the number of program headers to 32; this should
 * be a reasonable limit for ELF, the most we have seen so far is 12.
 */
#define ELF_MAX_VALID_PHDR 32

#define ELF_NOTE_NAME_OPENBSD	0x01

struct elf_note_name {
	char *name;
	int id;
} elf_note_names[] = {
	{ "OpenBSD",	ELF_NOTE_NAME_OPENBSD },
};

#define	ELFROUNDSIZE	sizeof(Elf_Word)
#define	elfround(x)	roundup((x), ELFROUNDSIZE)


/*
 * Check header for validity; return 0 for ok, ENOEXEC if error
 */
int
elf_check_header(Elf_Ehdr *ehdr)
{
	/*
	 * We need to check magic, class size, endianness, and version before
	 * we look at the rest of the Elf_Ehdr structure.  These few elements
	 * are represented in a machine independent fashion.
	 */
	if (!IS_ELF(*ehdr) ||
	    ehdr->e_ident[EI_CLASS] != ELF_TARG_CLASS ||
	    ehdr->e_ident[EI_DATA] != ELF_TARG_DATA ||
	    ehdr->e_ident[EI_VERSION] != ELF_TARG_VER)
		return (ENOEXEC);

	/* Now check the machine dependent header */
	if (ehdr->e_machine != ELF_TARG_MACH ||
	    ehdr->e_version != ELF_TARG_VER)
		return (ENOEXEC);

	/* Don't allow an insane number of program headers. */
	if (ehdr->e_phnum > ELF_MAX_VALID_PHDR)
		return (ENOEXEC);

	return (0);
}

/*
 * Load a psection at the appropriate address
 */
void
elf_load_psection(struct exec_vmcmd_set *vcset, struct vnode *vp,
    Elf_Phdr *ph, Elf_Addr *addr, Elf_Addr *size, int *prot, int flags)
{
	u_long msize, lsize, psize, rm, rf;
	long diff, offset, bdiff;
	Elf_Addr base;

	/*
	 * If the user specified an address, then we load there.
	 */
	if (*addr != ELF_NO_ADDR) {
		if (ph->p_align > 1) {
			*addr = ELF_TRUNC(*addr, ph->p_align);
			diff = ph->p_vaddr - ELF_TRUNC(ph->p_vaddr, ph->p_align);
			/* page align vaddr */
			base = *addr + trunc_page(ph->p_vaddr)
			    - ELF_TRUNC(ph->p_vaddr, ph->p_align);
		} else {
			diff = 0;
			base = *addr + trunc_page(ph->p_vaddr) - ph->p_vaddr;
		}
	} else {
		*addr = ph->p_vaddr;
		if (ph->p_align > 1)
			*addr = ELF_TRUNC(*addr, ph->p_align);
		base = trunc_page(ph->p_vaddr);
		diff = ph->p_vaddr - *addr;
	}
	bdiff = ph->p_vaddr - trunc_page(ph->p_vaddr);

	/*
	 * Enforce W^X and map W|X segments without X permission
	 * initially.  The dynamic linker will make these read-only
	 * and add back X permission after relocation processing.
	 * Static executables with W|X segments will probably crash.
	 */
	*prot |= (ph->p_flags & PF_R) ? PROT_READ : 0;
	*prot |= (ph->p_flags & PF_W) ? PROT_WRITE : 0;
	if ((ph->p_flags & PF_W) == 0)
		*prot |= (ph->p_flags & PF_X) ? PROT_EXEC : 0;

	/*
	 * Apply immutability as much as possible, but not text/rodata
	 * segments of textrel binaries, or RELRO or PT_OPENBSD_MUTABLE
	 * sections, or LOADS marked PF_OPENBSD_MUTABLE, or LOADS which
	 * violate W^X.
	 * Userland (meaning crt0 or ld.so) will repair those regions.
	 */
	if ((ph->p_flags & (PF_X | PF_W)) != (PF_X | PF_W) &&
	    ((ph->p_flags & PF_OPENBSD_MUTABLE) == 0))
		flags |= VMCMD_IMMUTABLE;
	if ((flags & VMCMD_TEXTREL) && (ph->p_flags & PF_W) == 0)
		flags &= ~VMCMD_IMMUTABLE;

	msize = ph->p_memsz + diff;
	offset = ph->p_offset - bdiff;
	lsize = ph->p_filesz + bdiff;
	psize = round_page(lsize);

	/*
	 * Because the pagedvn pager can't handle zero fill of the last
	 * data page if it's not page aligned, we map the last page readvn.
	 */
	if (ph->p_flags & PF_W) {
		psize = trunc_page(lsize);
		if (psize > 0)
			NEW_VMCMD2(vcset, vmcmd_map_pagedvn, psize, base, vp,
			    offset, *prot, flags);
		if (psize != lsize) {
			NEW_VMCMD2(vcset, vmcmd_map_readvn, lsize - psize,
			    base + psize, vp, offset + psize, *prot, flags);
		}
	} else {
		NEW_VMCMD2(vcset, vmcmd_map_pagedvn, psize, base, vp, offset,
		    *prot, flags);
	}

	/*
	 * Check if we need to extend the size of the segment
	 */
	rm = round_page(*addr + ph->p_memsz + diff);
	rf = round_page(*addr + ph->p_filesz + diff);

	if (rm != rf) {
		NEW_VMCMD2(vcset, vmcmd_map_zero, rm - rf, rf, NULLVP, 0,
		    *prot, flags);
	}
	*size = msize;
}
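
/*
 * Illustrative sketch (added commentary, not from the original source):
 * with PAGE_SIZE 0x1000 and a page-aligned, writable PT_LOAD having
 * p_filesz = 0x2345 and p_memsz = 0x6000, the code above emits a 0x2000
 * vmcmd_map_pagedvn, a 0x345 vmcmd_map_readvn for the partial last file
 * page, and a vmcmd_map_zero from round_page(p_filesz) up to
 * round_page(p_memsz) for the BSS.  The numbers are examples only.
 */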

/*
 * Read from vnode into buffer at offset.
 */
int
elf_read_from(struct proc *p, struct vnode *vp, u_long off, void *buf,
    int size)
{
	int error;
	size_t resid;

	if ((error = vn_rdwr(UIO_READ, vp, buf, size, off, UIO_SYSSPACE,
	    0, p->p_ucred, &resid, p)) != 0)
		return error;
	/*
	 * See if we got all of it
	 */
	if (resid != 0)
		return (ENOEXEC);
	return (0);
}

/*
 * Load a file (interpreter/library) pointed to by path [stolen from
 * coff_load_shlib()].  Made slightly generic so it might be used externally.
 */
int
elf_load_file(struct proc *p, char *path, struct exec_package *epp,
    struct elf_args *ap)
{
	int error, i;
	struct nameidata nd;
	Elf_Ehdr eh;
	Elf_Phdr *ph = NULL;
	u_long phsize = 0;
	Elf_Addr addr;
	struct vnode *vp;
	Elf_Phdr *base_ph = NULL;
	struct interp_ld_sec {
		Elf_Addr vaddr;
		u_long memsz;
	} loadmap[ELF_MAX_VALID_PHDR];
	int nload, idx = 0;
	Elf_Addr pos;
	int file_align;
	int loop;
	size_t randomizequota = ELF_RANDOMIZE_LIMIT;

	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, path, p);
	nd.ni_pledge = PLEDGE_RPATH;
	nd.ni_unveil = UNVEIL_READ;
	if ((error = namei(&nd)) != 0) {
		return (error);
	}
	vp = nd.ni_vp;
	if (vp->v_type != VREG) {
		error = EACCES;
		goto bad;
	}
	if ((error = VOP_GETATTR(vp, epp->ep_vap, p->p_ucred, p)) != 0)
		goto bad;
	if (vp->v_mount->mnt_flag & MNT_NOEXEC) {
		error = EACCES;
		goto bad;
	}
	if ((error = VOP_ACCESS(vp, VREAD, p->p_ucred, p)) != 0)
		goto bad1;
	if ((error = elf_read_from(p, nd.ni_vp, 0, &eh, sizeof(eh))) != 0)
		goto bad1;

	if (elf_check_header(&eh) || eh.e_type != ET_DYN) {
		error = ENOEXEC;
		goto bad1;
	}

	ph = mallocarray(eh.e_phnum, sizeof(Elf_Phdr), M_TEMP, M_WAITOK);
	phsize = eh.e_phnum * sizeof(Elf_Phdr);

	if ((error = elf_read_from(p, nd.ni_vp, eh.e_phoff, ph, phsize)) != 0)
		goto bad1;

	for (i = 0; i < eh.e_phnum; i++) {
		if (ph[i].p_type == PT_LOAD) {
			if (ph[i].p_filesz > ph[i].p_memsz ||
			    ph[i].p_memsz == 0) {
				error = EINVAL;
				goto bad1;
			}
			loadmap[idx].vaddr = trunc_page(ph[i].p_vaddr);
			loadmap[idx].memsz = round_page(ph[i].p_vaddr +
			    ph[i].p_memsz - loadmap[idx].vaddr);
			file_align = ph[i].p_align;
			idx++;
		}
	}
	nload = idx;

	/*
	 * Load the interpreter where a non-fixed mmap(NULL, ...)
	 * would (i.e. something safely out of the way).
	 */
	pos = uvm_map_hint(p->p_vmspace, PROT_EXEC, VM_MIN_ADDRESS,
	    VM_MAXUSER_ADDRESS);
	pos = ELF_ROUND(pos, file_align);

	loop = 0;
	for (i = 0; i < nload;/**/) {
		vaddr_t addr;
		struct uvm_object *uobj;
		off_t uoff;
		size_t size;

#ifdef this_needs_fixing
		if (i == 0) {
			uobj = &vp->v_uvm.u_obj;
			/* need to fix uoff */
		} else {
#endif
			uobj = NULL;
			uoff = 0;
#ifdef this_needs_fixing
		}
#endif

		addr = trunc_page(pos + loadmap[i].vaddr);
		size = round_page(addr + loadmap[i].memsz) - addr;

		/* CRAP - map_findspace does not avoid daddr+BRKSIZ */
		if ((addr + size > (vaddr_t)p->p_vmspace->vm_daddr) &&
		    (addr < (vaddr_t)p->p_vmspace->vm_daddr + BRKSIZ))
			addr = round_page((vaddr_t)p->p_vmspace->vm_daddr +
			    BRKSIZ);

		if (uvm_map_mquery(&p->p_vmspace->vm_map, &addr, size,
		    (i == 0 ? uoff : UVM_UNKNOWN_OFFSET), 0) != 0) {
			if (loop == 0) {
				loop = 1;
				i = 0;
				pos = 0;
				continue;
			}
			error = ENOMEM;
			goto bad1;
		}
		if (addr != pos + loadmap[i].vaddr) {
			/* base changed. */
			pos = addr - trunc_page(loadmap[i].vaddr);
			pos = ELF_ROUND(pos, file_align);
			i = 0;
			continue;
		}

		i++;
	}

	/*
	 * Load all the necessary sections
	 */
	for (i = 0; i < eh.e_phnum; i++) {
		Elf_Addr size = 0;
		int prot = 0;
		int flags;

		switch (ph[i].p_type) {
		case PT_LOAD:
			if (base_ph == NULL) {
				flags = VMCMD_BASE;
				addr = pos;
				base_ph = &ph[i];
			} else {
				flags = VMCMD_RELATIVE;
				addr = ph[i].p_vaddr - base_ph->p_vaddr;
			}
			elf_load_psection(&epp->ep_vmcmds, nd.ni_vp,
			    &ph[i], &addr, &size, &prot, flags | VMCMD_SYSCALL);
			/* If entry is within this section it must be text */
			if (eh.e_entry >= ph[i].p_vaddr &&
			    eh.e_entry < (ph[i].p_vaddr + size)) {
				epp->ep_entry = addr + eh.e_entry -
				    ELF_TRUNC(ph[i].p_vaddr, ph[i].p_align);
				if (flags == VMCMD_RELATIVE)
					epp->ep_entry += pos;
				ap->arg_interp = pos;
			}
			addr += size;
			break;

		case PT_PHDR:
		case PT_NOTE:
			break;

		case PT_OPENBSD_RANDOMIZE:
			if (ph[i].p_memsz > randomizequota) {
				error = ENOMEM;
				goto bad1;
			}
			randomizequota -= ph[i].p_memsz;
			NEW_VMCMD(&epp->ep_vmcmds, vmcmd_randomize,
			    ph[i].p_memsz, ph[i].p_vaddr + pos, NULLVP, 0, 0);
			break;

		case PT_DYNAMIC:
#if defined (__mips__)
			/* DT_DEBUG is not ready on mips */
			NEW_VMCMD(&epp->ep_vmcmds, vmcmd_mutable,
			    ph[i].p_memsz, ph[i].p_vaddr + pos, NULLVP, 0, 0);
#endif
			break;
		case PT_GNU_RELRO:
		case PT_OPENBSD_MUTABLE:
			NEW_VMCMD(&epp->ep_vmcmds, vmcmd_mutable,
			    ph[i].p_memsz, ph[i].p_vaddr + pos, NULLVP, 0, 0);
			break;

		default:
			break;
		}
	}

	vn_marktext(nd.ni_vp);

bad1:
	VOP_CLOSE(nd.ni_vp, FREAD, p->p_ucred, p);
bad:
	free(ph, M_TEMP, phsize);

	vput(nd.ni_vp);
	return (error);
}

/*
 * Prepare an Elf binary's exec package
 *
 * First, set up the various offsets/lengths in the exec package.
 *
 * Then, mark the text image busy (so it can be demand paged) or error out if
 * this is not possible.  Finally, set up vmcmds for the text, data, bss, and
 * stack segments.
 */
int
exec_elf_makecmds(struct proc *p, struct exec_package *epp)
{
	Elf_Ehdr *eh = epp->ep_hdr;
	Elf_Phdr *ph, *pp, *base_ph = NULL;
	Elf_Addr phdr = 0, exe_base = 0;
	int error, i, has_phdr = 0, names = 0, textrel = 0;
	char *interp = NULL;
	u_long phsize;
	size_t randomizequota = ELF_RANDOMIZE_LIMIT;

	if (epp->ep_hdrvalid < sizeof(Elf_Ehdr))
		return (ENOEXEC);

	if (elf_check_header(eh) ||
	    (eh->e_type != ET_EXEC && eh->e_type != ET_DYN))
		return (ENOEXEC);

	/*
	 * check if vnode is open for writing, because we want to demand-
	 * page out of it.  If it is, don't do it, for various reasons.
	 */
	if (epp->ep_vp->v_writecount != 0) {
#ifdef DIAGNOSTIC
		if (epp->ep_vp->v_flag & VTEXT)
			panic("exec: a VTEXT vnode has writecount != 0");
#endif
		return (ETXTBSY);
	}
	/*
	 * Allocate space to hold all the program headers, and read them
	 * from the file
	 */
	ph = mallocarray(eh->e_phnum, sizeof(Elf_Phdr), M_TEMP, M_WAITOK);
	phsize = eh->e_phnum * sizeof(Elf_Phdr);

	if ((error = elf_read_from(p, epp->ep_vp, eh->e_phoff, ph,
	    phsize)) != 0)
		goto bad;

	epp->ep_tsize = ELF_NO_ADDR;
	epp->ep_dsize = ELF_NO_ADDR;

	for (i = 0, pp = ph; i < eh->e_phnum; i++, pp++) {
		if (pp->p_type == PT_INTERP && !interp) {
			if (pp->p_filesz < 2 || pp->p_filesz > MAXPATHLEN)
				goto bad;
			interp = pool_get(&namei_pool, PR_WAITOK);
			if ((error = elf_read_from(p, epp->ep_vp,
			    pp->p_offset, interp, pp->p_filesz)) != 0) {
				goto bad;
			}
			if (interp[pp->p_filesz - 1] != '\0')
				goto bad;
		} else if (pp->p_type == PT_LOAD) {
			if (pp->p_filesz > pp->p_memsz ||
			    pp->p_memsz == 0) {
				error = EINVAL;
				goto bad;
			}
			if (base_ph == NULL)
				base_ph = pp;
		} else if (pp->p_type == PT_PHDR) {
			has_phdr = 1;
		}
	}

	/*
	 * Verify this is an OpenBSD executable.  If it's marked that way
	 * via a PT_NOTE then also check for a PT_OPENBSD_WXNEEDED segment.
	 */
	if ((error = elf_os_pt_note(p, epp, epp->ep_hdr, &names)) != 0)
		goto bad;
	if (eh->e_ident[EI_OSABI] == ELFOSABI_OPENBSD)
		names |= ELF_NOTE_NAME_OPENBSD;

	if (eh->e_type == ET_DYN) {
		/* need phdr and load sections for PIE */
		if (!has_phdr || base_ph == NULL) {
			error = EINVAL;
			goto bad;
		}
		/* randomize exe_base for PIE */
		exe_base = uvm_map_pie(base_ph->p_align);

		/*
		 * Check if DYNAMIC contains DT_TEXTREL
		 */
		for (i = 0, pp = ph; i < eh->e_phnum; i++, pp++) {
			Elf_Dyn *dt;
			int j;

			switch (pp->p_type) {
			case PT_DYNAMIC:
				if (pp->p_filesz > 64*1024)
					break;
				dt = malloc(pp->p_filesz, M_TEMP, M_WAITOK);
				error = vn_rdwr(UIO_READ, epp->ep_vp,
				    (caddr_t)dt, pp->p_filesz, pp->p_offset,
				    UIO_SYSSPACE, IO_UNIT, p->p_ucred, NULL, p);
				if (error) {
					free(dt, M_TEMP, pp->p_filesz);
					break;
				}
				for (j = 0; j * sizeof(*dt) < pp->p_filesz; j++) {
					if (dt[j].d_tag == DT_TEXTREL) {
						textrel = VMCMD_TEXTREL;
						break;
					}
				}
				free(dt, M_TEMP, pp->p_filesz);
				break;
			default:
				break;
			}
		}
	}

	/*
	 * Load all the necessary sections
	 */
	for (i = 0, pp = ph; i < eh->e_phnum; i++, pp++) {
		Elf_Addr addr, size = 0;
		int prot = 0, syscall = 0;
		int flags = 0;

		switch (pp->p_type) {
		case PT_LOAD:
			if (exe_base != 0) {
				if (pp == base_ph) {
					flags = VMCMD_BASE;
					addr = exe_base;
				} else {
					flags = VMCMD_RELATIVE;
					addr = pp->p_vaddr - base_ph->p_vaddr;
				}
			} else
				addr = ELF_NO_ADDR;

			/* Permit system calls in specific main-programs */
			if (interp == NULL) {
				/* statics. Also block the ld.so syscall-grant */
				syscall = VMCMD_SYSCALL;
				p->p_vmspace->vm_map.flags |= VM_MAP_SYSCALL_ONCE;
			}

			/*
			 * Calculate the size of the text and data segments
			 * by starting at the first and going to the end of
			 * the last.  'rwx' sections are treated as data.
			 * This is correct for BSS_PLT, but may not be
			 * for DATA_PLT; it is fine for TEXT_PLT.
			 */
			elf_load_psection(&epp->ep_vmcmds, epp->ep_vp,
			    pp, &addr, &size, &prot, flags | textrel | syscall);

			/*
			 * Update exe_base in case alignment was off.
			 * For PIE, addr is relative to exe_base so
			 * adjust it (non PIE exe_base is 0 so no change).
			 */
			if (flags == VMCMD_BASE)
				exe_base = addr;
			else
				addr += exe_base;

			/*
			 * Decide whether it's text or data by looking
			 * at the protection of the section
			 */
			if (prot & PROT_WRITE) {
				/* data section */
				if (epp->ep_dsize == ELF_NO_ADDR) {
					epp->ep_daddr = addr;
					epp->ep_dsize = size;
				} else {
					if (addr < epp->ep_daddr) {
						epp->ep_dsize =
						    epp->ep_dsize +
						    epp->ep_daddr -
						    addr;
						epp->ep_daddr = addr;
					} else
						epp->ep_dsize = addr+size -
						    epp->ep_daddr;
				}
			} else if (prot & PROT_EXEC) {
				/* text section */
				if (epp->ep_tsize == ELF_NO_ADDR) {
					epp->ep_taddr = addr;
					epp->ep_tsize = size;
				} else {
					if (addr < epp->ep_taddr) {
						epp->ep_tsize =
						    epp->ep_tsize +
						    epp->ep_taddr -
						    addr;
						epp->ep_taddr = addr;
					} else
						epp->ep_tsize = addr+size -
						    epp->ep_taddr;
				}
			}
			break;

		case PT_SHLIB:
			error = ENOEXEC;
			goto bad;

		case PT_INTERP:
			/* Already did this one */
		case PT_NOTE:
			break;

		case PT_PHDR:
			/* Note address of program headers (in text segment) */
			phdr = pp->p_vaddr;
			break;

		case PT_OPENBSD_RANDOMIZE:
			if (ph[i].p_memsz > randomizequota) {
				error = ENOMEM;
				goto bad;
			}
			randomizequota -= ph[i].p_memsz;
			NEW_VMCMD(&epp->ep_vmcmds, vmcmd_randomize,
			    ph[i].p_memsz, ph[i].p_vaddr + exe_base, NULLVP, 0, 0);
			break;

		case PT_DYNAMIC:
#if defined (__mips__)
			/* DT_DEBUG is not ready on mips */
			NEW_VMCMD(&epp->ep_vmcmds, vmcmd_mutable,
			    ph[i].p_memsz, ph[i].p_vaddr + exe_base, NULLVP, 0, 0);
#endif
			break;
		case PT_GNU_RELRO:
		case PT_OPENBSD_MUTABLE:
			NEW_VMCMD(&epp->ep_vmcmds, vmcmd_mutable,
			    ph[i].p_memsz, ph[i].p_vaddr + exe_base, NULLVP, 0, 0);
			break;

		default:
			/*
			 * Not fatal, we don't need to understand everything
			 * :-)
			 */
			break;
		}
	}

	phdr += exe_base;

	/*
	 * Strangely, some Linux programs may have all load sections marked
	 * writable; in this case, textsize is not -1, but rather 0.
	 */
	if (epp->ep_tsize == ELF_NO_ADDR)
		epp->ep_tsize = 0;
	/*
	 * Another possibility is that it has all load sections marked
	 * read-only.  Fake a zero-sized data segment right after the
	 * text segment.
	 */
	if (epp->ep_dsize == ELF_NO_ADDR) {
		epp->ep_daddr = round_page(epp->ep_taddr + epp->ep_tsize);
		epp->ep_dsize = 0;
	}

	epp->ep_interp = interp;
	epp->ep_entry = eh->e_entry + exe_base;

	/*
	 * Check if we found a dynamically linked binary and arrange to load
	 * its interpreter when the exec file is released.
	 */
	if (interp || eh->e_type == ET_DYN) {
		struct elf_args *ap;

		ap = malloc(sizeof(*ap), M_TEMP, M_WAITOK);

		ap->arg_phaddr = phdr;
		ap->arg_phentsize = eh->e_phentsize;
		ap->arg_phnum = eh->e_phnum;
		ap->arg_entry = eh->e_entry + exe_base;
		ap->arg_interp = exe_base;

		epp->ep_args = ap;
	}

	free(ph, M_TEMP, phsize);
	vn_marktext(epp->ep_vp);
	return (exec_setup_stack(p, epp));

bad:
	if (interp)
		pool_put(&namei_pool, interp);
	free(ph, M_TEMP, phsize);
	kill_vmcmds(&epp->ep_vmcmds);
	if (error == 0)
		return (ENOEXEC);
	return (error);
}

/*
 * Phase II of load.  It is now safe to load the interpreter.  Info collected
 * when loading the program is available for setup of the interpreter.
 */
int
exec_elf_fixup(struct proc *p, struct exec_package *epp)
{
	char *interp;
	int error = 0;
	struct elf_args *ap;
	AuxInfo ai[ELF_AUX_ENTRIES], *a;

	ap = epp->ep_args;
	if (ap == NULL) {
		return (0);
	}

	interp = epp->ep_interp;

	if (interp &&
	    (error = elf_load_file(p, interp, epp, ap)) != 0) {
		uprintf("execve: cannot load %s\n", interp);
		free(ap, M_TEMP, sizeof *ap);
		pool_put(&namei_pool, interp);
		kill_vmcmds(&epp->ep_vmcmds);
		return (error);
	}
	/*
	 * We have to do this ourselves...
	 */
	error = exec_process_vmcmds(p, epp);

	/*
	 * Push extra arguments on the stack needed by dynamically
	 * linked binaries
	 */
	if (error == 0) {
		memset(&ai, 0, sizeof ai);
		a = ai;

		a->au_id = AUX_phdr;
		a->au_v = ap->arg_phaddr;
		a++;

		a->au_id = AUX_phent;
		a->au_v = ap->arg_phentsize;
		a++;

		a->au_id = AUX_phnum;
		a->au_v = ap->arg_phnum;
		a++;

		a->au_id = AUX_pagesz;
		a->au_v = PAGE_SIZE;
		a++;

		a->au_id = AUX_base;
		a->au_v = ap->arg_interp;
		a++;

		a->au_id = AUX_flags;
		a->au_v = 0;
		a++;

		a->au_id = AUX_entry;
		a->au_v = ap->arg_entry;
		a++;

		a->au_id = AUX_openbsd_timekeep;
		a->au_v = p->p_p->ps_timekeep;
		a++;

		a->au_id = AUX_null;
		a->au_v = 0;
		a++;

		error = copyout(ai, epp->ep_auxinfo, sizeof ai);
	}
	free(ap, M_TEMP, sizeof *ap);
	if (interp)
		pool_put(&namei_pool, interp);
	return (error);
}
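
/*
 * Rough sketch (added commentary, not kernel ABI documentation): the
 * AuxInfo array copied out above is what ld.so walks at startup.  The
 * values come from the elf_args filled in by exec_elf_makecmds() and
 * elf_load_file(): AUX_phdr/AUX_phent/AUX_phnum describe the main
 * program's program headers, AUX_base is ap->arg_interp (the relocation
 * base recorded for the interpreter, or the PIE base when there is no
 * interpreter), and AUX_entry is the relocated e_entry of the main
 * program.  AUX_null terminates the list.
 */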

int
elf_os_pt_note_name(Elf_Note *np)
{
	int i, j;

	for (i = 0; i < nitems(elf_note_names); i++) {
		size_t namlen = strlen(elf_note_names[i].name);
		if (np->namesz < namlen)
			continue;
		/* verify name padding (after the NUL) is NUL */
		for (j = namlen + 1; j < elfround(np->namesz); j++)
			if (((char *)(np + 1))[j] != '\0')
				continue;
		/* verify desc padding is NUL */
		for (j = np->descsz; j < elfround(np->descsz); j++)
			if (((char *)(np + 1))[j] != '\0')
				continue;
		if (strcmp((char *)(np + 1), elf_note_names[i].name) == 0)
			return elf_note_names[i].id;
	}
	return (0);
}

int
elf_os_pt_note(struct proc *p, struct exec_package *epp, Elf_Ehdr *eh, int *namesp)
{
	Elf_Phdr *hph, *ph;
	Elf_Note *np = NULL;
	size_t phsize, offset, pfilesz = 0, total;
	int error, names = 0;

	hph = mallocarray(eh->e_phnum, sizeof(Elf_Phdr), M_TEMP, M_WAITOK);
	phsize = eh->e_phnum * sizeof(Elf_Phdr);
	if ((error = elf_read_from(p, epp->ep_vp, eh->e_phoff,
	    hph, phsize)) != 0)
		goto out1;

	for (ph = hph; ph < &hph[eh->e_phnum]; ph++) {
		if (ph->p_type == PT_OPENBSD_WXNEEDED) {
			epp->ep_flags |= EXEC_WXNEEDED;
			continue;
		}

		if (ph->p_type != PT_NOTE || ph->p_filesz > 1024)
			continue;

		if (np && ph->p_filesz != pfilesz) {
			free(np, M_TEMP, pfilesz);
			np = NULL;
		}
		if (!np)
			np = malloc(ph->p_filesz, M_TEMP, M_WAITOK);
		pfilesz = ph->p_filesz;
		if ((error = elf_read_from(p, epp->ep_vp, ph->p_offset,
		    np, ph->p_filesz)) != 0)
			goto out2;

		for (offset = 0; offset < ph->p_filesz; offset += total) {
			Elf_Note *np2 = (Elf_Note *)((char *)np + offset);

			if (offset + sizeof(Elf_Note) > ph->p_filesz)
				break;
			total = sizeof(Elf_Note) + elfround(np2->namesz) +
			    elfround(np2->descsz);
			if (offset + total > ph->p_filesz)
				break;
			names |= elf_os_pt_note_name(np2);
		}
	}

out2:
	free(np, M_TEMP, pfilesz);
out1:
	free(hph, M_TEMP, phsize);
	*namesp = names;
	return ((names & ELF_NOTE_NAME_OPENBSD) ? 0 : ENOEXEC);
}

/*
 * Start of routines related to dumping core
 */

#ifdef SMALL_KERNEL
int
coredump_elf(struct proc *p, void *cookie)
{
	return EPERM;
}
#else /* !SMALL_KERNEL */

struct writesegs_state {
	off_t	notestart;
	off_t	secstart;
	off_t	secoff;
	struct	proc *p;
	void	*iocookie;
	Elf_Phdr *psections;
	size_t	psectionslen;
	size_t	notesize;
	int	npsections;
};

uvm_coredump_setup_cb	coredump_setup_elf;
uvm_coredump_walk_cb	coredump_walk_elf;

int	coredump_notes_elf(struct proc *, void *, size_t *);
int	coredump_note_elf(struct proc *, void *, size_t *);
int	coredump_writenote_elf(struct proc *, void *, Elf_Note *,
	    const char *, void *);

int
coredump_elf(struct proc *p, void *cookie)
{
#ifdef DIAGNOSTIC
	off_t offset;
#endif
	struct writesegs_state ws;
	size_t notesize;
	int error, i;

	ws.p = p;
	ws.iocookie = cookie;
	ws.psections = NULL;

	/*
	 * Walk the map to get all the segment offsets and lengths,
	 * write out the ELF header.
	 */
	error = uvm_coredump_walkmap(p, coredump_setup_elf,
	    coredump_walk_elf, &ws);
	if (error)
		goto out;

	error = coredump_write(cookie, UIO_SYSSPACE, ws.psections,
	    ws.psectionslen);
	if (error)
		goto out;

	/* Write out the notes. */
	error = coredump_notes_elf(p, cookie, &notesize);
	if (error)
		goto out;

#ifdef DIAGNOSTIC
	if (notesize != ws.notesize)
		panic("coredump: notesize changed: %zu != %zu",
		    ws.notesize, notesize);
	offset = ws.notestart + notesize;
	if (offset != ws.secstart)
		panic("coredump: offset %lld != secstart %lld",
		    (long long) offset, (long long) ws.secstart);
#endif

	/* Pass 3: finally, write the sections themselves. */
	for (i = 0; i < ws.npsections - 1; i++) {
		Elf_Phdr *pent = &ws.psections[i];
		if (pent->p_filesz == 0)
			continue;

#ifdef DIAGNOSTIC
		if (offset != pent->p_offset)
			panic("coredump: offset %lld != p_offset[%d] %lld",
			    (long long) offset, i,
			    (long long) pent->p_offset);
#endif

		error = coredump_write(cookie, UIO_USERSPACE,
		    (void *)(vaddr_t)pent->p_vaddr, pent->p_filesz);
		if (error)
			goto out;

		coredump_unmap(cookie, (vaddr_t)pent->p_vaddr,
		    (vaddr_t)pent->p_vaddr + pent->p_filesz);

#ifdef DIAGNOSTIC
		offset += ws.psections[i].p_filesz;
#endif
	}

out:
	free(ws.psections, M_TEMP, ws.psectionslen);
	return (error);
}


/*
 * Normally we lay out core files like this:
 *	[ELF Header] [Program headers] [Notes] [data for PT_LOAD segments]
 *
 * However, if there's >= 65535 segments then it overflows the field
 * in the ELF header, so the standard specifies putting a magic
 * number there and saving the real count in the .sh_info field of
 * the first *section* header...which requires generating a section
 * header.  To avoid confusing tools, we include a .shstrtab section
 * as well so all the indexes look valid.  So in this case we lay
 * out the core file like this:
 *	[ELF Header] [Section Headers] [.shstrtab] [Program headers] \
 *	[Notes] [data for PT_LOAD segments]
 *
 * The 'shstrtab' structure below is data for the second of the two
 * section headers, plus the .shstrtab itself, in one const buffer.
 */
static const struct {
	Elf_Shdr	shdr;
	char		shstrtab[sizeof(ELF_SHSTRTAB) + 1];
} shstrtab = {
	.shdr = {
		.sh_name = 1,			/* offset in .shstrtab below */
		.sh_type = SHT_STRTAB,
		.sh_offset = sizeof(Elf_Ehdr) + 2*sizeof(Elf_Shdr),
		.sh_size = sizeof(ELF_SHSTRTAB) + 1,
		.sh_addralign = 1,
	},
	.shstrtab = "\0" ELF_SHSTRTAB,
};
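
/*
 * Added note (illustrative, not from the original source): in the
 * PN_XNUM layout described above, coredump_setup_elf() places the two
 * section headers directly after the Elf_Ehdr, so sh_offset here is
 * sizeof(Elf_Ehdr) + 2*sizeof(Elf_Shdr) and the program headers then
 * start at sh_offset + sh_size.
 */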

int
coredump_setup_elf(int segment_count, void *cookie)
{
	Elf_Ehdr ehdr;
	struct writesegs_state *ws = cookie;
	Elf_Phdr *note;
	int error;

	/* Get the count of segments, plus one for the PT_NOTE */
	ws->npsections = segment_count + 1;

	/* Get the size of the notes. */
	error = coredump_notes_elf(ws->p, NULL, &ws->notesize);
	if (error)
		return error;

	/* Setup the ELF header */
	memset(&ehdr, 0, sizeof(ehdr));
	memcpy(ehdr.e_ident, ELFMAG, SELFMAG);
	ehdr.e_ident[EI_CLASS] = ELF_TARG_CLASS;
	ehdr.e_ident[EI_DATA] = ELF_TARG_DATA;
	ehdr.e_ident[EI_VERSION] = EV_CURRENT;
	/* XXX Should be the OSABI/ABI version of the executable. */
	ehdr.e_ident[EI_OSABI] = ELFOSABI_SYSV;
	ehdr.e_ident[EI_ABIVERSION] = 0;
	ehdr.e_type = ET_CORE;
	/* XXX This should be the e_machine of the executable. */
	ehdr.e_machine = ELF_TARG_MACH;
	ehdr.e_version = EV_CURRENT;
	ehdr.e_entry = 0;
	ehdr.e_flags = 0;
	ehdr.e_ehsize = sizeof(ehdr);
	ehdr.e_phentsize = sizeof(Elf_Phdr);

	if (ws->npsections < PN_XNUM) {
		ehdr.e_phoff = sizeof(ehdr);
		ehdr.e_shoff = 0;
		ehdr.e_phnum = ws->npsections;
		ehdr.e_shentsize = 0;
		ehdr.e_shnum = 0;
		ehdr.e_shstrndx = 0;
	} else {
		/* too many segments, use extension setup */
		ehdr.e_shoff = sizeof(ehdr);
		ehdr.e_phnum = PN_XNUM;
		ehdr.e_shentsize = sizeof(Elf_Shdr);
		ehdr.e_shnum = 2;
		ehdr.e_shstrndx = 1;
		ehdr.e_phoff = shstrtab.shdr.sh_offset + shstrtab.shdr.sh_size;
	}

	/* Write out the ELF header. */
	error = coredump_write(ws->iocookie, UIO_SYSSPACE, &ehdr, sizeof(ehdr));
	if (error)
		return error;

	/*
	 * If a section header is needed to store extension info, write
	 * it out after the ELF header and before the program header.
	 */
	if (ehdr.e_shnum != 0) {
		Elf_Shdr shdr = { .sh_info = ws->npsections };
		error = coredump_write(ws->iocookie, UIO_SYSSPACE, &shdr,
		    sizeof shdr);
		if (error)
			return error;
		error = coredump_write(ws->iocookie, UIO_SYSSPACE, &shstrtab,
		    sizeof(shstrtab.shdr) + sizeof(shstrtab.shstrtab));
		if (error)
			return error;
	}

	/*
	 * Allocate the segment header array and setup to collect
	 * the section sizes and offsets
	 */
	ws->psections = mallocarray(ws->npsections, sizeof(Elf_Phdr),
	    M_TEMP, M_WAITOK|M_CANFAIL|M_ZERO);
	if (ws->psections == NULL)
		return ENOMEM;
	ws->psectionslen = ws->npsections * sizeof(Elf_Phdr);

	ws->notestart = ehdr.e_phoff + ws->psectionslen;
	ws->secstart = ws->notestart + ws->notesize;
	ws->secoff = ws->secstart;

	/* Fill in the PT_NOTE segment header in the last slot */
	note = &ws->psections[ws->npsections - 1];
	note->p_type = PT_NOTE;
	note->p_offset = ws->notestart;
	note->p_vaddr = 0;
	note->p_paddr = 0;
	note->p_filesz = ws->notesize;
	note->p_memsz = 0;
	note->p_flags = PF_R;
	note->p_align = ELFROUNDSIZE;

	return (0);
}

int
coredump_walk_elf(vaddr_t start, vaddr_t realend, vaddr_t end, vm_prot_t prot,
    int nsegment, void *cookie)
{
	struct writesegs_state *ws = cookie;
	Elf_Phdr phdr;
	vsize_t size, realsize;

	size = end - start;
	realsize = realend - start;

	phdr.p_type = PT_LOAD;
	phdr.p_offset = ws->secoff;
	phdr.p_vaddr = start;
	phdr.p_paddr = 0;
	phdr.p_filesz = realsize;
	phdr.p_memsz = size;
	phdr.p_flags = 0;
	if (prot & PROT_READ)
		phdr.p_flags |= PF_R;
	if (prot & PROT_WRITE)
		phdr.p_flags |= PF_W;
	if (prot & PROT_EXEC)
		phdr.p_flags |= PF_X;
	phdr.p_align = PAGE_SIZE;

	ws->secoff += phdr.p_filesz;
	ws->psections[nsegment] = phdr;

	return (0);
}

int
coredump_notes_elf(struct proc *p, void *iocookie, size_t *sizep)
{
	struct ps_strings pss;
	struct iovec iov;
	struct uio uio;
	struct elfcore_procinfo cpi;
	Elf_Note nhdr;
	struct process *pr = p->p_p;
	struct proc *q;
	size_t size, notesize;
	int error;

	KASSERT(!P_HASSIBLING(p) || pr->ps_single != NULL);
	size = 0;

	/* First, write an elfcore_procinfo. */
	notesize = sizeof(nhdr) + elfround(sizeof("OpenBSD")) +
	    elfround(sizeof(cpi));
	if (iocookie) {
		memset(&cpi, 0, sizeof(cpi));

		cpi.cpi_version = ELFCORE_PROCINFO_VERSION;
		cpi.cpi_cpisize = sizeof(cpi);
		cpi.cpi_signo = p->p_sisig;
		cpi.cpi_sigcode = p->p_sicode;

		cpi.cpi_sigpend = p->p_siglist | pr->ps_siglist;
		cpi.cpi_sigmask = p->p_sigmask;
		cpi.cpi_sigignore = pr->ps_sigacts->ps_sigignore;
		cpi.cpi_sigcatch = pr->ps_sigacts->ps_sigcatch;

		cpi.cpi_pid = pr->ps_pid;
		cpi.cpi_ppid = pr->ps_ppid;
		cpi.cpi_pgrp = pr->ps_pgid;
		if (pr->ps_session->s_leader)
			cpi.cpi_sid = pr->ps_session->s_leader->ps_pid;
		else
			cpi.cpi_sid = 0;

		cpi.cpi_ruid = p->p_ucred->cr_ruid;
		cpi.cpi_euid = p->p_ucred->cr_uid;
		cpi.cpi_svuid = p->p_ucred->cr_svuid;

		cpi.cpi_rgid = p->p_ucred->cr_rgid;
		cpi.cpi_egid = p->p_ucred->cr_gid;
		cpi.cpi_svgid = p->p_ucred->cr_svgid;

		(void)strlcpy(cpi.cpi_name, pr->ps_comm, sizeof(cpi.cpi_name));

		nhdr.namesz = sizeof("OpenBSD");
		nhdr.descsz = sizeof(cpi);
		nhdr.type = NT_OPENBSD_PROCINFO;

		error = coredump_writenote_elf(p, iocookie, &nhdr,
		    "OpenBSD", &cpi);
		if (error)
			return (error);
	}
	size += notesize;

	/* Second, write an NT_OPENBSD_AUXV note. */
	notesize = sizeof(nhdr) + elfround(sizeof("OpenBSD")) +
	    elfround(ELF_AUX_WORDS * sizeof(char *));
	if (iocookie) {
		iov.iov_base = &pss;
		iov.iov_len = sizeof(pss);
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = (off_t)pr->ps_strings;
		uio.uio_resid = sizeof(pss);
		uio.uio_segflg = UIO_SYSSPACE;
		uio.uio_rw = UIO_READ;
		uio.uio_procp = NULL;

		error = uvm_io(&p->p_vmspace->vm_map, &uio, 0);
		if (error)
			return (error);

		if (pss.ps_envstr == NULL)
			return (EIO);

		nhdr.namesz = sizeof("OpenBSD");
		nhdr.descsz = ELF_AUX_WORDS * sizeof(char *);
		nhdr.type = NT_OPENBSD_AUXV;

		error = coredump_write(iocookie, UIO_SYSSPACE,
		    &nhdr, sizeof(nhdr));
		if (error)
			return (error);

		error = coredump_write(iocookie, UIO_SYSSPACE,
		    "OpenBSD", elfround(nhdr.namesz));
		if (error)
			return (error);

		error = coredump_write(iocookie, UIO_USERSPACE,
		    pss.ps_envstr + pss.ps_nenvstr + 1, nhdr.descsz);
		if (error)
			return (error);
	}
	size += notesize;

#ifdef PT_WCOOKIE
	notesize = sizeof(nhdr) + elfround(sizeof("OpenBSD")) +
	    elfround(sizeof(register_t));
	if (iocookie) {
		register_t wcookie;

		nhdr.namesz = sizeof("OpenBSD");
		nhdr.descsz = sizeof(register_t);
		nhdr.type = NT_OPENBSD_WCOOKIE;

		wcookie = process_get_wcookie(p);
		error = coredump_writenote_elf(p, iocookie, &nhdr,
		    "OpenBSD", &wcookie);
		if (error)
			return (error);
	}
	size += notesize;
#endif

	/*
	 * Now write the register info for the thread that caused the
	 * coredump.
	 */
	error = coredump_note_elf(p, iocookie, &notesize);
	if (error)
		return (error);
	size += notesize;

	/*
	 * Now, for each thread, write the register info and any other
	 * per-thread notes.  Since we're dumping core, all the other
	 * threads in the process have been stopped and the list can't
	 * change.
	 */
	TAILQ_FOREACH(q, &pr->ps_threads, p_thr_link) {
		if (q == p)		/* we've taken care of this thread */
			continue;
		error = coredump_note_elf(q, iocookie, &notesize);
		if (error)
			return (error);
		size += notesize;
	}

	*sizep = size;
	return (0);
}

int
coredump_note_elf(struct proc *p, void *iocookie, size_t *sizep)
{
	Elf_Note nhdr;
	int size, notesize, error;
	int namesize;
	char name[64+ELFROUNDSIZE];
	struct reg intreg;
#ifdef PT_GETFPREGS
	struct fpreg freg;
#endif

	size = 0;

	snprintf(name, sizeof(name)-ELFROUNDSIZE, "%s@%d",
	    "OpenBSD", p->p_tid + THREAD_PID_OFFSET);
	namesize = strlen(name) + 1;
	memset(name + namesize, 0, elfround(namesize) - namesize);

	notesize = sizeof(nhdr) + elfround(namesize) + elfround(sizeof(intreg));
	if (iocookie) {
		error = process_read_regs(p, &intreg);
		if (error)
			return (error);

		nhdr.namesz = namesize;
		nhdr.descsz = sizeof(intreg);
		nhdr.type = NT_OPENBSD_REGS;

		error = coredump_writenote_elf(p, iocookie, &nhdr,
		    name, &intreg);
		if (error)
			return (error);

	}
	size += notesize;

#ifdef PT_GETFPREGS
	notesize = sizeof(nhdr) + elfround(namesize) + elfround(sizeof(freg));
	if (iocookie) {
		error = process_read_fpregs(p, &freg);
		if (error)
			return (error);

		nhdr.namesz = namesize;
		nhdr.descsz = sizeof(freg);
		nhdr.type = NT_OPENBSD_FPREGS;

		error = coredump_writenote_elf(p, iocookie, &nhdr, name, &freg);
		if (error)
			return (error);
	}
	size += notesize;
#endif

	*sizep = size;
	/* XXX Add hook for machdep per-LWP notes. */
	return (0);
}

int
coredump_writenote_elf(struct proc *p, void *cookie, Elf_Note *nhdr,
    const char *name, void *data)
{
	int error;

	error = coredump_write(cookie, UIO_SYSSPACE, nhdr, sizeof(*nhdr));
	if (error)
		return error;

	error = coredump_write(cookie, UIO_SYSSPACE, name,
	    elfround(nhdr->namesz));
	if (error)
		return error;

	return coredump_write(cookie, UIO_SYSSPACE, data, nhdr->descsz);
}
#endif /* !SMALL_KERNEL */