/*	$NetBSD: kvm_proc.c,v 1.16 1996/03/18 22:33:57 thorpej Exp $	*/

/*-
 * Copyright (c) 1994, 1995 Charles M. Hannum.  All rights reserved.
 * Copyright (c) 1989, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software developed by the Computer Systems
 * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
 * BG 91-66 and contributed to Berkeley.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#if defined(LIBC_SCCS) && !defined(lint)
#if 0
static char sccsid[] = "@(#)kvm_proc.c	8.3 (Berkeley) 9/23/93";
#else
static char *rcsid = "$NetBSD: kvm_proc.c,v 1.16 1996/03/18 22:33:57 thorpej Exp $";
#endif
#endif /* LIBC_SCCS and not lint */

/*
 * Proc traversal interface for kvm.  ps and w are (probably) the exclusive
 * users of this code, so we've factored it out into a separate module.
 * Thus, we keep this grunge out of the other kvm applications (i.e.,
 * most other applications are interested only in open/close/read/nlist).
 */
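
/*
 * Illustrative sketch (not part of the library): roughly how a ps-like
 * consumer is expected to drive kvm_getprocs() and kvm_getargv().  The
 * error handling and the use of KERN_PROC_ALL/O_RDONLY here are assumptions
 * for the example only; real callers also need <fcntl.h>, <err.h>, and
 * <stdio.h>.
 *
 *	kvm_t *kd;
 *	struct kinfo_proc *kp;
 *	char errbuf[_POSIX2_LINE_MAX], **argv;
 *	int i, nprocs;
 *
 *	kd = kvm_openfiles(NULL, NULL, NULL, O_RDONLY, errbuf);
 *	if (kd == NULL)
 *		errx(1, "%s", errbuf);
 *	kp = kvm_getprocs(kd, KERN_PROC_ALL, 0, &nprocs);
 *	for (i = 0; kp != NULL && i < nprocs; i++, kp++)
 *		printf("%d\t%s\n", kp->kp_proc.p_pid,
 *		    (argv = kvm_getargv(kd, kp, 0)) != NULL ?
 *		    argv[0] : kp->kp_proc.p_comm);
 *	kvm_close(kd);
 */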

#include <sys/param.h>
#include <sys/user.h>
#include <sys/proc.h>
#include <sys/exec.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/tty.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <nlist.h>
#include <kvm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/swap_pager.h>

#include <sys/sysctl.h>

#include <limits.h>
#include <db.h>
#include <paths.h>

#include "kvm_private.h"

#define	KREAD(kd, addr, obj) \
	(kvm_read(kd, addr, (char *)(obj), sizeof(*obj)) != sizeof(*obj))

int		_kvm_readfromcore __P((kvm_t *, u_long, u_long));
int		_kvm_readfrompager __P((kvm_t *, struct vm_object *, u_long));
ssize_t		kvm_uread __P((kvm_t *, const struct proc *, u_long, char *,
		    size_t));

static char	**kvm_argv __P((kvm_t *, const struct proc *, u_long, int,
		    int));
static int	kvm_deadprocs __P((kvm_t *, int, int, u_long, u_long, int));
static char	**kvm_doargv __P((kvm_t *, const struct kinfo_proc *, int,
		    void (*)(struct ps_strings *, u_long *, int *)));
static int	kvm_proclist __P((kvm_t *, int, int, struct proc *,
		    struct kinfo_proc *, int));
static int	proc_verify __P((kvm_t *, u_long, const struct proc *));
static void	ps_str_a __P((struct ps_strings *, u_long *, int *));
static void	ps_str_e __P((struct ps_strings *, u_long *, int *));

char *
_kvm_uread(kd, p, va, cnt)
	kvm_t *kd;
	const struct proc *p;
	u_long va;
	u_long *cnt;
{
	register u_long addr, head;
	register u_long offset;
	struct vm_map_entry vme;
	struct vm_object vmo;
	int rv;

	if (kd->swapspc == 0) {
		kd->swapspc = (char *)_kvm_malloc(kd, kd->nbpg);
		if (kd->swapspc == 0)
			return (0);
	}

	/*
	 * Look through the address map for the memory object
	 * that corresponds to the given virtual address.
	 * The header just has the entire valid range.
	 */
	head = (u_long)&p->p_vmspace->vm_map.header;
	addr = head;
	while (1) {
		if (KREAD(kd, addr, &vme))
			return (0);

		if (va >= vme.start && va < vme.end &&
		    vme.object.vm_object != 0)
			break;

		addr = (u_long)vme.next;
		if (addr == head)
			return (0);
	}

	/*
	 * We found the right object -- follow shadow links.
	 */
	offset = va - vme.start + vme.offset;
	addr = (u_long)vme.object.vm_object;

	while (1) {
		/* Try reading the page from core first. */
		if ((rv = _kvm_readfromcore(kd, addr, offset)))
			break;

		if (KREAD(kd, addr, &vmo))
			return (0);

		/* If there is a pager here, see if it has the page. */
		if (vmo.pager != 0 &&
		    (rv = _kvm_readfrompager(kd, &vmo, offset)))
			break;

		/* Move down the shadow chain. */
		addr = (u_long)vmo.shadow;
		if (addr == 0)
			return (0);
		offset += vmo.shadow_offset;
	}

	if (rv == -1)
		return (0);

	/* Found the page. */
	offset %= kd->nbpg;
	*cnt = kd->nbpg - offset;
	return (&kd->swapspc[offset]);
}

#define	vm_page_hash(kd, object, offset) \
	(((u_long)object + (u_long)(offset / kd->nbpg)) & kd->vm_page_hash_mask)

int
_kvm_coreinit(kd)
	kvm_t *kd;
{
	struct nlist nlist[3];

	nlist[0].n_name = "_vm_page_buckets";
	nlist[1].n_name = "_vm_page_hash_mask";
	nlist[2].n_name = 0;
	if (kvm_nlist(kd, nlist) != 0)
		return (-1);

	if (KREAD(kd, nlist[0].n_value, &kd->vm_page_buckets) ||
	    KREAD(kd, nlist[1].n_value, &kd->vm_page_hash_mask))
		return (-1);

	return (0);
}

int
_kvm_readfromcore(kd, object, offset)
	kvm_t *kd;
	u_long object, offset;
{
	u_long addr;
	struct pglist bucket;
	struct vm_page mem;
	off_t seekpoint;

	if (kd->vm_page_buckets == 0 &&
	    _kvm_coreinit(kd))
		return (-1);

	addr = (u_long)&kd->vm_page_buckets[vm_page_hash(kd, object, offset)];
	if (KREAD(kd, addr, &bucket))
		return (-1);

	addr = (u_long)bucket.tqh_first;
	offset &= ~(kd->nbpg - 1);
	while (1) {
		if (addr == 0)
			return (0);

		if (KREAD(kd, addr, &mem))
			return (-1);

		if ((u_long)mem.object == object &&
		    (u_long)mem.offset == offset)
			break;

		addr = (u_long)mem.hashq.tqe_next;
	}

	seekpoint = mem.phys_addr;

	if (lseek(kd->pmfd, seekpoint, 0) == -1)
		return (-1);
	if (read(kd->pmfd, kd->swapspc, kd->nbpg) != kd->nbpg)
		return (-1);

	return (1);
}

int
_kvm_readfrompager(kd, vmop, offset)
	kvm_t *kd;
	struct vm_object *vmop;
	u_long offset;
{
	u_long addr;
	struct pager_struct pager;
	struct swpager swap;
	int ix;
	struct swblock swb;
	off_t seekpoint;

	/* Read in the pager info and make sure it's a swap device. */
	addr = (u_long)vmop->pager;
	if (KREAD(kd, addr, &pager) || pager.pg_type != PG_SWAP)
		return (-1);

	/* Read in the swap_pager private data. */
	addr = (u_long)pager.pg_data;
	if (KREAD(kd, addr, &swap))
		return (-1);

	/*
	 * Calculate the paging offset, and make sure it's within the
	 * bounds of the pager.
	 */
	offset += vmop->paging_offset;
	ix = offset / dbtob(swap.sw_bsize);
#if 0
	if (swap.sw_blocks == 0 || ix >= swap.sw_nblocks)
		return (-1);
#else
	if (swap.sw_blocks == 0 || ix >= swap.sw_nblocks) {
		int i;

		printf("BUG BUG BUG BUG:\n");
		printf("object %x offset %x pgoffset %x pager %x swpager %x\n",
		    vmop, offset - vmop->paging_offset, vmop->paging_offset,
		    vmop->pager, pager.pg_data);
		printf("osize %x bsize %x blocks %x nblocks %x\n",
		    swap.sw_osize, swap.sw_bsize, swap.sw_blocks,
		    swap.sw_nblocks);
		for (ix = 0; ix < swap.sw_nblocks; ix++) {
			addr = (u_long)&swap.sw_blocks[ix];
			if (KREAD(kd, addr, &swb))
				return (0);
			printf("sw_blocks[%d]: block %x mask %x\n", ix,
			    swb.swb_block, swb.swb_mask);
		}
		return (-1);
	}
#endif

	/* Read in the swap records. */
	addr = (u_long)&swap.sw_blocks[ix];
	if (KREAD(kd, addr, &swb))
		return (-1);

	/* Calculate offset within pager. */
	offset %= dbtob(swap.sw_bsize);

	/* Check that the page is actually present. */
	if ((swb.swb_mask & (1 << (offset / kd->nbpg))) == 0)
		return (0);

	if (!ISALIVE(kd))
		return (-1);

	/* Calculate the physical address and read the page. */
	seekpoint = dbtob(swb.swb_block) + (offset & ~(kd->nbpg - 1));

	if (lseek(kd->swfd, seekpoint, 0) == -1)
		return (-1);
	if (read(kd->swfd, kd->swapspc, kd->nbpg) != kd->nbpg)
		return (-1);

	return (1);
}

/*
 * Read procs from memory file into buffer bp, which has space to hold
 * at most maxcnt procs.
 */
static int
kvm_proclist(kd, what, arg, p, bp, maxcnt)
	kvm_t *kd;
	int what, arg;
	struct proc *p;
	struct kinfo_proc *bp;
	int maxcnt;
{
	register int cnt = 0;
	struct eproc eproc;
	struct pgrp pgrp;
	struct session sess;
	struct tty tty;
	struct proc proc;

	for (; cnt < maxcnt && p != NULL; p = proc.p_list.le_next) {
		if (KREAD(kd, (u_long)p, &proc)) {
			_kvm_err(kd, kd->program, "can't read proc at %x", p);
			return (-1);
		}
		if (KREAD(kd, (u_long)proc.p_cred, &eproc.e_pcred) == 0)
			KREAD(kd, (u_long)eproc.e_pcred.pc_ucred,
			    &eproc.e_ucred);

		switch (what) {

		case KERN_PROC_PID:
			if (proc.p_pid != (pid_t)arg)
				continue;
			break;

		case KERN_PROC_UID:
			if (eproc.e_ucred.cr_uid != (uid_t)arg)
				continue;
			break;

		case KERN_PROC_RUID:
			if (eproc.e_pcred.p_ruid != (uid_t)arg)
				continue;
			break;
		}
		/*
		 * We're going to add another proc to the set.  If this
		 * will overflow the buffer, assume the reason is because
		 * nprocs (or the proc list) is corrupt and declare an error.
		 */
		if (cnt >= maxcnt) {
			_kvm_err(kd, kd->program, "nprocs corrupt");
			return (-1);
		}
		/*
		 * gather eproc
		 */
		eproc.e_paddr = p;
		if (KREAD(kd, (u_long)proc.p_pgrp, &pgrp)) {
			_kvm_err(kd, kd->program, "can't read pgrp at %x",
			    proc.p_pgrp);
			return (-1);
		}
		eproc.e_sess = pgrp.pg_session;
		eproc.e_pgid = pgrp.pg_id;
		eproc.e_jobc = pgrp.pg_jobc;
		if (KREAD(kd, (u_long)pgrp.pg_session, &sess)) {
			_kvm_err(kd, kd->program, "can't read session at %x",
			    pgrp.pg_session);
			return (-1);
		}
		if ((proc.p_flag & P_CONTROLT) && sess.s_ttyp != NULL) {
			if (KREAD(kd, (u_long)sess.s_ttyp, &tty)) {
				_kvm_err(kd, kd->program,
				    "can't read tty at %x", sess.s_ttyp);
				return (-1);
			}
			eproc.e_tdev = tty.t_dev;
			eproc.e_tsess = tty.t_session;
			if (tty.t_pgrp != NULL) {
				if (KREAD(kd, (u_long)tty.t_pgrp, &pgrp)) {
					_kvm_err(kd, kd->program,
					    "can't read tpgrp at %x",
					    tty.t_pgrp);
					return (-1);
				}
				eproc.e_tpgid = pgrp.pg_id;
			} else
				eproc.e_tpgid = -1;
		} else
			eproc.e_tdev = NODEV;
		eproc.e_flag = sess.s_ttyvp ? EPROC_CTTY : 0;
		if (sess.s_leader == p)
			eproc.e_flag |= EPROC_SLEADER;
		if (proc.p_wmesg)
			(void)kvm_read(kd, (u_long)proc.p_wmesg,
			    eproc.e_wmesg, WMESGLEN);

		(void)kvm_read(kd, (u_long)proc.p_vmspace,
		    (char *)&eproc.e_vm, sizeof(eproc.e_vm));

		eproc.e_xsize = eproc.e_xrssize = 0;
		eproc.e_xccount = eproc.e_xswrss = 0;

		switch (what) {

		case KERN_PROC_PGRP:
			if (eproc.e_pgid != (pid_t)arg)
				continue;
			break;

		case KERN_PROC_TTY:
			if ((proc.p_flag & P_CONTROLT) == 0 ||
			    eproc.e_tdev != (dev_t)arg)
				continue;
			break;
		}
		bcopy(&proc, &bp->kp_proc, sizeof(proc));
		bcopy(&eproc, &bp->kp_eproc, sizeof(eproc));
		++bp;
		++cnt;
	}
	return (cnt);
}

/*
 * Build proc info array by reading in proc list from a crash dump.
 * Return number of procs read.  maxcnt is the max we will read.
 */
static int
kvm_deadprocs(kd, what, arg, a_allproc, a_zombproc, maxcnt)
	kvm_t *kd;
	int what, arg;
	u_long a_allproc;
	u_long a_zombproc;
	int maxcnt;
{
	register struct kinfo_proc *bp = kd->procbase;
	register int acnt, zcnt;
	struct proc *p;

	if (KREAD(kd, a_allproc, &p)) {
		_kvm_err(kd, kd->program, "cannot read allproc");
		return (-1);
	}
	acnt = kvm_proclist(kd, what, arg, p, bp, maxcnt);
	if (acnt < 0)
		return (acnt);

	if (KREAD(kd, a_zombproc, &p)) {
		_kvm_err(kd, kd->program, "cannot read zombproc");
		return (-1);
	}
	zcnt = kvm_proclist(kd, what, arg, p, bp + acnt, maxcnt - acnt);
	if (zcnt < 0)
		zcnt = 0;

	return (acnt + zcnt);
}

struct kinfo_proc *
kvm_getprocs(kd, op, arg, cnt)
	kvm_t *kd;
	int op, arg;
	int *cnt;
{
	size_t size;
	int mib[4], st, nprocs;

	if (kd->procbase != 0) {
		free((void *)kd->procbase);
		/*
		 * Clear this pointer in case this call fails.  Otherwise,
		 * kvm_close() will free it again.
		 */
		kd->procbase = 0;
	}
	if (ISALIVE(kd)) {
		size = 0;
		mib[0] = CTL_KERN;
		mib[1] = KERN_PROC;
		mib[2] = op;
		mib[3] = arg;
		st = sysctl(mib, 4, NULL, &size, NULL, 0);
		if (st == -1) {
			_kvm_syserr(kd, kd->program, "kvm_getprocs");
			return (0);
		}
		kd->procbase = (struct kinfo_proc *)_kvm_malloc(kd, size);
		if (kd->procbase == 0)
			return (0);
		st = sysctl(mib, 4, kd->procbase, &size, NULL, 0);
		if (st == -1) {
			_kvm_syserr(kd, kd->program, "kvm_getprocs");
			return (0);
		}
		if (size % sizeof(struct kinfo_proc) != 0) {
			_kvm_err(kd, kd->program,
			    "proc size mismatch (%d total, %d chunks)",
			    size, sizeof(struct kinfo_proc));
			return (0);
		}
		nprocs = size / sizeof(struct kinfo_proc);
	} else {
		struct nlist nl[4], *p;

		nl[0].n_name = "_nprocs";
		nl[1].n_name = "_allproc";
		nl[2].n_name = "_zombproc";
		nl[3].n_name = 0;

		if (kvm_nlist(kd, nl) != 0) {
			for (p = nl; p->n_type != 0; ++p)
				;
			_kvm_err(kd, kd->program,
			    "%s: no such symbol", p->n_name);
			return (0);
		}
		if (KREAD(kd, nl[0].n_value, &nprocs)) {
			_kvm_err(kd, kd->program, "can't read nprocs");
			return (0);
		}
		size = nprocs * sizeof(struct kinfo_proc);
		kd->procbase = (struct kinfo_proc *)_kvm_malloc(kd, size);
		if (kd->procbase == 0)
			return (0);

		nprocs = kvm_deadprocs(kd, op, arg, nl[1].n_value,
		    nl[2].n_value, nprocs);
#ifdef notdef
		size = nprocs * sizeof(struct kinfo_proc);
		(void)realloc(kd->procbase, size);
#endif
	}
	*cnt = nprocs;
	return (kd->procbase);
}

void
_kvm_freeprocs(kd)
	kvm_t *kd;
{
	if (kd->procbase) {
		free(kd->procbase);
		kd->procbase = 0;
	}
}

void *
_kvm_realloc(kd, p, n)
	kvm_t *kd;
	void *p;
	size_t n;
{
	void *np = (void *)realloc(p, n);

	if (np == 0)
		_kvm_err(kd, kd->program, "out of memory");
	return (np);
}

#ifndef MAX
#define	MAX(a, b) ((a) > (b) ? (a) : (b))
#endif

/*
 * Read in an argument vector from the user address space of process p.
 * addr is the user-space base address of narg null-terminated contiguous
 * strings.  This is used to read in both the command arguments and
 * environment strings.  Read at most maxcnt characters of strings.
 */
static char **
kvm_argv(kd, p, addr, narg, maxcnt)
	kvm_t *kd;
	const struct proc *p;
	register u_long addr;
	register int narg;
	register int maxcnt;
{
	register char *np, *cp, *ep, *ap;
	register u_long oaddr = -1;
	register int len, cc;
	register char **argv;

	/*
	 * Check that there aren't an unreasonable number of arguments,
	 * and that the address is in user space.
	 */
	if (narg > ARG_MAX || addr < VM_MIN_ADDRESS ||
	    addr >= VM_MAXUSER_ADDRESS)
		return (0);

	if (kd->argv == 0) {
		/*
		 * Try to avoid reallocs.
		 */
		kd->argc = MAX(narg + 1, 32);
		kd->argv = (char **)_kvm_malloc(kd, kd->argc *
		    sizeof(*kd->argv));
		if (kd->argv == 0)
			return (0);
	} else if (narg + 1 > kd->argc) {
		kd->argc = MAX(2 * kd->argc, narg + 1);
		kd->argv = (char **)_kvm_realloc(kd, kd->argv, kd->argc *
		    sizeof(*kd->argv));
		if (kd->argv == 0)
			return (0);
	}
	if (kd->argspc == 0) {
		kd->argspc = (char *)_kvm_malloc(kd, kd->nbpg);
		if (kd->argspc == 0)
			return (0);
		kd->arglen = kd->nbpg;
	}
	if (kd->argbuf == 0) {
		kd->argbuf = (char *)_kvm_malloc(kd, kd->nbpg);
		if (kd->argbuf == 0)
			return (0);
	}
	cc = sizeof(char *) * narg;
	if (kvm_uread(kd, p, addr, (char *)kd->argv, cc) != cc)
		return (0);
	ap = np = kd->argspc;
	argv = kd->argv;
	len = 0;
	/*
	 * Loop over pages, filling in the argument vector.
	 */
	while (argv < kd->argv + narg && *argv != 0) {
		addr = (u_long)*argv & ~(kd->nbpg - 1);
		if (addr != oaddr) {
			if (kvm_uread(kd, p, addr, kd->argbuf, kd->nbpg) !=
			    kd->nbpg)
				return (0);
			oaddr = addr;
		}
		addr = (u_long)*argv & (kd->nbpg - 1);
		cp = kd->argbuf + addr;
		cc = kd->nbpg - addr;
		if (maxcnt > 0 && cc > maxcnt - len)
			cc = maxcnt - len;
		ep = memchr(cp, '\0', cc);
		if (ep != 0)
			cc = ep - cp + 1;
		if (len + cc > kd->arglen) {
			register int off;
			register char **pp;
			register char *op = kd->argspc;

			kd->arglen *= 2;
			kd->argspc = (char *)_kvm_realloc(kd, kd->argspc,
			    kd->arglen);
			if (kd->argspc == 0)
				return (0);
			/*
			 * Adjust argv pointers in case realloc moved
			 * the string space.
			 */
			off = kd->argspc - op;
			for (pp = kd->argv; pp < argv; pp++)
				*pp += off;
			ap += off;
			np += off;
		}
		memcpy(np, cp, cc);
		np += cc;
		len += cc;
		if (ep != 0) {
			*argv++ = ap;
			ap = np;
		} else
			*argv += cc;
		if (maxcnt > 0 && len >= maxcnt) {
			/*
			 * We're stopping prematurely.  Terminate the
			 * current string.
			 */
			if (ep == 0) {
				*np = '\0';
				*argv++ = ap;
			}
			break;
		}
	}
	/* Make sure argv is terminated. */
	*argv = 0;
	return (kd->argv);
}

static void
ps_str_a(p, addr, n)
	struct ps_strings *p;
	u_long *addr;
	int *n;
{
	*addr = (u_long)p->ps_argvstr;
	*n = p->ps_nargvstr;
}

static void
ps_str_e(p, addr, n)
	struct ps_strings *p;
	u_long *addr;
	int *n;
{
	*addr = (u_long)p->ps_envstr;
	*n = p->ps_nenvstr;
}

/*
 * Determine if the proc indicated by p is still active.
 * This test is not 100% foolproof in theory, but chances of
 * being wrong are very low.
 */
static int
proc_verify(kd, kernp, p)
	kvm_t *kd;
	u_long kernp;
	const struct proc *p;
{
	struct proc kernproc;

	/*
	 * Just read in the whole proc.  It's not that big relative
	 * to the cost of the read system call.
	 */
	if (kvm_read(kd, kernp, (char *)&kernproc, sizeof(kernproc)) !=
	    sizeof(kernproc))
		return (0);
	return (p->p_pid == kernproc.p_pid &&
	    (kernproc.p_stat != SZOMB || p->p_stat == SZOMB));
}

static char **
kvm_doargv(kd, kp, nchr, info)
	kvm_t *kd;
	const struct kinfo_proc *kp;
	int nchr;
	void (*info)(struct ps_strings *, u_long *, int *);
{
	register const struct proc *p = &kp->kp_proc;
	register char **ap;
	u_long addr;
	int cnt;
	struct ps_strings arginfo;

	/*
	 * Pointers are stored at the top of the user stack.
	 */
	if (p->p_stat == SZOMB ||
	    kvm_uread(kd, p, USRSTACK - sizeof(arginfo), (char *)&arginfo,
	    sizeof(arginfo)) != sizeof(arginfo))
		return (0);

	(*info)(&arginfo, &addr, &cnt);
	if (cnt == 0)
		return (0);
	ap = kvm_argv(kd, p, addr, cnt, nchr);
	/*
	 * For live kernels, make sure this process didn't go away.
	 */
	if (ap != 0 && ISALIVE(kd) &&
	    !proc_verify(kd, (u_long)kp->kp_eproc.e_paddr, p))
		ap = 0;
	return (ap);
}

/*
 * Get the command args.  This code is now machine independent.
 */
char **
kvm_getargv(kd, kp, nchr)
	kvm_t *kd;
	const struct kinfo_proc *kp;
	int nchr;
{
	return (kvm_doargv(kd, kp, nchr, ps_str_a));
}

char **
kvm_getenvv(kd, kp, nchr)
	kvm_t *kd;
	const struct kinfo_proc *kp;
	int nchr;
{
	return (kvm_doargv(kd, kp, nchr, ps_str_e));
}

/*
 * Read from user space.  The user context is given by p.
 */
ssize_t
kvm_uread(kd, p, uva, buf, len)
	kvm_t *kd;
	register const struct proc *p;
	register u_long uva;
	register char *buf;
	register size_t len;
{
	register char *cp;

	cp = buf;
	while (len > 0) {
		register int cc;
		register char *dp;
		u_long cnt;

		dp = _kvm_uread(kd, p, uva, &cnt);
		if (dp == 0) {
			_kvm_err(kd, 0, "invalid address (%x)", uva);
			return (0);
		}
		cc = MIN(cnt, len);
		bcopy(dp, cp, cc);

		cp += cc;
		uva += cc;
		len -= cc;
	}
	return (ssize_t)(cp - buf);
}
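
/*
 * Illustrative sketch (not part of the library): consuming the vectors
 * returned by kvm_getargv() and kvm_getenvv().  Both return a
 * NULL-terminated array whose storage is owned by the kvm_t handle, so
 * callers copy anything they want to keep.  The "kd" and "kp" variables
 * are assumed to come from earlier kvm_openfiles() and kvm_getprocs()
 * calls, as in the sketch near the top of this file; a real caller also
 * needs <stdio.h>.
 *
 *	char **ap;
 *
 *	for (ap = kvm_getargv(kd, kp, 0); ap != NULL && *ap != NULL; ap++)
 *		printf("arg: %s\n", *ap);
 *	for (ap = kvm_getenvv(kd, kp, 0); ap != NULL && *ap != NULL; ap++)
 *		printf("env: %s\n", *ap);
 */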