/*	$NetBSD: kvm_proc.c,v 1.95 2021/07/19 10:30:36 christos Exp $	*/

/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1989, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software developed by the Computer Systems
 * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
 * BG 91-66 and contributed to Berkeley.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#if defined(LIBC_SCCS) && !defined(lint)
#if 0
static char sccsid[] = "@(#)kvm_proc.c	8.3 (Berkeley) 9/23/93";
#else
__RCSID("$NetBSD: kvm_proc.c,v 1.95 2021/07/19 10:30:36 christos Exp $");
#endif
#endif /* LIBC_SCCS and not lint */

/*
 * Proc traversal interface for kvm.  ps and w are (probably) the exclusive
 * users of this code, so we've factored it out into a separate module.
 * Thus, we keep this grunge out of the other kvm applications (i.e.,
 * most other applications are interested only in open/close/read/nlist).
 */

#include <sys/param.h>
#include <sys/lwp.h>
#include <sys/wait.h>
#include <sys/proc.h>
#include <sys/exec.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/tty.h>
#include <sys/resourcevar.h>
#include <sys/mutex.h>
#include <sys/specificdata.h>
#include <sys/types.h>

#include <errno.h>
#include <stdlib.h>
#include <stddef.h>
#include <string.h>
#include <unistd.h>
#include <nlist.h>
#include <kvm.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm_param.h>
#include <uvm/uvm_amap.h>
#include <uvm/uvm_page.h>

#include <sys/sysctl.h>

#include <limits.h>
#include <db.h>
#include <paths.h>

#include "kvm_private.h"

/*
 * Common info from kinfo_proc and kinfo_proc2 used by helper routines.
 */
struct miniproc {
	struct vmspace	*p_vmspace;
	char		 p_stat;
	struct proc	*p_paddr;
	pid_t		 p_pid;
};

/*
 * Convert from struct proc and kinfo_proc{,2} to miniproc.
 */
#define PTOMINI(kp, p) \
	do { \
		(p)->p_stat = (kp)->p_stat; \
		(p)->p_pid = (kp)->p_pid; \
		(p)->p_paddr = NULL; \
		(p)->p_vmspace = (kp)->p_vmspace; \
	} while (/*CONSTCOND*/0);

#define KPTOMINI(kp, p) \
	do { \
		(p)->p_stat = (kp)->kp_proc.p_stat; \
		(p)->p_pid = (kp)->kp_proc.p_pid; \
		(p)->p_paddr = (kp)->kp_eproc.e_paddr; \
		(p)->p_vmspace = (kp)->kp_proc.p_vmspace; \
	} while (/*CONSTCOND*/0);

#define KP2TOMINI(kp, p) \
	do { \
		(p)->p_stat = (kp)->p_stat; \
		(p)->p_pid = (kp)->p_pid; \
		(p)->p_paddr = (void *)(long)(kp)->p_paddr; \
		(p)->p_vmspace = (void *)(long)(kp)->p_vmspace; \
	} while (/*CONSTCOND*/0);

/*
 * NetBSD uses kauth(9) to manage credentials, which are stored in
 * kauth_cred_t, a kernel-only opaque type.  This is an embedded version
 * which is *INTERNAL* to kvm(3) so dumps can be read properly.
 *
 * Whenever NetBSD starts exporting credentials to userland consistently
 * (using 'struct uucred', or something) this will have to be updated again.
 */
struct kvm_kauth_cred {
	u_int cr_refcnt;		/* reference count */
#if COHERENCY_UNIT > 4
	uint8_t cr_pad[COHERENCY_UNIT - 4];
#endif
	uid_t cr_uid;			/* user id */
	uid_t cr_euid;			/* effective user id */
	uid_t cr_svuid;			/* saved effective user id */
	gid_t cr_gid;			/* group id */
	gid_t cr_egid;			/* effective group id */
	gid_t cr_svgid;			/* saved effective group id */
	u_int cr_ngroups;		/* number of groups */
	gid_t cr_groups[NGROUPS];	/* group memberships */
	specificdata_reference cr_sd;	/* specific data */
};
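/*
 * Illustrative sketch only (not part of the library, excluded from the
 * build): a minimal ps-like consumer of this module on a live kernel,
 * using kvm_getproc2() and kvm_getargv2() much as ps(1) does.  The error
 * handling and output format below are editorial examples, not the ps(1)
 * implementation.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <err.h>
#include <fcntl.h>
#include <kvm.h>
#include <limits.h>
#include <stdio.h>

int
main(void)
{
	char errbuf[_POSIX2_LINE_MAX];
	kvm_t *kd;
	struct kinfo_proc2 *kp;
	char **argv;
	int i, j, nprocs;

	/* Open the running kernel; a crash dump could be named instead. */
	kd = kvm_openfiles(NULL, NULL, NULL, O_RDONLY, errbuf);
	if (kd == NULL)
		errx(1, "kvm_openfiles: %s", errbuf);

	kp = kvm_getproc2(kd, KERN_PROC_ALL, 0, sizeof(*kp), &nprocs);
	if (kp == NULL)
		errx(1, "kvm_getproc2: %s", kvm_geterr(kd));

	for (i = 0; i < nprocs; i++) {
		printf("%5d %s", kp[i].p_pid, kp[i].p_comm);
		/* nchr == 0 means "no limit"; may fail for other users. */
		argv = kvm_getargv2(kd, &kp[i], 0);
		for (j = 0; argv != NULL && argv[j] != NULL; j++)
			printf(" %s", argv[j]);
		printf("\n");
	}
	(void)kvm_close(kd);
	return 0;
}
#endif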
/* XXX: What uses these two functions? */
char		*_kvm_uread(kvm_t *, const struct proc *, u_long, u_long *);
ssize_t		kvm_uread(kvm_t *, const struct proc *, u_long, char *,
		    size_t);

static char	*_kvm_ureadm(kvm_t *, const struct miniproc *, u_long,
		    u_long *);
static ssize_t	kvm_ureadm(kvm_t *, const struct miniproc *, u_long,
		    char *, size_t);

static char	**kvm_argv(kvm_t *, const struct miniproc *, u_long, int, int);
static int	kvm_deadprocs(kvm_t *, int, int, u_long, u_long, int);
static char	**kvm_doargv(kvm_t *, const struct miniproc *, int,
		    void (*)(struct ps_strings *, u_long *, int *));
static char	**kvm_doargv2(kvm_t *, pid_t, int, int);
static int	kvm_proclist(kvm_t *, int, int, struct proc *,
		    struct kinfo_proc *, int);
static int	proc_verify(kvm_t *, u_long, const struct miniproc *);
static void	ps_str_a(struct ps_strings *, u_long *, int *);
static void	ps_str_e(struct ps_strings *, u_long *, int *);


static char *
_kvm_ureadm(kvm_t *kd, const struct miniproc *p, u_long va, u_long *cnt)
{
	u_long addr, head;
	u_long offset;
	struct vm_map_entry vme;
	struct vm_amap amap;
	struct vm_anon *anonp, anon;
	struct vm_page pg;
	u_long slot;

	if (kd->swapspc == NULL) {
		kd->swapspc = _kvm_malloc(kd, (size_t)kd->nbpg);
		if (kd->swapspc == NULL)
			return (NULL);
	}

	/*
	 * Look through the address map for the memory object
	 * that corresponds to the given virtual address.
	 * The header just has the entire valid range.
	 */
	head = (u_long)&p->p_vmspace->vm_map.header;
	addr = head;
	for (;;) {
		if (KREAD(kd, addr, &vme))
			return (NULL);

		if (va >= vme.start && va < vme.end &&
		    vme.aref.ar_amap != NULL)
			break;

		addr = (u_long)vme.next;
		if (addr == head)
			return (NULL);
	}

	/*
	 * we found the map entry, now to find the object...
	 */
	if (vme.aref.ar_amap == NULL)
		return (NULL);

	addr = (u_long)vme.aref.ar_amap;
	if (KREAD(kd, addr, &amap))
		return (NULL);

	offset = va - vme.start;
	slot = offset / kd->nbpg + vme.aref.ar_pageoff;
	/* sanity-check slot number */
	if (slot > amap.am_nslot)
		return (NULL);

	addr = (u_long)amap.am_anon + (offset / kd->nbpg) * sizeof(anonp);
	if (KREAD(kd, addr, &anonp))
		return (NULL);

	addr = (u_long)anonp;
	if (KREAD(kd, addr, &anon))
		return (NULL);

	addr = (u_long)anon.an_page;
	if (addr) {
		if (KREAD(kd, addr, &pg))
			return (NULL);

		if (_kvm_pread(kd, kd->pmfd, kd->swapspc, (size_t)kd->nbpg,
		    (off_t)pg.phys_addr & ~(kd->nbpg - 1)) != kd->nbpg)
			return (NULL);
	} else {
		if (kd->swfd < 0 ||
		    _kvm_pread(kd, kd->swfd, kd->swapspc, (size_t)kd->nbpg,
		    (off_t)(anon.an_swslot * kd->nbpg)) != kd->nbpg)
			return (NULL);
	}

	/* Found the page. */
	offset %= kd->nbpg;
	*cnt = kd->nbpg - offset;
	return (&kd->swapspc[(size_t)offset]);
}

char *
_kvm_uread(kvm_t *kd, const struct proc *p, u_long va, u_long *cnt)
{
	struct miniproc mp;

	PTOMINI(p, &mp);
	return (_kvm_ureadm(kd, &mp, va, cnt));
}

/*
 * Convert credentials located in kernel space address 'cred' and store
 * them in the appropriate members of 'eproc'.
 */
static int
_kvm_convertcred(kvm_t *kd, u_long cred, struct eproc *eproc)
{
	struct kvm_kauth_cred kauthcred;
	struct ki_pcred *pc = &eproc->e_pcred;
	struct ki_ucred *uc = &eproc->e_ucred;

	if (KREAD(kd, cred, &kauthcred) != 0)
		return (-1);

	/* inlined version of kauth_cred_to_pcred, see kauth(9). */
	pc->p_ruid = kauthcred.cr_uid;
	pc->p_svuid = kauthcred.cr_svuid;
	pc->p_rgid = kauthcred.cr_gid;
	pc->p_svgid = kauthcred.cr_svgid;
	pc->p_refcnt = kauthcred.cr_refcnt;
	pc->p_pad = NULL;

	/* inlined version of kauth_cred_to_ucred(), see kauth(9). */
	uc->cr_ref = kauthcred.cr_refcnt;
	uc->cr_uid = kauthcred.cr_euid;
	uc->cr_gid = kauthcred.cr_egid;
	uc->cr_ngroups = (uint32_t)MIN(kauthcred.cr_ngroups,
	    sizeof(uc->cr_groups) / sizeof(uc->cr_groups[0]));
	memcpy(uc->cr_groups, kauthcred.cr_groups,
	    uc->cr_ngroups * sizeof(uc->cr_groups[0]));

	return (0);
}

/*
 * Read procs from memory file into buffer bp, which has space to hold
 * at most maxcnt procs.
 */
static int
kvm_proclist(kvm_t *kd, int what, int arg, struct proc *p,
    struct kinfo_proc *bp, int maxcnt)
{
	int cnt = 0;
	int nlwps;
	struct kinfo_lwp *kl;
	struct eproc eproc;
	struct pgrp pgrp;
	struct session sess;
	struct tty tty;
	struct proc proc;

	for (; cnt < maxcnt && p != NULL; p = proc.p_list.le_next) {
		if (KREAD(kd, (u_long)p, &proc)) {
			_kvm_err(kd, kd->program, "can't read proc at %p", p);
			return (-1);
		}
		if (_kvm_convertcred(kd, (u_long)proc.p_cred, &eproc) != 0) {
			_kvm_err(kd, kd->program,
			    "can't read proc credentials at %p", p);
			return (-1);
		}

		switch (what) {

		case KERN_PROC_PID:
			if (proc.p_pid != (pid_t)arg)
				continue;
			break;

		case KERN_PROC_UID:
			if (eproc.e_ucred.cr_uid != (uid_t)arg)
				continue;
			break;

		case KERN_PROC_RUID:
			if (eproc.e_pcred.p_ruid != (uid_t)arg)
				continue;
			break;
		}
		/*
		 * We're going to add another proc to the set.  If this
		 * will overflow the buffer, assume the reason is because
		 * nprocs (or the proc list) is corrupt and declare an error.
		 */
		if (cnt >= maxcnt) {
			_kvm_err(kd, kd->program, "nprocs corrupt");
			return (-1);
		}
		/*
		 * gather eproc
		 */
		eproc.e_paddr = p;
		if (KREAD(kd, (u_long)proc.p_pgrp, &pgrp)) {
			_kvm_err(kd, kd->program, "can't read pgrp at %p",
			    proc.p_pgrp);
			return (-1);
		}
		eproc.e_sess = pgrp.pg_session;
		eproc.e_pgid = pgrp.pg_id;
		eproc.e_jobc = pgrp.pg_jobc;
		if (KREAD(kd, (u_long)pgrp.pg_session, &sess)) {
			_kvm_err(kd, kd->program, "can't read session at %p",
			    pgrp.pg_session);
			return (-1);
		}
		if ((proc.p_lflag & PL_CONTROLT) && sess.s_ttyp != NULL) {
			if (KREAD(kd, (u_long)sess.s_ttyp, &tty)) {
				_kvm_err(kd, kd->program,
				    "can't read tty at %p", sess.s_ttyp);
				return (-1);
			}
			eproc.e_tdev = (uint32_t)tty.t_dev;
			eproc.e_tsess = tty.t_session;
			if (tty.t_pgrp != NULL) {
				if (KREAD(kd, (u_long)tty.t_pgrp, &pgrp)) {
					_kvm_err(kd, kd->program,
					    "can't read tpgrp at %p",
					    tty.t_pgrp);
					return (-1);
				}
				eproc.e_tpgid = pgrp.pg_id;
			} else
				eproc.e_tpgid = -1;
		} else
			eproc.e_tdev = (uint32_t)NODEV;
		eproc.e_flag = sess.s_ttyvp ? EPROC_CTTY : 0;
		eproc.e_sid = sess.s_sid;
		if (sess.s_leader == p)
			eproc.e_flag |= EPROC_SLEADER;
		/*
		 * Fill in the old-style proc.p_wmesg by copying the wmesg
		 * from the first available LWP.
		 */
		kl = kvm_getlwps(kd, proc.p_pid,
		    (u_long)PTRTOUINT64(eproc.e_paddr),
		    sizeof(struct kinfo_lwp), &nlwps);
		if (kl) {
			if (nlwps > 0) {
				strcpy(eproc.e_wmesg, kl[0].l_wmesg);
			}
		}
		(void)kvm_read(kd, (u_long)proc.p_vmspace, &eproc.e_vm,
		    sizeof(eproc.e_vm));

		eproc.e_xsize = eproc.e_xrssize = 0;
		eproc.e_xccount = eproc.e_xswrss = 0;

		switch (what) {

		case KERN_PROC_PGRP:
			if (eproc.e_pgid != (pid_t)arg)
				continue;
			break;

		case KERN_PROC_TTY:
			if ((proc.p_lflag & PL_CONTROLT) == 0 ||
			    eproc.e_tdev != (dev_t)arg)
				continue;
			break;
		}
		memcpy(&bp->kp_proc, &proc, sizeof(proc));
		memcpy(&bp->kp_eproc, &eproc, sizeof(eproc));
		++bp;
		++cnt;
	}
	return (cnt);
}

/*
 * Build proc info array by reading in proc list from a crash dump.
 * Return number of procs read.  maxcnt is the max we will read.
 */
static int
kvm_deadprocs(kvm_t *kd, int what, int arg, u_long a_allproc,
    u_long a_zombproc, int maxcnt)
{
	struct kinfo_proc *bp = kd->procbase;
	int acnt, zcnt;
	struct proc *p;

	if (KREAD(kd, a_allproc, &p)) {
		_kvm_err(kd, kd->program, "cannot read allproc");
		return (-1);
	}
	acnt = kvm_proclist(kd, what, arg, p, bp, maxcnt);
	if (acnt < 0)
		return (acnt);

	if (KREAD(kd, a_zombproc, &p)) {
		_kvm_err(kd, kd->program, "cannot read zombproc");
		return (-1);
	}
	zcnt = kvm_proclist(kd, what, arg, p, bp + acnt,
	    maxcnt - acnt);
	if (zcnt < 0)
		zcnt = 0;

	return (acnt + zcnt);
}

struct kinfo_proc2 *
kvm_getproc2(kvm_t *kd, int op, int arg, size_t esize, int *cnt)
{
	size_t size;
	int mib[6], st, nprocs;
	struct pstats pstats;

	if (ISSYSCTL(kd)) {
		size = 0;
		mib[0] = CTL_KERN;
		mib[1] = KERN_PROC2;
		mib[2] = op;
		mib[3] = arg;
		mib[4] = (int)esize;
again:
		mib[5] = 0;
		st = sysctl(mib, 6, NULL, &size, NULL, (size_t)0);
		if (st == -1) {
			_kvm_syserr(kd, kd->program, "kvm_getproc2");
			return (NULL);
		}

		mib[5] = (int)(size / esize);
		KVM_ALLOC(kd, procbase2, size);
		st = sysctl(mib, 6, kd->procbase2, &size, NULL, (size_t)0);
		if (st == -1) {
			if (errno == ENOMEM) {
				goto again;
			}
			_kvm_syserr(kd, kd->program, "kvm_getproc2");
			return (NULL);
		}
		nprocs = (int)(size / esize);
	} else {
		char *kp2c;
		struct kinfo_proc *kp;
		struct kinfo_proc2 kp2, *kp2p;
		struct kinfo_lwp *kl;
		int i, nlwps;

		kp = kvm_getprocs(kd, op, arg, &nprocs);
		if (kp == NULL)
			return (NULL);

		size = nprocs * esize;
		KVM_ALLOC(kd, procbase2, size);
		kp2c = (char *)(void *)kd->procbase2;
		kp2p = &kp2;
		for (i = 0; i < nprocs; i++, kp++) {
			struct timeval tv;

			kl = kvm_getlwps(kd, kp->kp_proc.p_pid,
			    (u_long)PTRTOUINT64(kp->kp_eproc.e_paddr),
			    sizeof(struct kinfo_lwp), &nlwps);

			if (kl == NULL) {
				_kvm_syserr(kd, NULL,
				    "kvm_getlwps() failed on process %u\n",
				    kp->kp_proc.p_pid);
				if (nlwps == 0)
					return NULL;
				else
					continue;
			}

			/* We use kl[0] as the "representative" LWP */
			memset(kp2p, 0, sizeof(kp2));
			kp2p->p_forw = kl[0].l_forw;
			kp2p->p_back = kl[0].l_back;
			kp2p->p_paddr = PTRTOUINT64(kp->kp_eproc.e_paddr);
			kp2p->p_addr = kl[0].l_addr;
			kp2p->p_fd = PTRTOUINT64(kp->kp_proc.p_fd);
			kp2p->p_cwdi = PTRTOUINT64(kp->kp_proc.p_cwdi);
			kp2p->p_stats = PTRTOUINT64(kp->kp_proc.p_stats);
			kp2p->p_limit = PTRTOUINT64(kp->kp_proc.p_limit);
			kp2p->p_vmspace = PTRTOUINT64(kp->kp_proc.p_vmspace);
			kp2p->p_sigacts = PTRTOUINT64(kp->kp_proc.p_sigacts);
			kp2p->p_sess = PTRTOUINT64(kp->kp_eproc.e_sess);
			kp2p->p_tsess = 0;
#if 1 /* XXX: dsl - p_ru was only ever non-zero for zombies */
			kp2p->p_ru = 0;
#else
			kp2p->p_ru = PTRTOUINT64(pstats.p_ru);
#endif

			kp2p->p_eflag = 0;
			kp2p->p_exitsig = kp->kp_proc.p_exitsig;
			kp2p->p_flag = kp->kp_proc.p_flag;

			kp2p->p_pid = kp->kp_proc.p_pid;

			kp2p->p_ppid = kp->kp_eproc.e_ppid;
			kp2p->p_sid = kp->kp_eproc.e_sid;
			kp2p->p__pgid = kp->kp_eproc.e_pgid;

			kp2p->p_tpgid = -1 /* XXX NO_PGID! */;

			kp2p->p_uid = kp->kp_eproc.e_ucred.cr_uid;
			kp2p->p_ruid = kp->kp_eproc.e_pcred.p_ruid;
			kp2p->p_svuid = kp->kp_eproc.e_pcred.p_svuid;
			kp2p->p_gid = kp->kp_eproc.e_ucred.cr_gid;
			kp2p->p_rgid = kp->kp_eproc.e_pcred.p_rgid;
			kp2p->p_svgid = kp->kp_eproc.e_pcred.p_svgid;

			/*CONSTCOND*/
			memcpy(kp2p->p_groups, kp->kp_eproc.e_ucred.cr_groups,
			    MIN(sizeof(kp2p->p_groups),
			    sizeof(kp->kp_eproc.e_ucred.cr_groups)));
			kp2p->p_ngroups = kp->kp_eproc.e_ucred.cr_ngroups;

			kp2p->p_jobc = kp->kp_eproc.e_jobc;
			kp2p->p_tdev = kp->kp_eproc.e_tdev;
			kp2p->p_tpgid = kp->kp_eproc.e_tpgid;
			kp2p->p_tsess = PTRTOUINT64(kp->kp_eproc.e_tsess);

			kp2p->p_estcpu = 0;
			bintime2timeval(&kp->kp_proc.p_rtime, &tv);
			kp2p->p_rtime_sec = (uint32_t)tv.tv_sec;
			kp2p->p_rtime_usec = (uint32_t)tv.tv_usec;
			kp2p->p_cpticks = kl[0].l_cpticks;
			kp2p->p_pctcpu = kp->kp_proc.p_pctcpu;
			kp2p->p_swtime = kl[0].l_swtime;
			kp2p->p_slptime = kl[0].l_slptime;
#if 0 /* XXX thorpej */
			kp2p->p_schedflags = kp->kp_proc.p_schedflags;
#else
			kp2p->p_schedflags = 0;
#endif

			kp2p->p_uticks = kp->kp_proc.p_uticks;
			kp2p->p_sticks = kp->kp_proc.p_sticks;
			kp2p->p_iticks = kp->kp_proc.p_iticks;

			kp2p->p_tracep = PTRTOUINT64(kp->kp_proc.p_tracep);
			kp2p->p_traceflag = kp->kp_proc.p_traceflag;

			kp2p->p_holdcnt = kl[0].l_holdcnt;

			memcpy(&kp2p->p_siglist,
			    &kp->kp_proc.p_sigpend.sp_set,
			    sizeof(ki_sigset_t));
			memset(&kp2p->p_sigmask, 0,
			    sizeof(ki_sigset_t));
			memcpy(&kp2p->p_sigignore,
			    &kp->kp_proc.p_sigctx.ps_sigignore,
			    sizeof(ki_sigset_t));
			memcpy(&kp2p->p_sigcatch,
			    &kp->kp_proc.p_sigctx.ps_sigcatch,
			    sizeof(ki_sigset_t));

			kp2p->p_stat = kl[0].l_stat;
			kp2p->p_priority = kl[0].l_priority;
			kp2p->p_usrpri = kl[0].l_priority;
			kp2p->p_nice = kp->kp_proc.p_nice;

			kp2p->p_xstat = P_WAITSTATUS(&kp->kp_proc);
			kp2p->p_acflag = kp->kp_proc.p_acflag;

			/*CONSTCOND*/
			strncpy(kp2p->p_comm, kp->kp_proc.p_comm,
			    MIN(sizeof(kp2p->p_comm),
			    sizeof(kp->kp_proc.p_comm)));

			strncpy(kp2p->p_wmesg, kp->kp_eproc.e_wmesg,
			    sizeof(kp2p->p_wmesg));
			kp2p->p_wchan = kl[0].l_wchan;
			strncpy(kp2p->p_login, kp->kp_eproc.e_login,
			    sizeof(kp2p->p_login));

			kp2p->p_vm_rssize = kp->kp_eproc.e_xrssize;
			kp2p->p_vm_tsize = kp->kp_eproc.e_vm.vm_tsize;
			kp2p->p_vm_dsize = kp->kp_eproc.e_vm.vm_dsize;
			kp2p->p_vm_ssize = kp->kp_eproc.e_vm.vm_ssize;
			kp2p->p_vm_vsize = kp->kp_eproc.e_vm.vm_map.size /
			    kd->nbpg;
			/* Adjust mapped size */
			kp2p->p_vm_msize =
			    (kp->kp_eproc.e_vm.vm_map.size / kd->nbpg) -
			    kp->kp_eproc.e_vm.vm_issize +
			    kp->kp_eproc.e_vm.vm_ssize;

			kp2p->p_eflag = (int32_t)kp->kp_eproc.e_flag;

			kp2p->p_realflag = kp->kp_proc.p_flag;
			kp2p->p_nlwps = kp->kp_proc.p_nlwps;
			kp2p->p_nrlwps = kp->kp_proc.p_nrlwps;
			kp2p->p_realstat = kp->kp_proc.p_stat;

			if (P_ZOMBIE(&kp->kp_proc) ||
			    kp->kp_proc.p_stats == NULL ||
			    KREAD(kd, (u_long)kp->kp_proc.p_stats, &pstats)) {
				kp2p->p_uvalid = 0;
			} else {
				kp2p->p_uvalid = 1;

				kp2p->p_ustart_sec = (u_int32_t)
				    pstats.p_start.tv_sec;
				kp2p->p_ustart_usec = (u_int32_t)
				    pstats.p_start.tv_usec;

				kp2p->p_uutime_sec = (u_int32_t)
				    pstats.p_ru.ru_utime.tv_sec;
				kp2p->p_uutime_usec = (u_int32_t)
				    pstats.p_ru.ru_utime.tv_usec;
				kp2p->p_ustime_sec = (u_int32_t)
				    pstats.p_ru.ru_stime.tv_sec;
				kp2p->p_ustime_usec = (u_int32_t)
				    pstats.p_ru.ru_stime.tv_usec;

				kp2p->p_uru_maxrss = pstats.p_ru.ru_maxrss;
				kp2p->p_uru_ixrss = pstats.p_ru.ru_ixrss;
				kp2p->p_uru_idrss = pstats.p_ru.ru_idrss;
				kp2p->p_uru_isrss = pstats.p_ru.ru_isrss;
				kp2p->p_uru_minflt = pstats.p_ru.ru_minflt;
				kp2p->p_uru_majflt = pstats.p_ru.ru_majflt;
				kp2p->p_uru_nswap = pstats.p_ru.ru_nswap;
				kp2p->p_uru_inblock = pstats.p_ru.ru_inblock;
				kp2p->p_uru_oublock = pstats.p_ru.ru_oublock;
				kp2p->p_uru_msgsnd = pstats.p_ru.ru_msgsnd;
				kp2p->p_uru_msgrcv = pstats.p_ru.ru_msgrcv;
				kp2p->p_uru_nsignals = pstats.p_ru.ru_nsignals;
				kp2p->p_uru_nvcsw = pstats.p_ru.ru_nvcsw;
				kp2p->p_uru_nivcsw = pstats.p_ru.ru_nivcsw;

				kp2p->p_uctime_sec = (u_int32_t)
				    (pstats.p_cru.ru_utime.tv_sec +
				    pstats.p_cru.ru_stime.tv_sec);
				kp2p->p_uctime_usec = (u_int32_t)
				    (pstats.p_cru.ru_utime.tv_usec +
				    pstats.p_cru.ru_stime.tv_usec);
			}

			memcpy(kp2c, &kp2, esize);
			kp2c += esize;
		}
	}
	*cnt = nprocs;
	return (kd->procbase2);
}

struct kinfo_lwp *
kvm_getlwps(kvm_t *kd, int pid, u_long paddr, size_t esize, int *cnt)
{
	size_t size;
	int mib[5], nlwps;
	ssize_t st;
	struct kinfo_lwp *kl;

	if (ISSYSCTL(kd)) {
		size = 0;
		mib[0] = CTL_KERN;
		mib[1] = KERN_LWP;
		mib[2] = pid;
		mib[3] = (int)esize;
		mib[4] = 0;
again:
		st = sysctl(mib, 5, NULL, &size, NULL, (size_t)0);
		if (st == -1) {
			switch (errno) {
			case ESRCH: /* Treat this as a soft error; see kvm.c */
				_kvm_syserr(kd, NULL, "kvm_getlwps");
				return NULL;
			default:
				_kvm_syserr(kd, kd->program, "kvm_getlwps");
				return NULL;
			}
		}
		mib[4] = (int)(size / esize);
		KVM_ALLOC(kd, lwpbase, size);
		st = sysctl(mib, 5, kd->lwpbase, &size, NULL, (size_t)0);
		if (st == -1) {
			switch (errno) {
			case ESRCH: /* Treat this as a soft error; see kvm.c */
				_kvm_syserr(kd, NULL, "kvm_getlwps");
				return NULL;
			case ENOMEM:
				goto again;
			default:
				_kvm_syserr(kd, kd->program, "kvm_getlwps");
				return NULL;
			}
		}
		nlwps = (int)(size / esize);
	} else {
		/* grovel through the memory image */
		struct proc p;
		struct lwp l;
		u_long laddr;
		void *back;
		int i;

		st = kvm_read(kd, paddr, &p, sizeof(p));
		if (st == -1) {
			_kvm_syserr(kd, kd->program, "kvm_getlwps");
			return (NULL);
		}

		nlwps = p.p_nlwps;
		size = nlwps * sizeof(*kd->lwpbase);
		KVM_ALLOC(kd, lwpbase, size);
		laddr = (u_long)PTRTOUINT64(p.p_lwps.lh_first);
		for (i = 0; (i < nlwps) && (laddr != 0); i++) {
			st = kvm_read(kd, laddr, &l, sizeof(l));
			if (st == -1) {
				_kvm_syserr(kd, kd->program, "kvm_getlwps");
				return (NULL);
			}
			kl = &kd->lwpbase[i];
			kl->l_laddr = laddr;
			kl->l_forw = PTRTOUINT64(l.l_runq.tqe_next);
			laddr = (u_long)PTRTOUINT64(l.l_runq.tqe_prev);
			st = kvm_read(kd, laddr, &back, sizeof(back));
			if (st == -1) {
				_kvm_syserr(kd, kd->program, "kvm_getlwps");
				return (NULL);
			}
			kl->l_back = PTRTOUINT64(back);
			kl->l_addr = PTRTOUINT64(l.l_addr);
			kl->l_lid = l.l_lid;
			kl->l_flag = l.l_flag;
			kl->l_swtime = l.l_swtime;
			kl->l_slptime = l.l_slptime;
			kl->l_schedflags = 0; /* XXX */
			kl->l_holdcnt = 0;
			kl->l_priority = l.l_priority;
			kl->l_usrpri = l.l_priority;
			kl->l_stat = l.l_stat;
			kl->l_wchan = PTRTOUINT64(l.l_wchan);
			if (l.l_wmesg)
				(void)kvm_read(kd, (u_long)l.l_wmesg,
				    kl->l_wmesg, (size_t)WMESGLEN);
			kl->l_cpuid = KI_NOCPU;
			laddr = (u_long)PTRTOUINT64(l.l_sibling.le_next);
		}
	}

	*cnt = nlwps;
	return (kd->lwpbase);
}

struct kinfo_proc *
kvm_getprocs(kvm_t *kd, int op, int arg, int *cnt)
{
	size_t size;
	int mib[4], st, nprocs;

	if (ISALIVE(kd)) {
		size = 0;
		mib[0] = CTL_KERN;
		mib[1] = KERN_PROC;
		mib[2] = op;
		mib[3] = arg;
		st = sysctl(mib, 4, NULL, &size, NULL, (size_t)0);
		if (st == -1) {
			_kvm_syserr(kd, kd->program, "kvm_getprocs");
			return (NULL);
		}
		KVM_ALLOC(kd, procbase, size);
		st = sysctl(mib, 4, kd->procbase, &size, NULL, (size_t)0);
		if (st == -1) {
			_kvm_syserr(kd, kd->program, "kvm_getprocs");
			return (NULL);
		}
		if (size % sizeof(struct kinfo_proc) != 0) {
			_kvm_err(kd, kd->program,
			    "proc size mismatch (%lu total, %lu chunks)",
			    (u_long)size, (u_long)sizeof(struct kinfo_proc));
			return (NULL);
		}
		nprocs = (int)(size / sizeof(struct kinfo_proc));
	} else {
		struct nlist nl[4], *p;

		(void)memset(nl, 0, sizeof(nl));
		nl[0].n_name = "_nprocs";
		nl[1].n_name = "_allproc";
		nl[2].n_name = "_zombproc";
		nl[3].n_name = NULL;

		if (kvm_nlist(kd, nl) != 0) {
			for (p = nl; p->n_type != 0; ++p)
				continue;
			_kvm_err(kd, kd->program,
			    "%s: no such symbol", p->n_name);
			return (NULL);
		}
		if (KREAD(kd, nl[0].n_value, &nprocs)) {
			_kvm_err(kd, kd->program, "can't read nprocs");
			return (NULL);
		}
		size = nprocs * sizeof(*kd->procbase);
		KVM_ALLOC(kd, procbase, size);
		nprocs = kvm_deadprocs(kd, op, arg, nl[1].n_value,
		    nl[2].n_value, nprocs);
		if (nprocs < 0)
			return (NULL);
#ifdef notdef
		size = nprocs * sizeof(struct kinfo_proc);
		(void)realloc(kd->procbase, size);
#endif
	}
	*cnt = nprocs;
	return (kd->procbase);
}

void *
_kvm_realloc(kvm_t *kd, void *p, size_t n)
{
	void *np = realloc(p, n);

	if (np == NULL)
		_kvm_err(kd, kd->program, "out of memory");
	return (np);
}
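/*
 * Illustrative sketch only (excluded from the build): enumerating each
 * process's LWPs with kvm_getprocs() and kvm_getlwps(), much as
 * kvm_getproc2() above does when it picks kl[0] as the representative LWP.
 * Error handling and output are editorial examples, not library behaviour.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <err.h>
#include <fcntl.h>
#include <kvm.h>
#include <limits.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	char errbuf[_POSIX2_LINE_MAX];
	kvm_t *kd;
	struct kinfo_proc *kp;
	struct kinfo_lwp *kl;
	int i, j, nprocs, nlwps;

	kd = kvm_openfiles(NULL, NULL, NULL, O_RDONLY, errbuf);
	if (kd == NULL)
		errx(1, "kvm_openfiles: %s", errbuf);

	kp = kvm_getprocs(kd, KERN_PROC_ALL, 0, &nprocs);
	if (kp == NULL)
		errx(1, "kvm_getprocs: %s", kvm_geterr(kd));

	for (i = 0; i < nprocs; i++, kp++) {
		/*
		 * On a live kernel the sysctl path ignores paddr; for a
		 * dump, the proc address from kinfo_proc is what matters.
		 */
		kl = kvm_getlwps(kd, kp->kp_proc.p_pid,
		    (u_long)(uintptr_t)kp->kp_eproc.e_paddr,
		    sizeof(struct kinfo_lwp), &nlwps);
		if (kl == NULL)
			continue;
		for (j = 0; j < nlwps; j++)
			printf("%5d lwp %d stat %d\n",
			    kp->kp_proc.p_pid, kl[j].l_lid, kl[j].l_stat);
	}
	(void)kvm_close(kd);
	return 0;
}
#endif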
/*
 * Read in an argument vector from the user address space of process p.
 * addr is the user-space base address of narg null-terminated contiguous
 * strings.  This is used to read in both the command arguments and
 * environment strings.  Read at most maxcnt characters of strings.
 */
static char **
kvm_argv(kvm_t *kd, const struct miniproc *p, u_long addr, int narg,
    int maxcnt)
{
	char *np, *cp, *ep, *ap;
	u_long oaddr = (u_long)~0L;
	u_long len;
	size_t cc;
	char **argv;

	/*
	 * Check that there aren't an unreasonable number of arguments,
	 * and that the address is in user space.
	 */
	if (narg > ARG_MAX || addr < kd->min_uva || addr >= kd->max_uva)
		return (NULL);

	if (kd->argv == NULL) {
		/*
		 * Try to avoid reallocs.
		 */
		kd->argc = MAX(narg + 1, 32);
		kd->argv = _kvm_malloc(kd, kd->argc * sizeof(*kd->argv));
		if (kd->argv == NULL)
			return (NULL);
	} else if (narg + 1 > kd->argc) {
		kd->argc = MAX(2 * kd->argc, narg + 1);
		kd->argv = _kvm_realloc(kd, kd->argv, kd->argc *
		    sizeof(*kd->argv));
		if (kd->argv == NULL)
			return (NULL);
	}
	if (kd->argspc == NULL) {
		kd->argspc = _kvm_malloc(kd, (size_t)kd->nbpg);
		if (kd->argspc == NULL)
			return (NULL);
		kd->argspc_len = kd->nbpg;
	}
	if (kd->argbuf == NULL) {
		kd->argbuf = _kvm_malloc(kd, (size_t)kd->nbpg);
		if (kd->argbuf == NULL)
			return (NULL);
	}
	cc = sizeof(char *) * narg;
	if (kvm_ureadm(kd, p, addr, (void *)kd->argv, cc) != cc)
		return (NULL);
	ap = np = kd->argspc;
	argv = kd->argv;
	len = 0;
	/*
	 * Loop over pages, filling in the argument vector.
	 */
	while (argv < kd->argv + narg && *argv != NULL) {
		addr = (u_long)*argv & ~(kd->nbpg - 1);
		if (addr != oaddr) {
			if (kvm_ureadm(kd, p, addr, kd->argbuf,
			    (size_t)kd->nbpg) != kd->nbpg)
				return (NULL);
			oaddr = addr;
		}
		addr = (u_long)*argv & (kd->nbpg - 1);
		cp = kd->argbuf + (size_t)addr;
		cc = kd->nbpg - (size_t)addr;
		if (maxcnt > 0 && cc > (size_t)(maxcnt - len))
			cc = (size_t)(maxcnt - len);
		ep = memchr(cp, '\0', cc);
		if (ep != NULL)
			cc = ep - cp + 1;
		if (len + cc > kd->argspc_len) {
			ptrdiff_t off;
			char **pp;
			char *op = kd->argspc;

			kd->argspc_len *= 2;
			kd->argspc = _kvm_realloc(kd, kd->argspc,
			    kd->argspc_len);
			if (kd->argspc == NULL)
				return (NULL);
			/*
			 * Adjust argv pointers in case realloc moved
			 * the string space.
			 */
			off = kd->argspc - op;
			for (pp = kd->argv; pp < argv; pp++)
				*pp += off;
			ap += off;
			np += off;
		}
		memcpy(np, cp, cc);
		np += cc;
		len += cc;
		if (ep != NULL) {
			*argv++ = ap;
			ap = np;
		} else
			*argv += cc;
		if (maxcnt > 0 && len >= maxcnt) {
			/*
			 * We're stopping prematurely.  Terminate the
			 * current string.
			 */
			if (ep == NULL) {
				*np = '\0';
				*argv++ = ap;
			}
			break;
		}
	}
	/* Make sure argv is terminated. */
	*argv = NULL;
	return (kd->argv);
}

static void
ps_str_a(struct ps_strings *p, u_long *addr, int *n)
{

	*addr = (u_long)p->ps_argvstr;
	*n = p->ps_nargvstr;
}

static void
ps_str_e(struct ps_strings *p, u_long *addr, int *n)
{

	*addr = (u_long)p->ps_envstr;
	*n = p->ps_nenvstr;
}

/*
 * Determine if the proc indicated by p is still active.
 * This test is not 100% foolproof in theory, but chances of
 * being wrong are very low.
 */
static int
proc_verify(kvm_t *kd, u_long kernp, const struct miniproc *p)
{
	struct proc kernproc;

	/*
	 * Just read in the whole proc.  It's not that big relative
	 * to the cost of the read system call.
	 */
	if (kvm_read(kd, kernp, &kernproc, sizeof(kernproc)) !=
	    sizeof(kernproc))
		return (0);
	return (p->p_pid == kernproc.p_pid &&
	    (kernproc.p_stat != SZOMB || p->p_stat == SZOMB));
}

static char **
kvm_doargv(kvm_t *kd, const struct miniproc *p, int nchr,
    void (*info)(struct ps_strings *, u_long *, int *))
{
	char **ap;
	u_long addr;
	int cnt;
	struct ps_strings arginfo;

	/*
	 * Pointers are stored at the top of the user stack.
	 */
	if (p->p_stat == SZOMB)
		return (NULL);
	cnt = (int)kvm_ureadm(kd, p, kd->usrstack - sizeof(arginfo),
	    (void *)&arginfo, sizeof(arginfo));
	if (cnt != sizeof(arginfo))
		return (NULL);

	(*info)(&arginfo, &addr, &cnt);
	if (cnt == 0)
		return (NULL);
	ap = kvm_argv(kd, p, addr, cnt, nchr);
	/*
	 * For live kernels, make sure this process didn't go away.
	 */
	if (ap != NULL && ISALIVE(kd) &&
	    !proc_verify(kd, (u_long)p->p_paddr, p))
		ap = NULL;
	return (ap);
}

/*
 * Get the command args.  This code is now machine independent.
 */
char **
kvm_getargv(kvm_t *kd, const struct kinfo_proc *kp, int nchr)
{
	struct miniproc p;

	KPTOMINI(kp, &p);
	return (kvm_doargv(kd, &p, nchr, ps_str_a));
}

char **
kvm_getenvv(kvm_t *kd, const struct kinfo_proc *kp, int nchr)
{
	struct miniproc p;

	KPTOMINI(kp, &p);
	return (kvm_doargv(kd, &p, nchr, ps_str_e));
}

static char **
kvm_doargv2(kvm_t *kd, pid_t pid, int type, int nchr)
{
	size_t bufs;
	int narg, mib[4];
	size_t newargspc_len;
	char **ap, *bp, *endp;

	/*
	 * Check that there aren't an unreasonable number of arguments.
	 */
	if (nchr > ARG_MAX)
		return (NULL);

	if (nchr == 0)
		nchr = ARG_MAX;

	/* Get number of strings in argv */
	mib[0] = CTL_KERN;
	mib[1] = KERN_PROC_ARGS;
	mib[2] = pid;
	mib[3] = type == KERN_PROC_ARGV ? KERN_PROC_NARGV : KERN_PROC_NENV;
	bufs = sizeof(narg);
	if (sysctl(mib, 4, &narg, &bufs, NULL, (size_t)0) == -1)
		return (NULL);

	if (kd->argv == NULL) {
		/*
		 * Try to avoid reallocs.
		 */
		kd->argc = MAX(narg + 1, 32);
		kd->argv = _kvm_malloc(kd, kd->argc * sizeof(*kd->argv));
		if (kd->argv == NULL)
			return (NULL);
	} else if (narg + 1 > kd->argc) {
		kd->argc = MAX(2 * kd->argc, narg + 1);
		kd->argv = _kvm_realloc(kd, kd->argv, kd->argc *
		    sizeof(*kd->argv));
		if (kd->argv == NULL)
			return (NULL);
	}

	newargspc_len = MIN(nchr, ARG_MAX);
	KVM_ALLOC(kd, argspc, newargspc_len);
	memset(kd->argspc, 0, (size_t)kd->argspc_len);	/* XXX necessary? */

	mib[0] = CTL_KERN;
	mib[1] = KERN_PROC_ARGS;
	mib[2] = pid;
	mib[3] = type;
	bufs = kd->argspc_len;
	if (sysctl(mib, 4, kd->argspc, &bufs, NULL, (size_t)0) == -1)
		return (NULL);

	bp = kd->argspc;
	bp[kd->argspc_len - 1] = '\0';	/* make sure the string ends with nul */
	ap = kd->argv;
	endp = bp + MIN(nchr, bufs);

	while (bp < endp) {
		*ap++ = bp;
		/*
		 * XXX: don't need following anymore, or stick check
		 * for max argc in above while loop?
		 */
		if (ap >= kd->argv + kd->argc) {
			ptrdiff_t off = ap - kd->argv;

			kd->argc *= 2;
			kd->argv = _kvm_realloc(kd, kd->argv,
			    kd->argc * sizeof(*kd->argv));
			ap = kd->argv + off;
		}
		bp += strlen(bp) + 1;
	}
	*ap = NULL;

	return (kd->argv);
}

char **
kvm_getargv2(kvm_t *kd, const struct kinfo_proc2 *kp, int nchr)
{

	return (kvm_doargv2(kd, kp->p_pid, KERN_PROC_ARGV, nchr));
}

char **
kvm_getenvv2(kvm_t *kd, const struct kinfo_proc2 *kp, int nchr)
{

	return (kvm_doargv2(kd, kp->p_pid, KERN_PROC_ENV, nchr));
}

/*
 * Read from user space.  The user context is given by p.
 */
static ssize_t
kvm_ureadm(kvm_t *kd, const struct miniproc *p, u_long uva,
    char *buf, size_t len)
{
	char *cp;

	cp = buf;
	while (len > 0) {
		size_t cc;
		char *dp;
		u_long cnt;

		dp = _kvm_ureadm(kd, p, uva, &cnt);
		if (dp == NULL) {
			_kvm_err(kd, 0, "invalid address (%lx)", uva);
			return (0);
		}
		cc = (size_t)MIN(cnt, len);
		memcpy(cp, dp, cc);
		cp += cc;
		uva += cc;
		len -= cc;
	}
	return (ssize_t)(cp - buf);
}

ssize_t
kvm_uread(kvm_t *kd, const struct proc *p, u_long uva, char *buf, size_t len)
{
	struct miniproc mp;

	PTOMINI(p, &mp);
	return (kvm_ureadm(kd, &mp, uva, buf, len));
}
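/*
 * Illustrative sketch only (excluded from the build): the crash-dump path.
 * With a dead kernel, kvm_getprocs() walks allproc via kvm_deadprocs() and
 * kvm_proclist() above, and kvm_getargv() pages the argument strings out of
 * the dump through _kvm_ureadm().  The kernel and dump file names below are
 * placeholders, not paths this library requires.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <err.h>
#include <fcntl.h>
#include <kvm.h>
#include <limits.h>
#include <stdio.h>

int
main(void)
{
	char errbuf[_POSIX2_LINE_MAX];
	kvm_t *kd;
	struct kinfo_proc *kp;
	char **argv;
	int i, j, nprocs;

	/* Placeholder paths; use the kernel and dump you actually have. */
	kd = kvm_openfiles("/netbsd", "/var/crash/netbsd.0.core", NULL,
	    O_RDONLY, errbuf);
	if (kd == NULL)
		errx(1, "kvm_openfiles: %s", errbuf);

	kp = kvm_getprocs(kd, KERN_PROC_ALL, 0, &nprocs);
	if (kp == NULL)
		errx(1, "kvm_getprocs: %s", kvm_geterr(kd));

	for (i = 0; i < nprocs; i++) {
		printf("%5d %s", kp[i].kp_proc.p_pid, kp[i].kp_proc.p_comm);
		/* Arguments may be unavailable if the pages weren't dumped. */
		argv = kvm_getargv(kd, &kp[i], 0);
		for (j = 0; argv != NULL && argv[j] != NULL; j++)
			printf(" %s", argv[j]);
		printf("\n");
	}
	(void)kvm_close(kd);
	return 0;
}
#endif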