/*	$NetBSD: procfs_mem.c,v 1.18 1998/02/10 14:10:35 mrg Exp $	*/

/*
 * Copyright (c) 1993 Jan-Simon Pendry
 * Copyright (c) 1993 Sean Eric Fagan
 * Copyright (c) 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Jan-Simon Pendry and Sean Eric Fagan.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)procfs_mem.c	8.5 (Berkeley) 6/15/94
 */

/*
 * This is a lightly hacked and merged version
 * of sef's pread/pwrite functions
 */

#include "opt_uvm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <miscfs/procfs/procfs.h>
#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>

#if defined(UVM)
#include <uvm/uvm_extern.h>
#endif

#define	ISSET(t, f)	((t) & (f))

#if !defined(UVM)
static int	procfs_rwmem __P((struct proc *, struct uio *));

static int
procfs_rwmem(p, uio)
	struct proc *p;
	struct uio *uio;
{
	int error;
	int writing;

	writing = uio->uio_rw == UIO_WRITE;

	/*
	 * Only map in one page at a time.  We don't have to, but it
	 * makes things easier.  This way is trivial - right?
	 */
	do {
		vm_map_t map, tmap;
		vm_object_t object;
		vm_offset_t kva;
		vm_offset_t uva;
		int page_offset;		/* offset into page */
		vm_offset_t pageno;		/* page number */
		vm_map_entry_t out_entry;
		vm_prot_t out_prot;
		vm_page_t m;
		boolean_t wired, single_use;
		vm_offset_t off;
		u_int len;
		int fix_prot;

		uva = (vm_offset_t) uio->uio_offset;
		if (uva > VM_MAXUSER_ADDRESS) {
			error = 0;
			break;
		}

		/*
		 * Get the page number of this segment.
		 */
		pageno = trunc_page(uva);
		page_offset = uva - pageno;
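		/*
		 * Added illustration (assuming 4KB pages): for
		 * uva 0x12345, trunc_page() gives pageno 0x12000,
		 * so page_offset is 0x345 and at most
		 * PAGE_SIZE - 0x345 bytes can come from this page.
		 */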

		/*
		 * How many bytes to copy
		 */
		len = min(PAGE_SIZE - page_offset, uio->uio_resid);

		/*
		 * The map we want...
		 */
		map = &p->p_vmspace->vm_map;

		/*
		 * Check the permissions for the area we're interested
		 * in.
		 */
		fix_prot = 0;
		if (writing)
			fix_prot = !vm_map_check_protection(map, pageno,
					pageno + PAGE_SIZE, VM_PROT_WRITE);

		if (fix_prot) {
			/*
			 * If the page is not writable, we make it so.
			 * XXX It is possible that a page may *not* be
			 * read/executable, if a process changes that!
			 * We will assume, for now, that a page is either
			 * VM_PROT_ALL, or VM_PROT_READ|VM_PROT_EXECUTE.
			 */
			error = vm_map_protect(map, pageno,
					pageno + PAGE_SIZE, VM_PROT_ALL, 0);
			if (error)
				break;
		}

		/*
		 * Now we need to get the page.  out_entry, out_prot, wired,
		 * and single_use aren't used.  One would think the vm code
		 * would be a *bit* nicer...  We use tmap because
		 * vm_map_lookup() can change the map argument.
		 */
		tmap = map;
		error = vm_map_lookup(&tmap, pageno,
				writing ? VM_PROT_WRITE : VM_PROT_READ,
				&out_entry, &object, &off, &out_prot,
				&wired, &single_use);
		/*
		 * We're done with tmap now.
		 */
		if (!error)
			vm_map_lookup_done(tmap, out_entry);

		/*
		 * Fault the page in...
		 */
		if (!error && writing && object->shadow) {
			m = vm_page_lookup(object, off);
			if (m == 0 || (m->flags & PG_COPYONWRITE))
				error = vm_fault(map, pageno,
						VM_PROT_WRITE, FALSE);
		}

		/* Find space in kernel_map for the page we're interested in */
		if (!error) {
			kva = VM_MIN_KERNEL_ADDRESS;
			error = vm_map_find(kernel_map, object, off, &kva,
					PAGE_SIZE, 1);
		}

		if (!error) {
			/*
			 * Neither vm_map_lookup() nor vm_map_find() appear
			 * to add a reference count to the object, so we do
			 * that here and now.
			 */
			vm_object_reference(object);

			/*
			 * Mark the page we just found as pageable.
			 */
			error = vm_map_pageable(kernel_map, kva,
					kva + PAGE_SIZE, 0);

			/*
			 * Now do the i/o move.
			 */
			if (!error)
				error = uiomove((caddr_t) (kva + page_offset),
						len, uio);

			vm_map_remove(kernel_map, kva, kva + PAGE_SIZE);
		}
		if (fix_prot)
			vm_map_protect(map, pageno, pageno + PAGE_SIZE,
					VM_PROT_READ|VM_PROT_EXECUTE, 0);
	} while (error == 0 && uio->uio_resid > 0);

	return (error);
}
#endif
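
/*
 * Note (added commentary): procfs_domem() below is normally reached
 * from the procfs read/write path -- procfs_rw() in procfs_vnops.c
 * dispatches Pmem nodes here -- with uio->uio_offset carrying the
 * virtual address to access in the traced process.
 */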

/*
 * Copy data in and out of the target process.
 * We do this by mapping the process's page into
 * the kernel and then doing a uiomove direct
 * from the kernel address space.
 */
int
procfs_domem(curp, p, pfs, uio)
	struct proc *curp;		/* tracer */
	struct proc *p;			/* traced */
	struct pfsnode *pfs;
	struct uio *uio;
{
	int error;

	if (uio->uio_resid == 0)
		return (0);

	if ((error = procfs_checkioperm(curp, p)) != 0)
		return (error);

#if defined(UVM)
	/* XXXCDC: how should locking work here? */
	if ((p->p_flag & P_WEXIT) || (p->p_vmspace->vm_refcnt < 1))
		return(EFAULT);
	PHOLD(p);
	p->p_vmspace->vm_refcnt++;	/* XXX */
	error = uvm_io(&p->p_vmspace->vm_map, uio);
	PRELE(p);
	uvmspace_free(p->p_vmspace);
#else
	PHOLD(p);
	error = procfs_rwmem(p, uio);
	PRELE(p);
#endif
	return (error);
}

/*
 * Given process (p), find the vnode from which
 * its text segment is being executed.
 *
 * It would be nice to grab this information from
 * the VM system; however, there is no sure-fire
 * way of doing that.  Instead, fork(), exec() and
 * wait() all maintain the p_textvp field in the
 * process proc structure which contains a held
 * reference to the exec'ed vnode.
 */
struct vnode *
procfs_findtextvp(p)
	struct proc *p;
{

	return (p->p_textvp);
}

/*
 * Ensure that a process has permission to perform I/O on another.
 * Arguments:
 *	p	The process wishing to do the I/O (the tracer).
 *	t	The process whose memory/registers will be read/written.
 */
int
procfs_checkioperm(p, t)
	struct proc *p, *t;
{
	int error;

	/*
	 * You cannot attach to a process's mem/regs if:
	 *
	 *	(1) it's not owned by you, or is set-id on exec
	 *	    (unless you're root), or...
	 */
	if ((t->p_cred->p_ruid != p->p_cred->p_ruid ||
	    ISSET(t->p_flag, P_SUGID)) &&
	    (error = suser(p->p_ucred, &p->p_acflag)) != 0)
		return (error);

	/*
	 *	(2) ...it's init, which controls the security level
	 *	    of the entire system, and the system was not
	 *	    compiled with permanently insecure mode turned on.
	 */
	if (t == initproc && securelevel > -1)
		return (EPERM);

	return (0);
}

#ifdef probably_never
/*
 * Given process (p), find the vnode from which
 * its text segment is being mapped.
 *
 * (This is here, rather than in procfs_subr in order
 * to keep all the VM related code in one place.)
 */
struct vnode *
procfs_findtextvp(p)
	struct proc *p;
{
	int error;
	vm_object_t object;
	vm_offset_t pageno;		/* page number */

	/* find a vnode pager for the user address space */

	for (pageno = VM_MIN_ADDRESS;
	    pageno < VM_MAXUSER_ADDRESS;
	    pageno += PAGE_SIZE) {
		vm_map_t map;
		vm_map_entry_t out_entry;
		vm_prot_t out_prot;
		boolean_t wired, single_use;
		vm_offset_t off;

		map = &p->p_vmspace->vm_map;
		error = vm_map_lookup(&map, pageno,
				VM_PROT_READ,
				&out_entry, &object, &off, &out_prot,
				&wired, &single_use);

		if (!error) {
			vm_pager_t pager;

			printf("procfs: found vm object\n");
			vm_map_lookup_done(map, out_entry);
			printf("procfs: vm object = %p\n", object);

			/*
			 * At this point, assuming no errors, object
			 * is the VM object mapping UVA (pageno).
			 * Ensure it has a vnode pager, then grab
			 * the vnode from that pager's handle.
			 */

			pager = object->pager;
			printf("procfs: pager = %p\n", pager);
			if (pager)
				printf("procfs: found pager, type = %d\n",
				    pager->pg_type);
			if (pager && pager->pg_type == PG_VNODE) {
				struct vnode *vp;

				vp = (struct vnode *) pager->pg_handle;
				printf("procfs: vp = %p\n", vp);
				return (vp);
			}
		}
	}

	printf("procfs: text object not found\n");
	return (0);
}
#endif /* probably_never */
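
/*
 * Illustrative usage (added commentary, not part of the original file):
 * a debugger or tracer exercises the code above through the filesystem
 * interface, roughly as follows (pid 123 is hypothetical):
 *
 *	int fd = open("/proc/123/mem", O_RDWR);
 *	lseek(fd, (off_t)addr, SEEK_SET);	-- target virtual address
 *	read(fd, buf, len);			-- ends up in procfs_domem()
 *
 * The seek offset becomes uio->uio_offset, which procfs_rwmem() (or
 * uvm_io() on UVM kernels) interprets as an address in the traced
 * process's address space.
 */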