/*
 * Copyright (c) 1993 The Regents of the University of California.
 * Copyright (c) 1993 Jan-Simon Pendry
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Jan-Simon Pendry.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)procfs_mem.c	8.1 (Berkeley) 01/05/94
 *
 * From:
 *	$Id: procfs_mem.c,v 3.2 1993/12/15 09:40:17 jsp Exp $
 */

/*
 * This is a lightly hacked and merged version
 * of sef's pread/pwrite functions
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <miscfs/procfs/procfs.h>
#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>

static int
procfs_rwmem(p, uio)
	struct proc *p;
	struct uio *uio;
{
	int error;
	int writing;

	writing = uio->uio_rw == UIO_WRITE;

	/*
	 * Only map in one page at a time.  We don't have to, but it
	 * makes things easier.  This way is trivial - right?
	 */
	do {
		vm_map_t map, tmap;
		vm_object_t object;
		vm_offset_t kva;
		vm_offset_t uva;
		int page_offset;		/* offset into page */
		vm_offset_t pageno;		/* page number */
		vm_map_entry_t out_entry;
		vm_prot_t out_prot;
		vm_page_t m;
		boolean_t wired, single_use;
		vm_offset_t off;
		u_int len;
		int fix_prot;

		uva = (vm_offset_t) uio->uio_offset;
		if (uva > VM_MAXUSER_ADDRESS) {
			error = 0;
			break;
		}

		/*
		 * Get the page number of this segment.
		 */
		pageno = trunc_page(uva);
		page_offset = uva - pageno;

		/*
		 * How many bytes to copy
		 */
		len = min(PAGE_SIZE - page_offset, uio->uio_resid);

		/*
		 * The map we want...
		 */
		map = &p->p_vmspace->vm_map;

		/*
		 * Check the permissions for the area we're interested
		 * in.
		 */
		fix_prot = 0;
		if (writing)
			fix_prot = !vm_map_check_protection(map, pageno,
					pageno + PAGE_SIZE, VM_PROT_WRITE);

		if (fix_prot) {
			/*
			 * If the page is not writable, we make it so.
			 * XXX It is possible that a page may *not* be
			 * read/executable, if a process changes that!
			 * We will assume, for now, that a page is either
			 * VM_PROT_ALL, or VM_PROT_READ|VM_PROT_EXECUTE.
			 */
			error = vm_map_protect(map, pageno,
					pageno + PAGE_SIZE, VM_PROT_ALL, 0);
			if (error)
				break;
		}

		/*
		 * Now we need to get the page.  out_entry, out_prot, wired,
		 * and single_use aren't used.  One would think the vm code
		 * would be a *bit* nicer...  We use tmap because
		 * vm_map_lookup() can change the map argument.
		 */
		tmap = map;
		error = vm_map_lookup(&tmap, pageno,
			      writing ? VM_PROT_WRITE : VM_PROT_READ,
			      &out_entry, &object, &off, &out_prot,
			      &wired, &single_use);
		/*
		 * We're done with tmap now.
		 */
		if (!error)
			vm_map_lookup_done(tmap, out_entry);

		/*
		 * Fault the page in...
		 */
		if (!error && writing && object->shadow) {
			m = vm_page_lookup(object, off);
			if (m == 0 || (m->flags & PG_COPYONWRITE))
				error = vm_fault(map, pageno,
					VM_PROT_WRITE, FALSE);
		}

		/* Find space in kernel_map for the page we're interested in */
		if (!error)
			error = vm_map_find(kernel_map, object, off, &kva,
					PAGE_SIZE, 1);

		if (!error) {
			/*
			 * Neither vm_map_lookup() nor vm_map_find() appears
			 * to add a reference count to the object, so we do
			 * that here and now.
			 */
			vm_object_reference(object);

			/*
			 * Mark the page we just found as pageable.
			 */
			error = vm_map_pageable(kernel_map, kva,
				kva + PAGE_SIZE, 0);

			/*
			 * Now do the i/o move.
			 */
			if (!error)
				error = uiomove(kva + page_offset, len, uio);

			vm_map_remove(kernel_map, kva, kva + PAGE_SIZE);
		}
		if (fix_prot)
			vm_map_protect(map, pageno, pageno + PAGE_SIZE,
					VM_PROT_READ|VM_PROT_EXECUTE, 0);
	} while (error == 0 && uio->uio_resid > 0);

	return (error);
}
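
/*
 * procfs_rwmem() interprets uio->uio_offset as a virtual address in the
 * target process; uiomove() advances uio_offset and uio_resid, so the
 * loop above walks the request one page at a time until the request is
 * exhausted or an error occurs.
 */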

/*
 * Copy data in and out of the target process.
 * We do this by mapping the process's page into
 * the kernel and then doing a uiomove direct
 * from the kernel address space.
 */
int
procfs_domem(curp, p, pfs, uio)
	struct proc *curp;
	struct proc *p;
	struct pfsnode *pfs;
	struct uio *uio;
{
	int error;

	if (uio->uio_resid == 0)
		return (0);

	error = procfs_rwmem(p, uio);

	return (error);
}

/*
 * Given process (p), find the vnode from which
 * its text segment is being executed.
 *
 * It would be nice to grab this information from
 * the VM system; however, there is no sure-fire
 * way of doing that.  Instead, fork(), exec() and
 * wait() all maintain the p_textvp field in the
 * process proc structure, which contains a held
 * reference to the exec'ed vnode.
 */
struct vnode *
procfs_findtextvp(p)
	struct proc *p;
{
	return (p->p_textvp);
}


#ifdef probably_never
/*
 * Given process (p), find the vnode from which
 * its text segment is being mapped.
 *
 * (This is here, rather than in procfs_subr, in order
 * to keep all the VM related code in one place.)
 */
struct vnode *
procfs_findtextvp(p)
	struct proc *p;
{
	int error;
	vm_object_t object;
	vm_offset_t pageno;		/* page number */

	/* find a vnode pager for the user address space */

	for (pageno = VM_MIN_ADDRESS;
			pageno < VM_MAXUSER_ADDRESS;
			pageno += PAGE_SIZE) {
		vm_map_t map;
		vm_map_entry_t out_entry;
		vm_prot_t out_prot;
		boolean_t wired, single_use;
		vm_offset_t off;

		map = &p->p_vmspace->vm_map;
		error = vm_map_lookup(&map, pageno,
			      VM_PROT_READ,
			      &out_entry, &object, &off, &out_prot,
			      &wired, &single_use);

		if (!error) {
			vm_pager_t pager;

			printf("procfs: found vm object\n");
			vm_map_lookup_done(map, out_entry);
			printf("procfs: vm object = %x\n", object);

			/*
			 * At this point, assuming no errors, object
			 * is the VM object mapping UVA (pageno).
			 * Ensure it has a vnode pager, then grab
			 * the vnode from that pager's handle.
			 */

			pager = object->pager;
			printf("procfs: pager = %x\n", pager);
			if (pager)
				printf("procfs: found pager, type = %d\n", pager->pg_type);
			if (pager && pager->pg_type == PG_VNODE) {
				struct vnode *vp;

				vp = (struct vnode *) pager->pg_handle;
				printf("procfs: vp = 0x%x\n", vp);
				return (vp);
			}
		}
	}

	printf("procfs: text object not found\n");
	return (0);
}
#endif /* probably_never */
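
#ifdef notdef
/*
 * Illustrative sketch, not part of the original file: one way a kernel
 * caller might use procfs_domem() to read "len" bytes from virtual
 * address "va" in the target process "p" into the kernel buffer "buf".
 * The function name procfs_domem_example() is hypothetical, and the
 * sketch assumes the struct uio/struct iovec declarations from
 * <sys/uio.h> are in scope.
 */
static int
procfs_domem_example(curp, p, pfs, va, buf, len)
	struct proc *curp;	/* calling process */
	struct proc *p;		/* target process */
	struct pfsnode *pfs;	/* pfsnode for the target's mem file */
	vm_offset_t va;		/* address in the target's address space */
	char *buf;		/* kernel buffer to fill */
	u_int len;		/* number of bytes to copy */
{
	struct uio auio;
	struct iovec aiov;

	aiov.iov_base = buf;
	aiov.iov_len = len;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = (off_t) va;	/* offset doubles as the target VA */
	auio.uio_resid = len;
	auio.uio_segflg = UIO_SYSSPACE;	/* buf is a kernel address */
	auio.uio_rw = UIO_READ;		/* copy out of the target process */
	auio.uio_procp = curp;

	return (procfs_domem(curp, p, pfs, &auio));
}
#endif /* notdef */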