/*
 * Copyright (c) 1993 Jan-Simon Pendry
 * Copyright (c) 1993 Sean Eric Fagan
 * Copyright (c) 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Jan-Simon Pendry and Sean Eric Fagan.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)procfs_mem.c	8.5 (Berkeley) 06/15/94
 *
 * From:
 *	$Id: procfs_mem.c,v 3.2 1993/12/15 09:40:17 jsp Exp $
 */

/*
 * This is a lightly hacked and merged version
 * of sef's pread/pwrite functions
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <miscfs/procfs/procfs.h>
#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>

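/*
 * procfs_rwmem() transfers data between the address space of the
 * target process (p) and the buffer described by uio, one page per
 * iteration of the loop below: check (and, for a write, temporarily
 * raise) the protection of the target page, look it up with
 * vm_map_lookup(), force a copy-on-write fault if needed, map the
 * page into kernel_map and wire it, uiomove() the data, then unmap
 * the page and restore the original protection.
 */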
static int
procfs_rwmem(p, uio)
	struct proc *p;
	struct uio *uio;
{
	int error;
	int writing;

	writing = uio->uio_rw == UIO_WRITE;

	/*
	 * Only map in one page at a time.  We don't have to, but it
	 * makes things easier.  This way is trivial - right?
	 */
	do {
		vm_map_t map, tmap;
		vm_object_t object;
		vm_offset_t kva;
		vm_offset_t uva;
		int page_offset;		/* offset into page */
		vm_offset_t pageno;		/* page number */
		vm_map_entry_t out_entry;
		vm_prot_t out_prot;
		vm_page_t m;
		boolean_t wired, single_use;
		vm_offset_t off;
		u_int len;
		int fix_prot;

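		/*
		 * The offset in the uio is the virtual address in the
		 * target process.  Past the end of the user address
		 * space we simply stop; the caller sees a short
		 * transfer rather than an error.
		 */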
		uva = (vm_offset_t) uio->uio_offset;
		if (uva > VM_MAXUSER_ADDRESS) {
			error = 0;
			break;
		}

		/*
		 * Get the page number of this segment.
		 */
		pageno = trunc_page(uva);
		page_offset = uva - pageno;

		/*
		 * How many bytes to copy
		 */
		len = min(PAGE_SIZE - page_offset, uio->uio_resid);

		/*
		 * The map we want...
		 */
		map = &p->p_vmspace->vm_map;

		/*
		 * Check the permissions for the area we're interested
		 * in.
		 */
		fix_prot = 0;
		if (writing)
			fix_prot = !vm_map_check_protection(map, pageno,
					pageno + PAGE_SIZE, VM_PROT_WRITE);

		if (fix_prot) {
			/*
			 * If the page is not writable, we make it so.
			 * XXX It is possible that a page may *not* be
			 * read/executable, if a process changes that!
			 * We will assume, for now, that a page is either
			 * VM_PROT_ALL, or VM_PROT_READ|VM_PROT_EXECUTE.
			 */
			error = vm_map_protect(map, pageno,
					pageno + PAGE_SIZE, VM_PROT_ALL, 0);
			if (error)
				break;
		}

		/*
		 * Now we need to get the page.  out_entry, out_prot, wired,
		 * and single_use aren't used.  One would think the vm code
		 * would be a *bit* nicer...  We use tmap because
		 * vm_map_lookup() can change the map argument.
		 */
		tmap = map;
		error = vm_map_lookup(&tmap, pageno,
				writing ? VM_PROT_WRITE : VM_PROT_READ,
				&out_entry, &object, &off, &out_prot,
				&wired, &single_use);
		/*
		 * We're done with tmap now.
		 */
		if (!error)
			vm_map_lookup_done(tmap, out_entry);

		/*
		 * Fault the page in if necessary.  If we are writing and
		 * the object has a backing (shadow) object, the page may
		 * still be copy-on-write; forcing a write fault here makes
		 * sure we end up with the target's private copy.
		 */
		if (!error && writing && object->shadow) {
			m = vm_page_lookup(object, off);
			if (m == 0 || (m->flags & PG_COPYONWRITE))
				error = vm_fault(map, pageno,
						VM_PROT_WRITE, FALSE);
		}

		/* Find space in kernel_map for the page we're interested in */
		if (!error)
			error = vm_map_find(kernel_map, object, off, &kva,
					PAGE_SIZE, 1);

		if (!error) {
			/*
			 * Neither vm_map_lookup() nor vm_map_find() appear
			 * to add a reference count to the object, so we do
			 * that here and now.
			 */
			vm_object_reference(object);

			/*
			 * Wire the page down in the kernel map (the FALSE
			 * argument to vm_map_pageable() means "not
			 * pageable"), faulting it in so it stays resident
			 * for the i/o below.
			 */
			error = vm_map_pageable(kernel_map, kva,
					kva + PAGE_SIZE, 0);

			/*
			 * Now do the i/o move.
			 */
			if (!error)
				error = uiomove((caddr_t) (kva + page_offset),
						len, uio);

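			/*
			 * Release the temporary kernel mapping; deleting
			 * the map entry also drops the object reference
			 * taken above.
			 */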
			vm_map_remove(kernel_map, kva, kva + PAGE_SIZE);
		}
		if (fix_prot)
			vm_map_protect(map, pageno, pageno + PAGE_SIZE,
					VM_PROT_READ|VM_PROT_EXECUTE, 0);
	} while (error == 0 && uio->uio_resid > 0);

	return (error);
}

/*
 * Copy data in and out of the target process.
 * We do this by mapping the process's page into
 * the kernel and then doing a uiomove directly
 * from the kernel address space.
 */
int
procfs_domem(curp, p, pfs, uio)
	struct proc *curp;
	struct proc *p;
	struct pfsnode *pfs;
	struct uio *uio;
{

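	/* Note: curp and pfs are currently unused here. */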
	if (uio->uio_resid == 0)
		return (0);

	return (procfs_rwmem(p, uio));
}
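
/*
 * Example (a sketch only, not part of this file): a caller such as
 * the procfs read/write path would be expected to hand procfs_domem()
 * a uio whose uio_offset is the address in the target's address
 * space.  The variable names below are illustrative; buf is assumed
 * to be a kernel-resident buffer here.
 *
 *	struct uio auio;
 *	struct iovec aiov;
 *
 *	aiov.iov_base = (caddr_t) buf;
 *	aiov.iov_len = len;
 *	auio.uio_iov = &aiov;
 *	auio.uio_iovcnt = 1;
 *	auio.uio_offset = (off_t) targetaddr;
 *	auio.uio_resid = len;
 *	auio.uio_segflg = UIO_SYSSPACE;
 *	auio.uio_rw = UIO_READ;
 *	auio.uio_procp = curp;
 *	error = procfs_domem(curp, p, pfs, &auio);
 */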

/*
 * Given process (p), find the vnode from which
 * its text segment is being executed.
 *
 * It would be nice to grab this information from
 * the VM system; however, there is no sure-fire
 * way of doing that.  Instead, fork(), exec() and
 * wait() all maintain the p_textvp field in the
 * process proc structure which contains a held
 * reference to the exec'ed vnode.
 */
struct vnode *
procfs_findtextvp(p)
	struct proc *p;
{

	return (p->p_textvp);
}


#ifdef probably_never
/*
 * Given process (p), find the vnode from which
 * its text segment is being mapped.
 *
 * (This is here, rather than in procfs_subr in order
 * to keep all the VM related code in one place.)
 */
struct vnode *
procfs_findtextvp(p)
	struct proc *p;
{
	int error;
	vm_object_t object;
	vm_offset_t pageno;		/* page number */

	/* find a vnode pager for the user address space */

	for (pageno = VM_MIN_ADDRESS;
			pageno < VM_MAXUSER_ADDRESS;
			pageno += PAGE_SIZE) {
		vm_map_t map;
		vm_map_entry_t out_entry;
		vm_prot_t out_prot;
		boolean_t wired, single_use;
		vm_offset_t off;

		map = &p->p_vmspace->vm_map;
		error = vm_map_lookup(&map, pageno,
				VM_PROT_READ,
				&out_entry, &object, &off, &out_prot,
				&wired, &single_use);

		if (!error) {
			vm_pager_t pager;

			printf("procfs: found vm object\n");
			vm_map_lookup_done(map, out_entry);
			printf("procfs: vm object = %x\n", object);

			/*
			 * At this point, assuming no errors, object
			 * is the VM object mapping UVA (pageno).
			 * Ensure it has a vnode pager, then grab
			 * the vnode from that pager's handle.
			 */

			pager = object->pager;
			printf("procfs: pager = %x\n", pager);
			if (pager)
				printf("procfs: found pager, type = %d\n", pager->pg_type);
			if (pager && pager->pg_type == PG_VNODE) {
				struct vnode *vp;

				vp = (struct vnode *) pager->pg_handle;
				printf("procfs: vp = 0x%x\n", vp);
				return (vp);
			}
		}
	}

	printf("procfs: text object not found\n");
	return (0);
}
#endif /* probably_never */