/*	$NetBSD: procfs_mem.c,v 1.12 1997/08/12 22:47:20 thorpej Exp $	*/

/*
 * Copyright (c) 1993 Jan-Simon Pendry
 * Copyright (c) 1993 Sean Eric Fagan
 * Copyright (c) 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Jan-Simon Pendry and Sean Eric Fagan.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)procfs_mem.c	8.5 (Berkeley) 6/15/94
 */

/*
 * This is a lightly hacked and merged version
 * of sef's pread/pwrite functions
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <miscfs/procfs/procfs.h>
#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>

static int procfs_rwmem __P((struct proc *, struct uio *));

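/*
 * Transfer data between the uio and the target process's address
 * space.  Each pass of the loop below maps one page of the target
 * into the kernel, moves up to a page worth of data with uiomove(),
 * and unmaps the page again.
 */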
static int
procfs_rwmem(p, uio)
	struct proc *p;
	struct uio *uio;
{
	int error;
	int writing;

	writing = uio->uio_rw == UIO_WRITE;

	/*
	 * Only map in one page at a time.  We don't have to, but it
	 * makes things easier.  This way is trivial - right?
	 */
	do {
		vm_map_t map, tmap;
		vm_object_t object;
		vm_offset_t kva;
		vm_offset_t uva;
		int page_offset;		/* offset into page */
		vm_offset_t pageno;		/* page number */
		vm_map_entry_t out_entry;
		vm_prot_t out_prot;
		vm_page_t m;
		boolean_t wired, single_use;
		vm_offset_t off;
		u_int len;
		int fix_prot;

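		/*
		 * The uio offset names a virtual address in the target
		 * process.  An offset beyond the user address space
		 * ends the transfer without error, giving the caller
		 * EOF semantics rather than a fault.
		 */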
		uva = (vm_offset_t) uio->uio_offset;
		if (uva > VM_MAXUSER_ADDRESS) {
			error = 0;
			break;
		}

		/*
		 * Get the page number of this segment.
		 */
		pageno = trunc_page(uva);
		page_offset = uva - pageno;

		/*
		 * How many bytes to copy this pass: up to the end of
		 * the page, or the end of the request, whichever is
		 * smaller.
		 */
		len = min(PAGE_SIZE - page_offset, uio->uio_resid);

		/*
		 * The map we want: the target process's address map.
		 */
		map = &p->p_vmspace->vm_map;

		/*
		 * Check the protection on the page we are about to
		 * touch.  When writing, remember whether the range
		 * currently lacks VM_PROT_WRITE, so that we can
		 * upgrade its protection first and restore it after.
		 */
		fix_prot = 0;
		if (writing)
			fix_prot = !vm_map_check_protection(map, pageno,
					pageno + PAGE_SIZE, VM_PROT_WRITE);

		if (fix_prot) {
			/*
			 * If the page is not writable, we make it so.
			 * XXX It is possible that a page may *not* be
			 * read/executable, if a process changes that!
			 * We will assume, for now, that a page is either
			 * VM_PROT_ALL, or VM_PROT_READ|VM_PROT_EXECUTE.
			 */
			error = vm_map_protect(map, pageno,
					pageno + PAGE_SIZE, VM_PROT_ALL, 0);
			if (error)
				break;
		}

		/*
		 * Now we need to get the page.  out_entry, out_prot, wired,
		 * and single_use aren't used.  One would think the vm code
		 * would be a *bit* nicer...  We use tmap because
		 * vm_map_lookup() can change the map argument.
		 */
		tmap = map;
		error = vm_map_lookup(&tmap, pageno,
				      writing ? VM_PROT_WRITE : VM_PROT_READ,
				      &out_entry, &object, &off, &out_prot,
				      &wired, &single_use);
		/*
		 * We're done with tmap now.
		 */
		if (!error)
			vm_map_lookup_done(tmap, out_entry);

		/*
		 * Fault the page in if need be.  When writing to a
		 * shadowed (copy-on-write) object, take the write
		 * fault now so that the object ends up holding the
		 * process's private copy of the page rather than the
		 * shared original.
		 */
		if (!error && writing && object->shadow) {
			m = vm_page_lookup(object, off);
			if (m == 0 || (m->flags & PG_COPYONWRITE))
				error = vm_fault(map, pageno,
							VM_PROT_WRITE, FALSE);
		}

		/* Find space in kernel_map for the page we're interested in */
		if (!error) {
			kva = VM_MIN_KERNEL_ADDRESS;
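			/*
			 * kva is only a hint here; vm_map_find() is
			 * asked to find free space and hands back the
			 * address it actually chose.
			 */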
			error = vm_map_find(kernel_map, object, off, &kva,
					PAGE_SIZE, 1);
		}

		if (!error) {
			/*
			 * Neither vm_map_lookup() nor vm_map_find() appear
			 * to add a reference count to the object, so we do
			 * that here and now.
			 */
			vm_object_reference(object);

			/*
			 * Wire the page we just mapped into the kernel
			 * map (a new_pageable argument of 0 wires the
			 * range), faulting it in if necessary.
			 */
			error = vm_map_pageable(kernel_map, kva,
				kva + PAGE_SIZE, 0);

			/*
			 * Now do the i/o move.
			 */
			if (!error)
				error = uiomove((caddr_t) (kva + page_offset),
						len, uio);

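			/*
			 * Tear down the temporary kernel mapping;
			 * vm_map_remove() also consumes the object
			 * reference we took above.
			 */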
			vm_map_remove(kernel_map, kva, kva + PAGE_SIZE);
		}
		if (fix_prot)
			vm_map_protect(map, pageno, pageno + PAGE_SIZE,
					VM_PROT_READ|VM_PROT_EXECUTE, 0);
	} while (error == 0 && uio->uio_resid > 0);

	return (error);
}

/*
 * Copy data in and out of the target process.
 * We do this by mapping the process's page into
 * the kernel and then doing a uiomove directly
 * from the kernel address space.
 */
int
procfs_domem(curp, p, pfs, uio)
	struct proc *curp;
	struct proc *p;
	struct pfsnode *pfs;
	struct uio *uio;
{
	int error;

	if (uio->uio_resid == 0)
		return (0);

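	/*
	 * Make sure the calling process is allowed to do i/o on the
	 * target's address space.
	 */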
	if ((error = procfs_checkioperm(curp, p)) != 0)
		return (error);

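	/*
	 * PHOLD keeps the target from being swapped out while we
	 * operate on its address space; PRELE drops that hold.
	 */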
	PHOLD(p);
	error = procfs_rwmem(p, uio);
	PRELE(p);
	return (error);
}
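
/*
 * For reference: the routine above is reached when the procfs "mem"
 * file for a process is read or written, with uio_offset naming a
 * virtual address in the target.  A minimal userland sketch, assuming
 * procfs is mounted on /proc (the pid and address are illustrative):
 *
 *	char buf[64];
 *	int fd = open("/proc/123/mem", O_RDONLY);
 *
 *	if (fd != -1 && lseek(fd, (off_t)addr, SEEK_SET) != -1)
 *		(void) read(fd, buf, sizeof buf);
 */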

/*
 * Given a process (p), find the vnode from which
 * its text segment is being executed.
 *
 * It would be nice to grab this information from
 * the VM system; however, there is no sure-fire
 * way of doing that.  Instead, fork(), exec() and
 * wait() all maintain the p_textvp field in the
 * process proc structure, which contains a held
 * reference to the exec'ed vnode.
 */
struct vnode *
procfs_findtextvp(p)
	struct proc *p;
{

	return (p->p_textvp);
}


#ifdef probably_never
/*
 * Given a process (p), find the vnode from which
 * its text segment is being mapped.
 *
 * (This is here, rather than in procfs_subr, in order
 * to keep all the VM related code in one place.)
 */
struct vnode *
procfs_findtextvp(p)
	struct proc *p;
{
	int error;
	vm_object_t object;
	vm_offset_t pageno;		/* page number */

	/* find a vnode pager for the user address space */

	for (pageno = VM_MIN_ADDRESS;
			pageno < VM_MAXUSER_ADDRESS;
			pageno += PAGE_SIZE) {
		vm_map_t map;
		vm_map_entry_t out_entry;
		vm_prot_t out_prot;
		boolean_t wired, single_use;
		vm_offset_t off;

		map = &p->p_vmspace->vm_map;
		error = vm_map_lookup(&map, pageno,
			      VM_PROT_READ,
			      &out_entry, &object, &off, &out_prot,
			      &wired, &single_use);

		if (!error) {
			vm_pager_t pager;

			printf("procfs: found vm object\n");
			vm_map_lookup_done(map, out_entry);
			printf("procfs: vm object = %x\n", object);

			/*
			 * At this point, assuming no errors, object
			 * is the VM object mapping UVA (pageno).
			 * Ensure it has a vnode pager, then grab
			 * the vnode from that pager's handle.
			 */

			pager = object->pager;
			printf("procfs: pager = %x\n", pager);
			if (pager)
				printf("procfs: found pager, type = %d\n",
				    pager->pg_type);
			if (pager && pager->pg_type == PG_VNODE) {
				struct vnode *vp;

				vp = (struct vnode *) pager->pg_handle;
				printf("procfs: vp = 0x%x\n", vp);
				return (vp);
			}
		}
	}

	printf("procfs: text object not found\n");
	return (0);
}
#endif /* probably_never */