xref: /netbsd-src/sys/miscfs/procfs/procfs_mem.c (revision 5e7b128211fac2cd786c3bd0f5732cda99cda524)
1 /*	$NetBSD: procfs_mem.c,v 1.21 1999/03/13 00:57:13 thorpej Exp $	*/
2 
3 /*
4  * Copyright (c) 1993 Jan-Simon Pendry
5  * Copyright (c) 1993 Sean Eric Fagan
6  * Copyright (c) 1993
7  *	The Regents of the University of California.  All rights reserved.
8  *
9  * This code is derived from software contributed to Berkeley by
10  * Jan-Simon Pendry and Sean Eric Fagan.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. All advertising materials mentioning features or use of this software
21  *    must display the following acknowledgement:
22  *	This product includes software developed by the University of
23  *	California, Berkeley and its contributors.
24  * 4. Neither the name of the University nor the names of its contributors
25  *    may be used to endorse or promote products derived from this software
26  *    without specific prior written permission.
27  *
28  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38  * SUCH DAMAGE.
39  *
40  *	@(#)procfs_mem.c	8.5 (Berkeley) 6/15/94
41  */
42 
43 /*
44  * This is a lightly hacked and merged version
45  * of sef's pread/pwrite functions
46  */
47 
48 #include "opt_uvm.h"
49 
50 #include <sys/param.h>
51 #include <sys/systm.h>
52 #include <sys/time.h>
53 #include <sys/kernel.h>
54 #include <sys/proc.h>
55 #include <sys/vnode.h>
56 #include <miscfs/procfs/procfs.h>
57 #include <vm/vm.h>
58 #include <vm/vm_kern.h>
59 #include <vm/vm_page.h>
60 
61 #if defined(UVM)
62 #include <uvm/uvm_extern.h>
63 #endif
64 
65 #define	ISSET(t, f)	((t) & (f))
66 
67 #if !defined(UVM)
/*
 * procfs_rwmem: move data between the target process's (p) address
 * space and the uio, one page per loop iteration.  For each page we
 * (1) optionally force the page writable, (2) look up the backing VM
 * object, (3) fault the page in for the copy-on-write case, (4) map
 * the object page into kernel_map and uiomove() through that mapping,
 * then (5) restore the original protection.
 *
 * Returns 0 on success or an errno-style error from the VM layer or
 * uiomove().  Only compiled for the old Mach-derived VM (non-UVM).
 */
int
procfs_rwmem(p, uio)
	struct proc *p;
	struct uio *uio;
{
	int error;
	int writing;

	writing = uio->uio_rw == UIO_WRITE;

	/*
	 * Only map in one page at a time.  We don't have to, but it
	 * makes things easier.  This way is trivial - right?
	 */
	do {
		vm_map_t map, tmap;
		vm_object_t object;
		vaddr_t kva;
		vaddr_t uva;
		int page_offset;		/* offset into page */
		vaddr_t pageno;		/* page number */
		vm_map_entry_t out_entry;
		vm_prot_t out_prot;
		vm_page_t m;
		boolean_t wired, single_use;
		vaddr_t off;
		u_int len;
		int fix_prot;

		uva = (vaddr_t) uio->uio_offset;
		/*
		 * An offset beyond the user address space ends the
		 * transfer quietly (short read/write, error == 0).
		 */
		if (uva > VM_MAXUSER_ADDRESS) {
			error = 0;
			break;
		}

		/*
		 * Get the page number of this segment.
		 */
		pageno = trunc_page(uva);
		page_offset = uva - pageno;

		/*
		 * How many bytes to copy
		 */
		len = min(PAGE_SIZE - page_offset, uio->uio_resid);

		/*
		 * The map we want...
		 */
		map = &p->p_vmspace->vm_map;

		/*
		 * Check the permissions for the area we're interested
		 * in.
		 */
		fix_prot = 0;
		if (writing)
			fix_prot = !vm_map_check_protection(map, pageno,
					pageno + PAGE_SIZE, VM_PROT_WRITE);

		if (fix_prot) {
			/*
			 * If the page is not writable, we make it so.
			 * XXX It is possible that a page may *not* be
			 * read/executable, if a process changes that!
			 * We will assume, for now, that a page is either
			 * VM_PROT_ALL, or VM_PROT_READ|VM_PROT_EXECUTE.
			 */
			error = vm_map_protect(map, pageno,
					pageno + PAGE_SIZE, VM_PROT_ALL, 0);
			if (error)
				break;
		}

		/*
		 * Now we need to get the page.  out_entry, out_prot, wired,
		 * and single_use aren't used.  One would think the vm code
		 * would be a *bit* nicer...  We use tmap because
		 * vm_map_lookup() can change the map argument.
		 */
		tmap = map;
		error = vm_map_lookup(&tmap, pageno,
				      writing ? VM_PROT_WRITE : VM_PROT_READ,
				      &out_entry, &object, &off, &out_prot,
				      &wired, &single_use);
		/*
		 * We're done with tmap now.
		 */
		if (!error)
			vm_map_lookup_done(tmap, out_entry);

		/*
		 * Fault the page in...
		 * NOTE(review): only done when writing through a shadow
		 * object whose page is missing or still copy-on-write;
		 * the fault breaks the COW sharing before we write.
		 */
		if (!error && writing && object->shadow) {
			m = vm_page_lookup(object, off);
			if (m == 0 || (m->flags & PG_COPYONWRITE))
				error = vm_fault(map, pageno,
							VM_PROT_WRITE, FALSE);
		}

		/* Find space in kernel_map for the page we're interested in */
		if (!error) {
			kva = VM_MIN_KERNEL_ADDRESS;
			error = vm_map_find(kernel_map, object, off, &kva,
					PAGE_SIZE, 1);
		}

		if (!error) {
			/*
			 * Neither vm_map_lookup() nor vm_map_find() appear
			 * to add a reference count to the object, so we do
			 * that here and now.
			 */
			vm_object_reference(object);

			/*
			 * Mark the page we just found as pageable.
			 */
			error = vm_map_pageable(kernel_map, kva,
				kva + PAGE_SIZE, 0);

			/*
			 * Now do the i/o move.
			 */
			if (!error)
				error = uiomove((caddr_t) (kva + page_offset),
						len, uio);

			/*
			 * Tear down the kernel double-mapping; this also
			 * drops the object reference taken above.
			 */
			vm_map_remove(kernel_map, kva, kva + PAGE_SIZE);
		}
		/*
		 * Undo the temporary protection change, assuming the
		 * original protection was READ|EXECUTE (see XXX above).
		 */
		if (fix_prot)
			vm_map_protect(map, pageno, pageno + PAGE_SIZE,
					VM_PROT_READ|VM_PROT_EXECUTE, 0);
	} while (error == 0 && uio->uio_resid > 0);

	return (error);
}
206 #endif
207 
/*
 * Copy data in and out of the target process.
 * We do this by mapping the process's page into
 * the kernel and then doing a uiomove direct
 * from the kernel address space.
 *
 * Returns 0 on success, or an errno value: the result of the
 * tracer-permission check, EFAULT if the target is exiting or its
 * vmspace is gone (UVM case), or an error from the actual transfer.
 */
int
procfs_domem(curp, p, pfs, uio)
	struct proc *curp;		/* tracer */
	struct proc *p;			/* traced */
	struct pfsnode *pfs;		/* procfs node (not used here) */
	struct uio *uio;		/* describes the transfer */
{
	int error;

	size_t len;
	vaddr_t	addr;

	/*
	 * Snapshot length and offset now: uio_resid/uio_offset are
	 * consumed by the transfer below, but pmap_procwr() (if
	 * configured) needs the original range.
	 */
	len = uio->uio_resid;

	if (len == 0)
		return (0);

	addr = uio->uio_offset;

	/* The tracer must be allowed to do I/O on the traced process. */
	if ((error = procfs_checkioperm(curp, p)) != 0)
		return (error);

#if defined(UVM)
	/* XXXCDC: how should locking work here? */
	if ((p->p_flag & P_WEXIT) || (p->p_vmspace->vm_refcnt < 1))
		return(EFAULT);
	PHOLD(p);
	/*
	 * Take an extra reference on the vmspace so it cannot go away
	 * during the transfer; released by uvmspace_free() below.
	 */
	p->p_vmspace->vm_refcnt++;  /* XXX */
	error = uvm_io(&p->p_vmspace->vm_map, uio);
	PRELE(p);
	uvmspace_free(p->p_vmspace);

#ifdef PMAP_NEED_PROCWR
	/* Let the pmap synchronize caches for the written range. */
	if (uio->uio_rw == UIO_WRITE)
		pmap_procwr(p, addr, len);
#endif

#else
	PHOLD(p);
	error = procfs_rwmem(p, uio);
	PRELE(p);
#endif
	return (error);
}
258 
259 /*
260  * Given process (p), find the vnode from which
261  * it's text segment is being executed.
262  *
263  * It would be nice to grab this information from
264  * the VM system, however, there is no sure-fire
265  * way of doing that.  Instead, fork(), exec() and
266  * wait() all maintain the p_textvp field in the
267  * process proc structure which contains a held
268  * reference to the exec'ed vnode.
269  */
270 struct vnode *
271 procfs_findtextvp(p)
272 	struct proc *p;
273 {
274 
275 	return (p->p_textvp);
276 }
277 
/*
 * Ensure that a process has permission to perform I/O on another.
 * Arguments:
 *	p	The process wishing to do the I/O (the tracer).
 *	t	The process whose memory/registers will be read/written.
 *
 * Returns 0 if access is allowed, EPERM if the target is init while
 * the system is secure, or the error from the superuser check.
 */
int
procfs_checkioperm(p, t)
	struct proc *p, *t;
{
	int error;

	/*
	 * You cannot attach to a process's mem/regs if:
	 *
	 *	(1) it's not owned by you, or is set-id on exec
	 *	    (unless you're root), or...
	 */
	if ((t->p_cred->p_ruid != p->p_cred->p_ruid ||
		ISSET(t->p_flag, P_SUGID)) &&
	    (error = suser(p->p_ucred, &p->p_acflag)) != 0)
		return (error);

	/*
	 *	(2) ...it's init, which controls the security level
	 *	    of the entire system, and the system was not
	 *	    compiled with permanently insecure mode turned on.
	 */
	if (t == initproc && securelevel > -1)
		return (EPERM);

	return (0);
}
311 
312 #ifdef probably_never
/*
 * Given process (p), find the vnode from which
 * its text segment is being mapped.
 *
 * (This is here, rather than in procfs_subr in order
 * to keep all the VM related code in one place.)
 *
 * NOTE(review): dead code — compiled only under "probably_never".
 * It walks the whole user address range a page at a time looking
 * for a mapping backed by a vnode pager, which would shadow the
 * live procfs_findtextvp() above if ever enabled.
 */
struct vnode *
procfs_findtextvp(p)
	struct proc *p;
{
	int error;
	vm_object_t object;
	vaddr_t pageno;		/* page number */

	/* find a vnode pager for the user address space */

	for (pageno = VM_MIN_ADDRESS;
			pageno < VM_MAXUSER_ADDRESS;
			pageno += PAGE_SIZE) {
		vm_map_t map;
		vm_map_entry_t out_entry;
		vm_prot_t out_prot;
		boolean_t wired, single_use;
		vaddr_t off;

		map = &p->p_vmspace->vm_map;
		error = vm_map_lookup(&map, pageno,
			      VM_PROT_READ,
			      &out_entry, &object, &off, &out_prot,
			      &wired, &single_use);

		if (!error) {
			vm_pager_t pager;

			printf("procfs: found vm object\n");
			vm_map_lookup_done(map, out_entry);
			printf("procfs: vm object = %p\n", object);

			/*
			 * At this point, assuming no errors, object
			 * is the VM object mapping UVA (pageno).
			 * Ensure it has a vnode pager, then grab
			 * the vnode from that pager's handle.
			 */

			pager = object->pager;
			printf("procfs: pager = %p\n", pager);
			if (pager)
				printf("procfs: found pager, type = %d\n",
				    pager->pg_type);
			if (pager && pager->pg_type == PG_VNODE) {
				struct vnode *vp;

				vp = (struct vnode *) pager->pg_handle;
				printf("procfs: vp = %p\n", vp);
				return (vp);
			}
		}
	}

	/* No vnode-backed mapping found in the entire address space. */
	printf("procfs: text object not found\n");
	return (0);
}
377 #endif /* probably_never */
378