/*
 * Copyright (c) 1993 Paul Kranenburg
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Paul Kranenburg.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *	$Id: procfs_subr.c,v 1.3 1993/08/24 16:47:24 pk Exp $
 */
#include "param.h"
#include "systm.h"
#include "time.h"
#include "kernel.h"
#include "ioctl.h"
#include "proc.h"
#include "buf.h"
#include "vnode.h"
#include "file.h"
#include "resourcevar.h"
#include "vm/vm.h"
#include "vm/vm_page.h"
#include "vm/vm_kern.h"
#include "kinfo.h"
#include "kinfo_proc.h"

#include "procfs.h"
#include "pfsnode.h"

#include "machine/vmparam.h"

/*
 * Get process address map (PIOCGMAP)
 */
int
pfs_vmmap(procp, pfsp, pmapp)
struct proc	*procp;
struct pfsnode	*pfsp;
struct procmap	*pmapp;
{
	int		error = 0;
	vm_map_t	map;
	vm_map_entry_t	entry;
	struct procmap	prmap;

	map = &procp->p_vmspace->vm_map;
	vm_map_lock(map);
	entry = map->header.next;

	while (entry != &map->header) {
		if (entry->is_a_map) {
			vm_map_t	submap = entry->object.share_map;
			vm_map_entry_t	subentry;

			vm_map_lock(submap);
			subentry = submap->header.next;
			while (subentry != &submap->header) {
				prmap.vaddr = subentry->start;
				prmap.size = subentry->end - subentry->start;
				prmap.offset = subentry->offset;
				prmap.prot = subentry->protection;
				error = copyout(&prmap, pmapp, sizeof(prmap));
				if (error)
					break;
				pmapp++;
				subentry = subentry->next;
			}
			vm_map_unlock(submap);
			if (error)
				break;
		} else {
			/*
			 * Report the top-level entry only when it is not a
			 * share map, so the number of entries copied out
			 * matches the count returned by pfs_vm_nentries().
			 */
			prmap.vaddr = entry->start;
			prmap.size = entry->end - entry->start;
			prmap.offset = entry->offset;
			prmap.prot = entry->protection;
			error = copyout(&prmap, pmapp, sizeof(prmap));
			if (error)
				break;
			pmapp++;
		}
		entry = entry->next;
	}

	vm_map_unlock(map);
	return error;
}

/*
 * Count number of VM entries of process (PIOCNMAP)
 */
int
pfs_vm_nentries(procp, pfsp)
struct proc	*procp;
struct pfsnode	*pfsp;
{
	int		count = 0;
	vm_map_t	map;
	vm_map_entry_t	entry;

	map = &procp->p_vmspace->vm_map;
	vm_map_lock(map);
	entry = map->header.next;

	while (entry != &map->header) {
		if (entry->is_a_map)
			count += entry->object.share_map->nentries;
		else
			count++;
		entry = entry->next;
	}

	vm_map_unlock(map);
	return count;
}
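
/*
 * Illustrative usage sketch for the two ioctls implemented above.  This is
 * not compiled into the kernel and is not an authoritative interface
 * description: the /proc path and the assumption that PIOCNMAP returns its
 * count through an int argument are guesses, while the struct procmap
 * members match those filled in by pfs_vmmap().
 */
#if 0
	int fd, n;
	struct procmap *pm;

	fd = open("/proc/123", O_RDONLY);	/* hypothetical /proc entry */
	if (ioctl(fd, PIOCNMAP, &n) == 0) {	/* count from pfs_vm_nentries() */
		pm = malloc(n * sizeof(*pm));
		if (ioctl(fd, PIOCGMAP, pm) == 0) {
			/* pm[0..n-1] now hold vaddr, size, offset and prot */
		}
	}
#endif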

/*
 * Get a file descriptor for a file mapped into the process (PIOCGMAPFD)
 */
int
pfs_vmfd(procp, pfsp, vmfdp, p)
struct proc	*procp;
struct pfsnode	*pfsp;
struct vmfd	*vmfdp;
struct proc	*p;
{
	int		rv;
	vm_map_t	map;
	vm_offset_t	addr;
	vm_size_t	size;
	vm_prot_t	prot, maxprot;
	vm_inherit_t	inherit;
	boolean_t	shared;
	vm_object_t	object;
	vm_offset_t	objoff;
	struct vnode	*vp;
	struct file	*fp;
	extern struct fileops	vnops;

	map = &procp->p_vmspace->vm_map;

	addr = vmfdp->vaddr;
	rv = vm_region(map, &addr, &size, &prot, &maxprot,
			&inherit, &shared, &object, &objoff);

	if (rv != KERN_SUCCESS)
		return EINVAL;

	while (object != NULL && object->pager == NULL)
		object = object->shadow;

	if (object == NULL || object->pager == NULL
			/* Nobody seems to care || !object->pager_ready */ )
		return ENOENT;

	if (object->pager->pg_type != PG_VNODE)
		return ENOENT;

	/* We have a vnode pager, allocate file descriptor */
	vp = (struct vnode *)object->pager->pg_handle;
	if (VOP_ACCESS(vp, VREAD, p->p_ucred, p)) {
		rv = EACCES;
		goto out;
	}
	rv = falloc(p, &fp, &vmfdp->fd);
	if (rv)
		goto out;

	VREF(vp);
	fp->f_type = DTYPE_VNODE;
	fp->f_ops = &vnops;
	fp->f_data = (caddr_t)vp;
	fp->f_flag = FREAD;

out:
	vm_object_unlock(object);
	return rv;
}
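
/*
 * Illustrative usage sketch for PIOCGMAPFD (not compiled in): the caller
 * passes the virtual address of a mapping and receives a read-only file
 * descriptor for the vnode backing it.  The struct vmfd members vaddr and
 * fd match those used by pfs_vmfd() above; treating "fd" as the /proc file
 * descriptor from the previous sketch is an assumption.
 */
#if 0
	struct vmfd vf;

	vf.vaddr = pm[0].vaddr;			/* e.g. an address from PIOCGMAP */
	if (ioctl(fd, PIOCGMAPFD, &vf) == 0) {
		/* vf.fd is now an open descriptor (FREAD) for the mapped file */
	}
#endif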

/*
 * Vnode op for reading/writing.
 */
/* ARGSUSED */
int
pfs_doio(vp, uio, ioflag, cred)
	struct vnode *vp;
	register struct uio *uio;
	int ioflag;
	struct ucred *cred;
{
	struct pfsnode	*pfsp = VTOPFS(vp);
	struct proc	*procp;
	int		error = 0;
	long		n, on;

#ifdef DEBUG
	if (pfs_debug)
		printf("pfs_doio(%s): vp 0x%x, proc %x\n",
			uio->uio_rw==UIO_READ?"R":"W", vp, uio->uio_procp);
#endif

#ifdef DIAGNOSTIC
	if (vp->v_type != VPROC)
		panic("pfs_doio vtype");
#endif
	procp = pfsp->pfs_pid ? pfind(pfsp->pfs_pid) : &proc0;
	if (!procp)
		return ESRCH;

	if (procp->p_flag & SSYS)
		return EACCES;

	if (uio->uio_resid == 0)
		return (0);
	if (uio->uio_offset < 0)
		return (EINVAL);

	do { /* One page at a time */
		int		rv;
		vm_map_t	map;
		vm_offset_t	offset;
		vm_size_t	size;
		vm_prot_t	oldprot = 0, prot, maxprot;
		vm_inherit_t	inherit;
		boolean_t	shared;
		vm_object_t	object;
		vm_offset_t	objoff;
		vm_page_t	m;
		vm_offset_t	kva;

		on = uio->uio_offset - trunc_page(uio->uio_offset);
		n = MIN(PAGE_SIZE-on, uio->uio_resid);

		/* Map page into kernel space */

		map = &procp->p_vmspace->vm_map;
#if 0
	vm_map_print(map, 1);
#endif

		offset = trunc_page(uio->uio_offset);

		rv = vm_region(map, &offset, &size, &prot, &maxprot,
				&inherit, &shared, &object, &objoff);
		if (rv != KERN_SUCCESS)
			return EINVAL;

		vm_object_unlock(object);

		if (uio->uio_rw == UIO_WRITE && (prot & VM_PROT_WRITE) == 0) {
			oldprot = prot;
			prot |= VM_PROT_WRITE;
			rv = vm_protect(map, offset, PAGE_SIZE, FALSE, prot);
			if (rv != KERN_SUCCESS)
				return EPERM;
		}
		/* Just fault the page */
		rv = vm_fault(map, offset, prot, FALSE);
		if (rv != KERN_SUCCESS)
			return EFAULT;

		/* Look up again as vm_fault() may have inserted a shadow object */
		rv = vm_region(map, &offset, &size, &prot, &maxprot,
				&inherit, &shared, &object, &objoff);
		if (rv != KERN_SUCCESS)
			return EINVAL;

		/* Now find the page */
		/* XXX hope it's still there, should we have wired it? */
		m = vm_page_lookup(object, objoff);
		if (m == NULL)
			return ESRCH;

		kva = kmem_alloc_wait(kernel_map, PAGE_SIZE);

		pmap_enter(vm_map_pmap(kernel_map), kva, VM_PAGE_TO_PHYS(m),
			VM_PROT_DEFAULT, TRUE);

		error = uiomove((caddr_t)(kva + on), (int)n, uio);

		pmap_remove(vm_map_pmap(kernel_map), kva, kva + PAGE_SIZE);
		kmem_free_wakeup(kernel_map, kva, PAGE_SIZE);
		if (oldprot) {
			rv = vm_protect(map, offset, PAGE_SIZE, FALSE, oldprot);
			if (rv != KERN_SUCCESS)
				return EPERM;
		}

	} while (error == 0 && uio->uio_resid > 0);

	return (error);
}
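
/*
 * Illustrative usage sketch (not compiled in): pfs_doio() above services
 * plain read(2)/write(2) on the process' /proc file, interpreting the file
 * offset as a virtual address in the target process and copying one page
 * at a time.  The path and the target_vaddr placeholder are assumptions;
 * the offset/address equivalence follows from the trunc_page(uio->uio_offset)
 * lookup above.
 */
#if 0
	long word;

	fd = open("/proc/123", O_RDWR);			/* hypothetical /proc entry */
	lseek(fd, (off_t)target_vaddr, SEEK_SET);	/* file offset == target VA */
	read(fd, &word, sizeof(word));			/* handled by pfs_doio() */
#endif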

#if 00
int
pfs_map(procp, kva, rw, offset)
struct proc	*procp;
int		rw;
vm_offset_t	*kva, offset;
{
	int		rv;
	vm_map_t	map;
	vm_size_t	size;
	vm_prot_t	prot, maxprot;
	vm_inherit_t	inherit;
	boolean_t	shared;
	vm_object_t	object;
	vm_offset_t	objoff;
	vm_page_t	m;

	map = &procp->p_vmspace->vm_map;
#if 0
	vm_map_print(map, 1);
#endif

	offset = trunc_page(offset);

	rv = vm_region(map, &offset, &size, &prot, &maxprot,
			&inherit, &shared, &object, &objoff);
	if (rv != KERN_SUCCESS)
		return EINVAL;

	vm_object_unlock(object);

	if (rw == UIO_WRITE && (prot & VM_PROT_WRITE) == 0) {
		prot |= VM_PROT_WRITE;
		rv = vm_protect(map, offset, PAGE_SIZE, FALSE, prot);
		if (rv != KERN_SUCCESS)
			return EPERM;
	}
	/* Just fault page */
	rv = vm_fault(map, offset, prot, FALSE);
	if (rv != KERN_SUCCESS)
		return EFAULT;

	/* Look up again as vm_fault() may have inserted a shadow object */
	rv = vm_region(map, &offset, &size, &prot, &maxprot,
			&inherit, &shared, &object, &objoff);
	if (rv != KERN_SUCCESS)
		return EINVAL;

	m = vm_page_lookup(object, objoff);
	if (m == NULL)
		return ESRCH;

	*kva = kmem_alloc_wait(kernel_map, PAGE_SIZE);

	pmap_enter(vm_map_pmap(kernel_map), *kva, VM_PAGE_TO_PHYS(m),
			VM_PROT_DEFAULT, TRUE);

	return 0;
}

int
pfs_unmap(procp, kva)
struct proc	*procp;
vm_offset_t	kva;
{
	pmap_remove(vm_map_pmap(kernel_map), kva, kva + PAGE_SIZE);
	kmem_free_wakeup(kernel_map, kva, PAGE_SIZE);
}
#endif