/*
 * Copyright (c) 1993 Jan-Simon Pendry
 * Copyright (c) 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Jan-Simon Pendry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)procfs_status.c	8.3 (Berkeley) 2/17/94
 *
 * $FreeBSD: src/sys/miscfs/procfs/procfs_map.c,v 1.24.2.1 2001/08/04 13:12:24 rwatson Exp $
 * $DragonFly: src/sys/vfs/procfs/procfs_map.c,v 1.7 2007/02/19 01:14:24 corecode Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <vfs/procfs/procfs.h>

#include <vm/vm.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>

#include <machine/limits.h>

#define MEBUFFERSIZE 256

/*
 * The map entries can *almost* be read with programs like cat.  However,
 * large maps need special programs to read them.  It is not easy to
 * implement a program that can sense the required buffer size and then
 * do a read with that size, because the operation cannot be made atomic.
 * The best we can do is let the program read with an arbitrarily large
 * buffer and return as much as we can.  If the buffer is too small we
 * return EFBIG, and the program can retry with a bigger buffer.
 */
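
/*
 * Hypothetical userland sketch (not part of this file) of the retry
 * protocol described above: read the map file into a buffer and, if the
 * kernel returns EFBIG, grow the buffer and try again.  The helper name,
 * the path argument, and the starting size are illustrative assumptions
 * only; it needs <sys/types.h>, <errno.h>, <fcntl.h>, <stdlib.h> and
 * <unistd.h>.
 *
 *	static char *
 *	read_procmap(const char *path, ssize_t *lenp)
 *	{
 *		size_t bufsize = 4096;
 *
 *		for (;;) {
 *			int fd = open(path, O_RDONLY);
 *			char *buf;
 *			ssize_t n;
 *
 *			if (fd < 0)
 *				return (NULL);
 *			buf = malloc(bufsize);
 *			if (buf == NULL) {
 *				close(fd);
 *				return (NULL);
 *			}
 *			n = read(fd, buf, bufsize);
 *			close(fd);
 *			if (n >= 0) {
 *				*lenp = n;
 *				return (buf);
 *			}
 *			free(buf);
 *			if (errno != EFBIG)
 *				return (NULL);
 *			bufsize *= 2;	// buffer too small, grow and retry
 *		}
 *	}
 */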
int
procfs_domap(struct proc *curp, struct lwp *lp, struct pfsnode *pfs,
	     struct uio *uio)
{
	struct proc *p = lp->lwp_proc;
	int len;
	struct vnode *vp;
	char *fullpath, *freepath;
	int error;
	vm_map_t map = &p->p_vmspace->vm_map;
	pmap_t pmap = vmspace_pmap(p->p_vmspace);
	vm_map_entry_t entry;
	char mebuffer[MEBUFFERSIZE];

	if (uio->uio_rw != UIO_READ)
		return (EOPNOTSUPP);

	if (uio->uio_offset != 0)
		return (0);

	error = 0;
	vm_map_lock_read(map);
	for (entry = map->header.next;
		((uio->uio_resid > 0) && (entry != &map->header));
		entry = entry->next) {
		vm_object_t obj, tobj, lobj;
		int ref_count, shadow_count, flags;
		vm_offset_t addr;
		vm_offset_t ostart;
		int resident, privateresident;
		char *type;

		switch(entry->maptype) {
		case VM_MAPTYPE_NORMAL:
		case VM_MAPTYPE_VPAGETABLE:
			obj = entry->object.vm_object;
			if (obj)
				vm_object_hold(obj);

			if (obj && (obj->shadow_count == 1))
				privateresident = obj->resident_page_count;
			else
				privateresident = 0;
			break;
		case VM_MAPTYPE_UKSMAP:
			obj = NULL;
			privateresident = 0;
			break;
		default:
			/* ignore entry */
			continue;
		}

		/*
		 * Use map->hint as a poor man's ripout detector.
		 */
		map->hint = entry;
		ostart = entry->start;

		/*
		 * Count resident pages (XXX can be horrible on 64-bit)
		 */
		resident = 0;
		addr = entry->start;
		while (addr < entry->end) {
			if (pmap_extract(pmap, addr))
				resident++;
			addr += PAGE_SIZE;
		}
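
		/*
		 * If we have a top-level object, walk its backing-object
		 * chain down to the bottom-most object (lobj), holding
		 * each object and re-checking backing_object after the
		 * hold in case the chain changed underneath us.
		 */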
		if (obj) {
			lobj = obj;
			while ((tobj = lobj->backing_object) != NULL) {
				KKASSERT(tobj != obj);
				vm_object_hold(tobj);
				if (tobj == lobj->backing_object) {
					if (lobj != obj) {
						vm_object_lock_swap();
						vm_object_drop(lobj);
					}
					lobj = tobj;
				} else {
					vm_object_drop(tobj);
				}
			}
		} else {
			lobj = NULL;
		}

		freepath = NULL;
		fullpath = "-";
		if (lobj) {
			switch(lobj->type) {
			default:
			case OBJT_DEFAULT:
				type = "default";
				vp = NULL;
				break;
			case OBJT_VNODE:
				type = "vnode";
				vp = lobj->handle;
				vref(vp);
				break;
			case OBJT_SWAP:
				type = "swap";
				vp = NULL;
				break;
			case OBJT_DEVICE:
				type = "device";
				vp = NULL;
				break;
			case OBJT_MGTDEVICE:
				type = "mgtdevice";
				vp = NULL;
				break;
			}

			flags = obj->flags;
			ref_count = obj->ref_count;
			shadow_count = obj->shadow_count;
			if (vp != NULL) {
				vn_fullpath(p, vp, &fullpath, &freepath, 1);
				vrele(vp);
			}
			if (lobj != obj)
				vm_object_drop(lobj);
		} else {
			flags = 0;
			ref_count = 0;
			shadow_count = 0;

			switch(entry->maptype) {
			case VM_MAPTYPE_UKSMAP:
				type = "uksmap";
				break;
			default:
				type = "none";
				break;
			}
		}

		/*
		 * format (one line per map entry):
		 *  start, end, resident, private resident, object, access
		 *  (rwx), ref count, shadow count, flags, COW/NCOW, NC/NNC,
		 *  type, path.
		 */
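		/*
		 * A line might look like this (hypothetical values):
		 *  0x0000000000400000 0x0000000000452000 18 0
		 *  0xfffff80123456780 r-x 2 1 0x0000 COW NC vnode /bin/cat
		 */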
		ksnprintf(mebuffer, sizeof(mebuffer),
#if LONG_BIT == 64
			  "0x%016lx 0x%016lx %d %d %p %s%s%s %d %d "
#else
			  "0x%08lx 0x%08lx %d %d %p %s%s%s %d %d "
#endif
			  "0x%04x %s %s %s %s\n",
			(u_long)entry->start, (u_long)entry->end,
			resident, privateresident, obj,
			(entry->protection & VM_PROT_READ)?"r":"-",
			(entry->protection & VM_PROT_WRITE)?"w":"-",
			(entry->protection & VM_PROT_EXECUTE)?"x":"-",
			ref_count, shadow_count, flags,
			(entry->eflags & MAP_ENTRY_COW)?"COW":"NCOW",
			(entry->eflags & MAP_ENTRY_NEEDS_COPY)?"NC":"NNC",
			type, fullpath);

		if (obj)
			vm_object_drop(obj);

		if (freepath != NULL) {
			kfree(freepath, M_TEMP);
			freepath = NULL;
		}

		len = strlen(mebuffer);
		if (len > uio->uio_resid) {
			error = EFBIG;
			break;
		}

		/*
		 * We cannot safely hold the map locked while accessing
		 * userspace, as a VM fault might recurse into the locked
		 * map.
		 */
		vm_map_unlock_read(map);
		error = uiomove(mebuffer, len, uio);
		vm_map_lock_read(map);
		if (error)
			break;

		/*
		 * We use map->hint as a poor man's ripout detector.  If
		 * it no longer matches the entry we set it to prior to
		 * unlocking the map, the entry MIGHT now be stale.  In
		 * that case we do an expensive lookup to find our place
		 * in the iteration again.
		 */
		if (map->hint != entry) {
			vm_map_entry_t reentry;

			vm_map_lookup_entry(map, ostart, &reentry);
			entry = reentry;
		}
	}
	vm_map_unlock_read(map);

	return error;
}

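/*
 * The map file only makes sense for processes that have a normal user
 * vmspace; P_SYSTEM (kernel) processes are excluded.
 */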
int
procfs_validmap(struct lwp *lp)
{
	return ((lp->lwp_proc->p_flags & P_SYSTEM) == 0);
}