xref: /dflybsd-src/sys/vfs/procfs/procfs_map.c (revision dae741e33c840b92a8a53bf9f01157ede145e256)
1 /*
2  * Copyright (c) 1993 Jan-Simon Pendry
3  * Copyright (c) 1993
4  *	The Regents of the University of California.  All rights reserved.
5  *
6  * This code is derived from software contributed to Berkeley by
7  * Jan-Simon Pendry.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 4. Neither the name of the University nor the names of its contributors
18  *    may be used to endorse or promote products derived from this software
19  *    without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31  * SUCH DAMAGE.
32  *
33  *	@(#)procfs_status.c	8.3 (Berkeley) 2/17/94
34  *
35  * $FreeBSD: src/sys/miscfs/procfs/procfs_map.c,v 1.24.2.1 2001/08/04 13:12:24 rwatson Exp $
36  * $DragonFly: src/sys/vfs/procfs/procfs_map.c,v 1.7 2007/02/19 01:14:24 corecode Exp $
37  */
38 
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/proc.h>
42 #include <sys/vnode.h>
43 #include <vfs/procfs/procfs.h>
44 
45 #include <vm/vm.h>
46 #include <sys/lock.h>
47 #include <vm/pmap.h>
48 #include <vm/vm_map.h>
49 #include <vm/vm_page.h>
50 #include <vm/vm_object.h>
51 
52 
53 #define MEBUFFERSIZE 256
54 
55 /*
56  * The map entries can *almost* be read with programs like cat.  However,
57  * large maps need special programs to read.  It is not easy to implement
58  * a program that can sense the required size of the buffer, and then
59  * subsequently do a read with the appropriate size.  This operation cannot
60  * be atomic.  The best that we can do is to allow the program to do a read
61  * with an arbitrarily large buffer, and return as much as we can.  We can
62  * return an error code if the buffer is too small (EFBIG), then the program
63  * can try a bigger buffer.
64  */
/*
 * Generate the contents of /proc/<pid>/map: one text line per VM map
 * entry giving the address range, residency counts, protection bits,
 * COW state, backing object type and (for vnode objects) the file path.
 *
 * Returns 0 on success, EOPNOTSUPP for writes, EFBIG when the caller's
 * buffer cannot hold the next formatted line, or a uiomove() error.
 */
int
procfs_domap(struct proc *curp, struct lwp *lp, struct pfsnode *pfs,
	     struct uio *uio)
{
	struct proc *p = lp->lwp_proc;
	int len;
	struct vnode *vp;
	char *fullpath, *freepath;	/* path string; freepath is kfree'd if set */
	int error;
	vm_map_t map = &p->p_vmspace->vm_map;
	pmap_t pmap = vmspace_pmap(p->p_vmspace);
	vm_map_entry_t entry;
	char mebuffer[MEBUFFERSIZE];	/* scratch for one formatted line */

	/* The map file is read-only */
	if (uio->uio_rw != UIO_READ)
		return (EOPNOTSUPP);

	/*
	 * The whole report must be consumed in a single read from
	 * offset 0; any other offset reads as EOF (see the buffer
	 * sizing discussion above).
	 */
	if (uio->uio_offset != 0)
		return (0);

	error = 0;
	vm_map_lock_read(map);
	for (entry = map->header.next;
		((uio->uio_resid > 0) && (entry != &map->header));
		entry = entry->next) {
		vm_object_t obj, tobj, lobj;
		int ref_count, shadow_count, flags;
		vm_offset_t addr;
		vm_offset_t ostart;
		int resident, privateresident;
		char *type;

		/* Only report normal and vpagetable mappings */
		if (entry->maptype != VM_MAPTYPE_NORMAL &&
		    entry->maptype != VM_MAPTYPE_VPAGETABLE) {
			continue;
		}

		obj = entry->object.vm_object;
		if (obj)
			vm_object_hold(obj);

		/*
		 * Pages are counted as "private" only when the top-level
		 * object has exactly one shadow reference.
		 */
		if (obj && (obj->shadow_count == 1))
			privateresident = obj->resident_page_count;
		else
			privateresident = 0;

		/*
		 * Use map->hint as a poor man's ripout detector.
		 */
		map->hint = entry;
		ostart = entry->start;

		/*
		 * Count resident pages (XXX can be horrible on 64-bit)
		 */
		resident = 0;
		addr = entry->start;
		while (addr < entry->end) {
			if (pmap_extract(pmap, addr))
				resident++;
			addr += PAGE_SIZE;
		}

		/*
		 * Walk down the backing-object chain to the bottom-most
		 * object (lobj).  Each candidate is held first and then
		 * re-checked against lobj->backing_object, since the
		 * chain may change while we block on the hold; on a
		 * mismatch the candidate is dropped and the step retried.
		 * vm_object_lock_swap() keeps the hold order consistent
		 * before the intermediate object is released.
		 */
		if (obj) {
			lobj = obj;
			while ((tobj = lobj->backing_object) != NULL) {
				KKASSERT(tobj != obj);
				vm_object_hold(tobj);
				if (tobj == lobj->backing_object) {
					if (lobj != obj) {
						vm_object_lock_swap();
						vm_object_drop(lobj);
					}
					lobj = tobj;
				} else {
					vm_object_drop(tobj);
				}
			}
		} else {
			lobj = NULL;
		}

		freepath = NULL;
		fullpath = "-";
		if (lobj) {
			/*
			 * Classify the bottom-most object; any type not
			 * listed falls through to "default".  For vnode
			 * objects take a reference so the vnode survives
			 * the path lookup below.
			 */
			switch(lobj->type) {
			default:
			case OBJT_DEFAULT:
				type = "default";
				vp = NULL;
				break;
			case OBJT_VNODE:
				type = "vnode";
				vp = lobj->handle;
				vref(vp);
				break;
			case OBJT_SWAP:
				type = "swap";
				vp = NULL;
				break;
			case OBJT_DEVICE:
				type = "device";
				vp = NULL;
				break;
			}

			/* Counts are reported from the top-level object */
			flags = obj->flags;
			ref_count = obj->ref_count;
			shadow_count = obj->shadow_count;
			if (vp != NULL) {
				/* May allocate freepath (M_TEMP), freed below */
				vn_fullpath(p, vp, &fullpath, &freepath, 1);
				vrele(vp);
			}
			if (lobj != obj)
				vm_object_drop(lobj);
		} else {
			type = "none";
			flags = 0;
			ref_count = 0;
			shadow_count = 0;
		}

		/*
		 * format:
		 *  start, end, resident, private resident, obj, r/w/x,
		 *  ref_count, shadow_count, flags, COW/NCOW, NC/NNC,
		 *  type, fullpath.
		 */
		ksnprintf(mebuffer, sizeof(mebuffer),
		    "0x%lx 0x%lx %d %d %p %s%s%s %d %d 0x%x %s %s %s %s\n",
			(u_long)entry->start, (u_long)entry->end,
			resident, privateresident, obj,
			(entry->protection & VM_PROT_READ)?"r":"-",
			(entry->protection & VM_PROT_WRITE)?"w":"-",
			(entry->protection & VM_PROT_EXECUTE)?"x":"-",
			ref_count, shadow_count, flags,
			(entry->eflags & MAP_ENTRY_COW)?"COW":"NCOW",
			(entry->eflags & MAP_ENTRY_NEEDS_COPY)?"NC":"NNC",
			type, fullpath);

		if (obj)
			vm_object_drop(obj);

		if (freepath != NULL) {
			kfree(freepath, M_TEMP);
			freepath = NULL;
		}

		/*
		 * Lines are never partially copied out; if the remaining
		 * user buffer is too small the caller must retry with a
		 * larger one.
		 */
		len = strlen(mebuffer);
		if (len > uio->uio_resid) {
			error = EFBIG;
			break;
		}

		/*
		 * We cannot safely hold the map locked while accessing
		 * userspace as a VM fault might recurse the locked map.
		 */
		vm_map_unlock_read(map);
		error = uiomove(mebuffer, len, uio);
		vm_map_lock_read(map);
		if (error)
			break;

		/*
		 * We use map->hint as a poor man's ripout detector.  If
		 * it does not match the entry we set it to prior to
		 * unlocking the map the entry MIGHT now be stale.  In
		 * this case we do an expensive lookup to find our place
		 * in the iteration again.
		 */
		if (map->hint != entry) {
			vm_map_entry_t reentry;

			vm_map_lookup_entry(map, ostart, &reentry);
			entry = reentry;
		}
	}
	vm_map_unlock_read(map);

	return error;
}
244 
245 int
246 procfs_validmap(struct lwp *lp)
247 {
248 	return ((lp->lwp_proc->p_flag & P_SYSTEM) == 0);
249 }
250