/*	$NetBSD: uvm_coredump.c,v 1.8 2020/02/23 15:46:43 ad Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 * from: Id: uvm_glue.c,v 1.1.2.8 1998/02/07 01:16:54 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_coredump.c,v 1.8 2020/02/23 15:46:43 ad Exp $");

/*
 * uvm_coredump.c: glue functions for coredump
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>

#include <uvm/uvm.h>

/*
 * uvm_coredump_walkmap: walk a process's map for the purpose of dumping
 * a core file.
 * XXX: I'm not entirely sure the locking in this function is in any way
 * correct.  If the process isn't actually stopped then the data passed
 * to func() is at best stale, and horrid things might happen if the
 * entry being processed is deleted (dsl).
 */

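/*
 * Illustrative sketch of a caller: func() receives one uvm_coredump_state
 * per region and is expected to write [start, realend) to the core file
 * and treat [realend, end) as a hole.  The callback dump_seg(), the
 * dump_ctx structure and write_segment() below are hypothetical names
 * used only to show the shape of such a caller; the real machine-dependent
 * coredump code differs:
 *
 *	static int
 *	dump_seg(struct uvm_coredump_state *s)
 *	{
 *		struct dump_ctx *ctx = s->cookie;
 *
 *		return write_segment(ctx, s->start, s->realend, s->end,
 *		    s->prot, s->flags);
 *	}
 *
 *	error = uvm_coredump_walkmap(p, dump_seg, &ctx);
 */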
int
uvm_coredump_walkmap(struct proc *p, int (*func)(struct uvm_coredump_state *),
    void *cookie)
{
	struct uvm_coredump_state state;
	struct vmspace *vm = p->p_vmspace;
	struct vm_map *map = &vm->vm_map;
	struct vm_map_entry *entry;
	int error;

	entry = NULL;
	vm_map_lock_read(map);
	state.end = 0;
	for (;;) {
		if (entry == NULL)
			entry = map->header.next;
		else if (!uvm_map_lookup_entry(map, state.end, &entry))
			entry = entry->next;
		if (entry == &map->header)
			break;

		state.cookie = cookie;
		if (state.end > entry->start) {
			state.start = state.end;
		} else {
			state.start = entry->start;
		}
		state.realend = entry->end;
		state.end = entry->end;
		state.prot = entry->protection;
		state.flags = 0;

		/*
		 * Dump the region unless one of the following is true:
		 *
		 * (1) the region has neither object nor amap behind it
		 *     (ie. it has never been accessed).
		 *
		 * (2) the region has no amap and is read-only
		 *     (eg. an executable text section).
		 *
		 * (3) the region's object is a device.
		 *
		 * (4) the region is unreadable by the process.
		 */

		KASSERT(!UVM_ET_ISSUBMAP(entry));
#ifdef VM_MAXUSER_ADDRESS
		KASSERT(state.start < VM_MAXUSER_ADDRESS);
		KASSERT(state.end <= VM_MAXUSER_ADDRESS);
#endif
		if (entry->object.uvm_obj == NULL &&
		    entry->aref.ar_amap == NULL) {
			state.realend = state.start;
		} else if ((entry->protection & VM_PROT_WRITE) == 0 &&
		    entry->aref.ar_amap == NULL) {
			state.realend = state.start;
		} else if (entry->object.uvm_obj != NULL &&
		    UVM_OBJ_IS_DEVICE(entry->object.uvm_obj)) {
			state.realend = state.start;
		} else if ((entry->protection & VM_PROT_READ) == 0) {
			state.realend = state.start;
		} else {
			if (state.start >= (vaddr_t)vm->vm_maxsaddr)
				state.flags |= UVM_COREDUMP_STACK;

			/*
			 * If this is an anonymous entry, only dump
			 * instantiated pages.
			 */
			if (entry->object.uvm_obj == NULL) {
				vaddr_t end;

				amap_lock(entry->aref.ar_amap, RW_READER);
				for (end = state.start;
				     end < state.end; end += PAGE_SIZE) {
					struct vm_anon *anon;
					anon = amap_lookup(&entry->aref,
					    end - entry->start);
					/*
					 * If we have already encountered an
					 * uninstantiated page, stop at the
					 * first instantiated page.
					 */
					if (anon != NULL &&
					    state.realend != state.end) {
						state.end = end;
						break;
					}

					/*
					 * If this page is the first
					 * uninstantiated page, mark this as
					 * the real ending point.  Continue
					 * counting uninstantiated pages.
					 */
					if (anon == NULL &&
					    state.realend == state.end) {
						state.realend = end;
					}
				}
				amap_unlock(entry->aref.ar_amap);
			}
		}

		vm_map_unlock_read(map);
		error = (*func)(&state);
		if (error)
			return (error);
		vm_map_lock_read(map);
	}
	vm_map_unlock_read(map);

	return (0);
}

static int
count_segs(struct uvm_coredump_state *s)
{
	(*(int *)s->cookie)++;

	return 0;
}

int
uvm_coredump_count_segs(struct proc *p)
{
	int count = 0;

	uvm_coredump_walkmap(p, count_segs, &count);
	return count;
}
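
/*
 * Usage note (a sketch of the typical pattern, not a description of any
 * one caller): core-file writers generally call uvm_coredump_count_segs()
 * first to size the core file's segment table, then walk the map again
 * with uvm_coredump_walkmap() and a callback that emits one segment per
 * uvm_coredump_state.  Because the map is unlocked around each callback
 * invocation, the two passes are only consistent if the process is
 * stopped, as the XXX comment above warns.
 */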
218